Dataset schema:

    field            type    values
    query            string  lengths 9-3.4k
    document         string  lengths 9-87.4k
    metadata         dict
    negatives        list    lengths 4-101
    negative_scores  list    lengths 4-101
    document_score   string  lengths 3-10
    document_rank    string  102 distinct values
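Each record below lists its fields in schema order: query (a docstring-style description), document (the matching code snippet), metadata (the training objective), negatives (hard-negative snippets), negative_scores (similarity scores aligned index-for-index with negatives), then document_score and document_rank. As a minimal sketch of loading such a table with the Hugging Face datasets library (the dataset identifier below is a placeholder, not the real one):

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the dataset's actual path.
ds = load_dataset("your-org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])               # e.g. "Load healthchecks from name."
print(row["document"])            # the positive code snippet
print(len(row["negatives"]))      # between 4 and 101 hard negatives per row
print(row["negative_scores"][0])  # score aligned with negatives[0]
print(row["document_score"], row["document_rank"])
```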
Load healthchecks from name.
def loadTestsFromName(self, name, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def load(name):\n return []", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def __load__(self, name):\n raise KeyError(name)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "def load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . 
import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])", "def load_scenario(self, name):\n if name[:-5] != \".json\":\n name += \".json\"\n scr_path = os.path.dirname(os.path.abspath(__file__))\n f_path = os.path.join(scr_path, \"scenarios\", name)\n if not os.path.exists(f_path):\n raise IOError\n f = open(f_path, 'r')\n sc = Json(f)\n self._loaded_sc = sc\n self._scenario_script_elements = len(sc[\"script\"])\n self._scenario_script_cur = 0", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def from_name(self, name):\n return self._name_to_loadout.get(name.lower())", "def _load_support(name):\n curr = P.dirname(P.abspath(__file__))\n with open(P.join(curr, \"data\", \"%s.yml\" % name)) as fin:\n return yaml.full_load(fin)", "def load(self, name):\n # ext = os.path.splitext(name)[1]\n # if ext == '.mat':\n # self.load_matlab(name)\n # else:\n # self.load_pkl(name)\n self.load_pkl(name)\n nhashes = sum(self.counts)\n # Report the proportion of dropped hashes (overfull table)\n dropped = nhashes - sum(np.minimum(self.depth, self.counts))\n print(\"Read fprints for\", sum(n is not None for n in self.names),\n \"files (\", nhashes, \"hashes) from\", name,\n \"(%.2f%% dropped)\" % (100.0 * dropped / max(1, nhashes)))", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def _load_vulnerabilities_report_file(file_name):\n with open(os.path.join(module_path, test_name, file_name)) as file:\n json_data = json.load(file)\n return ImageVulnerabilitiesReport.from_json(json_data)", "def get_by_name(self, name):\n # type: (str) -> BoundLoadBalancer\n return super(LoadBalancersClient, self).get_by_name(name)", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_health_get(self):\n pass", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def __getitem__ (self, name):\n try:\n return self.load_module (name)\n except ImportError: pass\n raise KeyError (name)", "def load_life(name):\n\tif not '.json' in name:\n\t\tname += '.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def load(self, file_path, name=None):\n self.yaml_dict = u.load_yaml(file_path)\n if name is None:\n name = u.get_file_name(file_path)\n self.name = name\n self._check_scenario_sections_valid()\n\n self._parse_subnets()\n self._parse_topology()\n self._parse_os()\n self._parse_services()\n self._parse_processes()\n self._parse_sensitive_hosts()\n self._parse_exploits()\n self._parse_privescs()\n self._parse_scan_costs()\n self._parse_host_configs()\n self._parse_firewall()\n self._parse_hosts()\n self._parse_step_limit()\n return self._construct_scenario()", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = 
nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def load(self, path):\n\n try:\n with open(path) as f:\n try:\n self.hooks = yaml.load(f.read())\n except ScannerError:\n self.warning('Error loading {0} hooks - Is it '\n 'correctly formatted?'.format(path))\n else:\n self.out('Loading hooks')\n except IOError:\n self.warning('{0} not found'.format(path))", "def load_internal(self, name, profile='default', **kwargs):\n version = kwargs.get('version', '1.0')\n uri = self.conf['uri']\n uri = uri.format(name=name, profile=profile, version=version)\n if uri.startswith('http://') or uri.startswith('https://'):\n r = requests.get(uri)\n if r.status_code == 200:\n return yaml.load(r.content)\n import os\n if not os.path.isabs(uri):\n uri = os.path.join(self.context.workdir, uri)\n with open(uri, 'r') as f:\n content = f.read()\n return yaml.load(content, Loader=yaml.FullLoader)", "def load(cls, name):\n try:\n return importlib.import_module(cls._plugins[name])\n except Exception as err:\n print(\"** could not load command [%s]:\\n%s\" % (name, err))", "def test_fake_health_get(self):\n pass", "def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt {retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. 
We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}", "def loadmodule(self, name):\n\n if name in self._modules:\n return self._modules[name]()\n\n raise Error(\"No such module: {0}\".format(name))", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "async def _load_hsm_status(self) -> None:\n hsm: Dict[str, str] = await self._api_request(\"hsm\")\n _LOGGER.debug(\"Loaded hsm status\")\n self._hsm_status = hsm[\"hsm\"]", "def pre_loadbalancer_healthmonitor_read(self, resource_id):\n pass", "def loadHandlersFrom(self, name):\n try:\n module = __import__(name)\n except Exception as err:\n raise BadHandler(\"Failed import: %s\" % err)\n #\n # Look for handler classes\n for cls_name in dir(module):\n object = getattr(module, cls_name)\n try:\n is_handler = issubclass(object, BaseTreeHandler)\n except TypeError:\n is_handler = False\n if is_handler:\n g.es(\"... 
found handler '%s'\" % (cls_name,), color=\"blue\")\n self.handlers[cls_name.lower()] = object", "def load_config_dict_by_name(name):\n full_name = os.path.join(BBCONFIG_DIR, name)\n for x in name, full_name:\n if os.path.exists(x):\n return yaml.load(open(x))\n return None", "def loadaddon(self, addonName):\r\n es.load(\"%s/addons/%s\" % (info.basename, addonName))", "async def health(self) -> Health:\n response = await self._http_requests.get(build_url(Paths.HEALTH))\n return Health(**response.json())", "def load_active(self, name):\n self.set_config(self.load_configuration(name))\n self.set_last_config(name)", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")", "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def shelve_load(file_name: str, *args):\n res = {}\n with shelve.open(os.path.splitext(file_name)[0]) as db:\n for k, v in db.items():\n res[k] = v\n return res", "def loadConfigModule(name, options, tags):\n if isinstance(name, str):\n LOG.info('Loading %s', name)\n d = {}\n module = __import__(name[:-3], d, d)\n else:\n module = reload(name)\n onload = module.__dict__.get('onload')\n if callable(onload):\n try:\n onload(options, tags)\n except:\n LOG.fatal('Exception while loading %s', name)\n raise\n return module", "def get_by_dns_name(cls, dns_name: str) -> \"ELB\":\n _, region, _ = dns_name.split(\".\", maxsplit=2)\n client = BotoClientProxy(\"elb\", region)\n\n response = client.describe_load_balancers()\n next_marker = response.get(\"NextMarker\")\n load_balancers = response[\"LoadBalancerDescriptions\"] # type: List\n while next_marker:\n response = client.describe_load_balancers(Marker=next_marker)\n next_marker = response.get(\"NextMarker\")\n load_balancers.extend(response[\"LoadBalancerDescriptions\"])\n\n for load_balancer in load_balancers:\n if load_balancer[\"DNSName\"] == dns_name:\n return cls.from_boto_dict(load_balancer)\n\n raise ELBNotFound(dns_name)", "def _load_file(name):\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)", "def load(self, skillName):\r\n es.load(\"%s/skills/%s\" % (info.basename, skillName))", "def test_get_hyperflex_health_list(self):\n pass", "async def check_health():\n return {\"healthy\": True}", "def add_load_location(self, location_path, location_name):\n self.load_locations[location_name] = location_path", "def health_check(self, *, scope: Scope) -> HealthCheckStatus:", "def run(name, **kwargs):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": \"Check succeeded.\",\n }\n\n try:\n result = __salt__[name](**kwargs)\n except KeyError:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Module '{}' does not exist.\".format(name)\n except CheckError as exc:\n ret[\"result\"] = False\n ret[\"comment\"] = str(exc)\n\n if result:\n ret[\"comment\"] = result\n\n return ret", "def loadTestsFromNames(self, names, module=None):\r\n suites = [self.loadTestsFromName(name, module) for name in names]\r\n return self.suiteClass(suites)", "def test_simple_health_check(self):\n response = self.client.open(\n 
'/awadallah/VaultsManager/1.0.0/health',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def api_healthcheck(self) -> Optional[Exception]:\n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc", "def load(name, **options):\n\n return get_component(CachingPackage.COMPONENT_NAME).load(name, **options)", "def test_health(self):\n self.assert_request('get', '/_health')", "def prechecks(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(hostnames=hostnames,\n servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.precheck(hostnames, servicenames)\n return Job(ansible_job)", "def health_check():\n return dict(api_status='OK')", "def healthcheck(url):\n try:\n r = requests.get('http://localhost:5000/healthcheck')\n output = r.json()\n _ = output['Success']\n return True\n except:\n return False", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def import_load(pkg, name):\n def loader():\n mod = importlib.import_module(pkg)\n return getattr(mod, name)\n return loader", "def find(self, name):\n path = self.directory.joinpath(name).with_suffix('.yaml')\n if path.is_file():\n return self.from_path(path)\n raise LookupError(\"Job {} does not exist\".format(repr(name)))", "def __init__(self, name, dir='.'):\n try:\n full_name = os.path.join(dir, name + '.task1')\n self.load(full_name)\n except Exception as e:\n print('Failed to load \"{}\"'.format(full_name))\n print(e)", "def pre_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = 
recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def load(name):\n\n clovr = pymongo.Connection().clovr\n clusters = clovr.clusters\n instances = clovr.instances\n \n cluster = clusters.find_one(dict(name=name))\n if not cluster:\n raise ClusterDoesNotExist(name)\n\n\n return cluster", "def test_health_checks_constructed(self):\n\n node = Node(\n {\n 'healthchecks': [\n {\n 'command': '/some/basic/example',\n 'on_failure': None,\n 'on_failure_even_if_security_violation': False\n },\n\n {\n 'command': '/some/basic/example',\n 'on_failure': '/some/rescue-command',\n 'on_failure_even_if_security_violation': True\n },\n\n {\n 'command': '/some/basic/example'\n }\n ]\n },\n {},\n mock.Mock()\n )\n\n self.assertEqual(3, len(node.get_health_checks()))", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def check(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.check(hostnames, servicenames)\n return Job(ansible_job)", "def loadconfig(self, ref):\n mod, var = ref.rsplit(\".\", 1)\n mod = importlib.reload(importlib.import_module(mod))\n jobs = getattr(mod, var)\n return jobs", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def get_healthchecks(\n self, service_namespace_config: ServiceNamespaceConfig\n ) -> List[HealthcheckDict]:\n\n mode = 
self.get_healthcheck_mode(service_namespace_config)\n\n graceperiodseconds = self.get_healthcheck_grace_period_seconds()\n intervalseconds = self.get_healthcheck_interval_seconds()\n timeoutseconds = self.get_healthcheck_timeout_seconds()\n maxconsecutivefailures = self.get_healthcheck_max_consecutive_failures()\n\n if mode == \"http\" or mode == \"https\":\n http_path = self.get_healthcheck_uri(service_namespace_config)\n protocol = f\"MESOS_{mode.upper()}\"\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": protocol,\n \"path\": http_path,\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"tcp\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"TCP\",\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"cmd\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"COMMAND\",\n \"command\": self.get_healthcheck_cmd(),\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode is None:\n healthchecks = []\n else:\n raise InvalidHealthcheckMode(\n \"Unknown mode: %s. Only acceptable healthcheck modes are http/https/tcp/cmd\"\n % mode\n )\n return healthchecks", "def get_check_file(group, name):\n return os.path.join(get_checks_path(), group, name + \".py\")", "def load_regimes_from_file(self, file_name):\n self.regime_file_name = os.path.split(file_name)[-1][:-5]\n self._logger.info(u\"loading regime file: {0}\".format(self.regime_file_name))\n with open(file_name.encode(\"utf-8\"), \"r\") as f:\n self._regimes += yaml.load(f)\n\n self._update_regime_list()\n\n if self._regimes:\n self.actExecuteRegimes.setDisabled(False)\n\n self._logger.info(\"loaded {} regimes\".format(len(self._regimes)))\n self.statusBar().showMessage(u\"loaded {} regimes.\".format(len(self._regimes)), 1000)\n return", "def load_yaml(yaml_name):\n print('training network configuration file is {0}'.format(yaml_name))\n util.check_file_exist(yaml_name)\n config = util.load_yaml_file(yaml_name)\n return config", "def load(self, eng):\n eng.eval(\"load_system('simulink_househeat')\", nargout=0)", "def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)", "def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()", "def get_healthcheck() -> Response:\n\n try:\n with get_cursor(db_creds, commit=False) as cur:\n cur.execute(\"SELECT * FROM events.healthchecks\")\n data = cur.fetchall()\n return jsonify(status_code=200, data=data)\n except psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )", "def check_name(self, name, bots_path):\n if not os.path.exists(bots_path + name):\n raise ValueError(\"Could not find bot '{bot}'\".format(bot=bots_path + name))\n else:\n self.name = name\n self.path = bots_path + name + os.sep", "def test_lint(self):\n l = 
self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))", "def testLoadConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('postsubmit'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball-power'))", "def load(loggingQueue, name): \n\tlogger = ThreadsafeLogger(loggingQueue, '{0}-{1}'.format(name, 'ConfigLoader'))\n\tthisConfig = {}\n\tconfigParser = configparser.ConfigParser()\n\n\tthisConfig = loadSecrets(thisConfig, logger, configParser)\n\tthisConfig = loadModule(thisConfig, logger, configParser)\n\treturn thisConfig", "def load_file(name):\n path = os.path.abspath(os.path.dirname(__file__))\n filename = os.path.join(path, \"tests/input\", name)\n with open(filename, \"r\") as f:\n data = f.read()\n return data", "def health_check():\n return \"Comet-API\"", "def health_check(cls):\n cb = cls.CACHE_BACKEND()\n return cb.health_check()", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def get_health_check(self):\n return util.create_response(output=\"OK\")", "def load(self, file_name):\n\n self._state.load(file_name)", "def load_app(self, name):\n LOG.debug(_(\"Loading app %(name)s from %(path)s\") %\n {'name': name, 'path': self.config_path})\n return deploy.loadapp(\"config:%s\" % self.config_path, name=name)", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")" ]
[ "0.6285826", "0.5823373", "0.5733951", "0.56802326", "0.56788814", "0.56367904", "0.5605188", "0.5547318", "0.5484213", "0.5442093", "0.5427454", "0.53923076", "0.53107464", "0.5194388", "0.51722986", "0.51498437", "0.5135828", "0.511639", "0.5114361", "0.50697315", "0.504881", "0.5044498", "0.502523", "0.5025077", "0.49866137", "0.4954228", "0.49527237", "0.4944374", "0.49270475", "0.4920355", "0.49190506", "0.4901816", "0.48945218", "0.4882373", "0.48725334", "0.4869944", "0.4869944", "0.48682314", "0.4857195", "0.4844105", "0.48415506", "0.48379338", "0.48232058", "0.48135218", "0.4805058", "0.47850198", "0.47720325", "0.47710395", "0.47705626", "0.47605538", "0.47535503", "0.47444186", "0.47421595", "0.47376552", "0.47349745", "0.47307217", "0.47122097", "0.46865734", "0.46791187", "0.46728203", "0.46727687", "0.46693385", "0.46519688", "0.46501124", "0.4649094", "0.46463996", "0.46395636", "0.46183062", "0.46090683", "0.4603056", "0.4601115", "0.4591706", "0.4584959", "0.4583704", "0.4582089", "0.45760846", "0.4570678", "0.4570575", "0.45652914", "0.4563495", "0.45634213", "0.4556972", "0.45567086", "0.45529553", "0.4544204", "0.45401993", "0.45360744", "0.45277688", "0.4525313", "0.45248905", "0.45151377", "0.45103514", "0.45012978", "0.44859752", "0.44816923", "0.44785315", "0.44723564", "0.44709945", "0.44708747", "0.44708747" ]
0.65243924
0
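Comparing the numbers suggests a reading of document_rank (an inference from this row, not something the dump documents): the positive's position when its score is ranked against the negatives'. Here 0.65243924 beats the best negative score, 0.6285826, hence rank 0. A sketch of that reading:

```python
# Inferred reading of document_rank: how many negatives outscore the positive.
negative_scores = [0.6285826, 0.5823373, 0.5733951]  # first three from the row above, parsed to float
document_score = 0.65243924                          # likewise parsed from the string field

rank = sum(score > document_score for score in negative_scores)
print(rank)  # 0 -> the positive outranks every negative, matching document_rank
```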
Load healthchecks from names.
def loadTestsFromNames(self, names, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromNames(names, module)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def load(name):\n return []", "def loadTestsFromNames(self, names, module=None):\r\n suites = [self.loadTestsFromName(name, module) for name in names]\r\n return self.suiteClass(suites)", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def loadFirsts(self, names):\n\n if os.path.exists(names):\n self.firsts, self.w_firsts = self.load(names)\n else:\n self.firsts = [names]\n self.w_firsts = None\n\n return", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n 
if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute", "def load_all_extensions(self, names):\n loaded = True\n for name in names:\n if not self.load(name):\n loaded = False\n return loaded", "def prechecks(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(hostnames=hostnames,\n servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.precheck(hostnames, servicenames)\n return Job(ansible_job)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def checks(self, all=False):\n if all:\n warn_states = [\"unknown\", \"passing\", \"warning\", \"critical\"]\n else:\n warn_states = [\"unknown\", \"warning\", \"critical\"]\n checks = {}\n for warn_state in warn_states:\n for state in self.consul.health.state(warn_state):\n if not state['Node'] in checks:\n checks[state['Node']] = dict()\n if not state['ServiceID'] in checks[state['Node']]:\n checks[state['Node']][state['ServiceID']] = {\n 'checks': [],\n 'name': state['ServiceName']\n }\n checks[state['Node']][state['ServiceID']]['checks'].append(\n (state['Name'], state['Status'], state['Output'])\n )\n return checks", "def _check_availability(self, names: Iterable) -> None:\n unavailable = [x for x in names if x not in self.__by_name.keys()]\n if unavailable:\n raise ValueError(f'datasets: {unavailable} 
not available in the {self.region} region.')", "def get_healthchecks(\n self, service_namespace_config: ServiceNamespaceConfig\n ) -> List[HealthcheckDict]:\n\n mode = self.get_healthcheck_mode(service_namespace_config)\n\n graceperiodseconds = self.get_healthcheck_grace_period_seconds()\n intervalseconds = self.get_healthcheck_interval_seconds()\n timeoutseconds = self.get_healthcheck_timeout_seconds()\n maxconsecutivefailures = self.get_healthcheck_max_consecutive_failures()\n\n if mode == \"http\" or mode == \"https\":\n http_path = self.get_healthcheck_uri(service_namespace_config)\n protocol = f\"MESOS_{mode.upper()}\"\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": protocol,\n \"path\": http_path,\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"tcp\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"TCP\",\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"cmd\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"COMMAND\",\n \"command\": self.get_healthcheck_cmd(),\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode is None:\n healthchecks = []\n else:\n raise InvalidHealthcheckMode(\n \"Unknown mode: %s. Only acceptable healthcheck modes are http/https/tcp/cmd\"\n % mode\n )\n return healthchecks", "def xontribs_load(names, verbose=False):\n ctx = builtins.__xonsh__.ctx\n res = ExitCode.OK\n for name in names:\n if verbose:\n print(\"loading xontrib {0!r}\".format(name))\n try:\n update_context(name, ctx=ctx)\n except Exception:\n res = ExitCode.INIT_FAILED\n print_exception(\"Failed to load xontrib {}.\".format(name))\n if hasattr(update_context, \"bad_imports\"):\n res = ExitCode.NOT_FOUND\n prompt_xontrib_install(update_context.bad_imports)\n del update_context.bad_imports\n return res", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "def load_sql_rules():\n logger.info('Loading SQL-based validation rules')\n SQLLoader.load_sql(\"sqlRules.csv\")\n logger.info('Loading non-SQL-based validation labels')\n LabelLoader.load_labels(\"validationLabels.csv\")", "def test_get_hyperflex_health_list(self):\n pass", "def do_health_checks(self, list_of_ips):\n # Calculate a decent overall timeout time for a ping attempt: 3/4th of\n # the monitoring interval. That way, we know we're done with this ping\n # attempt before the next monitoring attempt is started.\n ping_timeout = self.get_monitor_interval() * 0.75\n\n # Calculate a decent number of retries. For very short intervals we\n # shouldn't have any retries, for very long ones, we should have\n # several ones. 
Converting the timeout to an integer gives us what we\n # want: For timeouts less than 1 we have no retry at all.\n num_retries = int(ping_timeout)\n\n try:\n self.ping_count += len(list_of_ips)\n responses, no_responses = multiping.multi_ping(\n list_of_ips, ping_timeout, num_retries)\n self.update_stats(responses, no_responses)\n\n except Exception as e:\n logging.error(\"Exception while trying to monitor servers: %s\" %\n str(e))\n # Need to assume all IPs failed\n no_responses = list_of_ips\n\n return no_responses, [] # return empty list for questionable IPs", "def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def load_aliases():\n for name in COMMANDS:\n load_alias(name)", "def load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . 
import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def check(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.check(hostnames, servicenames)\n return Job(ansible_job)", "def test_health_checks_constructed(self):\n\n node = Node(\n {\n 'healthchecks': [\n {\n 'command': '/some/basic/example',\n 'on_failure': None,\n 'on_failure_even_if_security_violation': False\n },\n\n {\n 'command': '/some/basic/example',\n 'on_failure': '/some/rescue-command',\n 'on_failure_even_if_security_violation': True\n },\n\n {\n 'command': '/some/basic/example'\n }\n ]\n },\n {},\n mock.Mock()\n )\n\n self.assertEqual(3, len(node.get_health_checks()))", "def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt {retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. 
We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}", "def Load(self, vms, workloads=None, load_kwargs=None):\n if FLAGS.ycsb_skip_load_stage:\n return []\n\n workloads = workloads or GetWorkloadFileList()\n load_samples = []\n assert workloads, 'no workloads'\n\n def _HasInsertFailures(result_samples):\n for s in result_samples:\n if s.metric == _INCOMPLETE_LOADING_METRIC.value and s.value > 0:\n return True\n return False\n\n if FLAGS.ycsb_reload_database or not self.loaded:\n load_samples += list(\n self._LoadThreaded(vms, workloads[0], **(load_kwargs or {}))\n )\n if _SHOULD_FAIL_ON_INCOMPLETE_LOADING.value and _HasInsertFailures(\n load_samples\n ):\n raise errors.Benchmarks.RunError(\n 'There are insert failures, so the table loading is incomplete'\n )\n\n self.loaded = True\n if FLAGS.ycsb_sleep_after_load_in_sec > 0:\n logging.info(\n 'Sleeping %s seconds after load stage.',\n FLAGS.ycsb_sleep_after_load_in_sec,\n )\n time.sleep(FLAGS.ycsb_sleep_after_load_in_sec)\n if FLAGS.ycsb_load_samples:\n return load_samples\n else:\n return []", "def _load_breaks(self):\n for (filename, lineno) in Breakpoint.bplist.keys():\n self._add_to_breaks(filename, lineno)", "def loadHandlersFrom(self, name):\n try:\n module = __import__(name)\n except Exception as err:\n raise BadHandler(\"Failed import: %s\" % err)\n #\n # Look for handler classes\n for cls_name in dir(module):\n object = getattr(module, cls_name)\n try:\n is_handler = issubclass(object, BaseTreeHandler)\n except TypeError:\n is_handler = False\n if is_handler:\n g.es(\"... 
found handler '%s'\" % (cls_name,), color=\"blue\")\n self.handlers[cls_name.lower()] = object", "def load_multiple(*args, _all=False):\n\n if _all:\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n for key in NAME_SHELF.keys():\n yield load(key)\n else:\n for arg in args:\n yield load(arg)", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def test_health_get(self):\n pass", "def load_configs(self, filenames: List[str]) -> None:\n for filename in filenames:\n try:\n if self.load_config(filename):\n return\n except Exception as exc:\n click.echo(str(exc))", "def load( self, *filenames ):\n try:\n self.SetModel( pstatsloader.PStatsLoader( *filenames ) )\n except (IOError,OSError,ValueError), err:\n self.SetStatusText( \n _('Failure during load of %(filenames)s: %(err)s'\n )%dict( \n filenames=\" \".join( [repr(x) for x in filenames] ), \n err=err \n ) )", "def testLoadConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('postsubmit'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball-power'))", "def load_names() -> list:\n with open(Path(\"bot/resources/pride/drag_queen_names.json\"), \"r\", encoding=\"utf8\") as f:\n return json.load(f)", "def load(self, name):\n # ext = os.path.splitext(name)[1]\n # if ext == '.mat':\n # self.load_matlab(name)\n # else:\n # self.load_pkl(name)\n self.load_pkl(name)\n nhashes = sum(self.counts)\n # Report the proportion of dropped hashes (overfull table)\n dropped = nhashes - sum(np.minimum(self.depth, self.counts))\n print(\"Read fprints for\", sum(n is not None for n in self.names),\n \"files (\", nhashes, \"hashes) from\", name,\n \"(%.2f%% dropped)\" % (100.0 * dropped / max(1, nhashes)))", "def check_consul_services(con):\n whitelist = get_whitelist(con)\n\n if whitelist:\n LOG.warning(\"Checks from the following hosts will be ignored, \" +\n \"because service/rebootmgr/ignore_failed_checks is set: {}\".format(\", \".join(whitelist)))\n\n local_checks = get_local_checks(con, tags=[\"rebootmgr\"])\n LOG.debug(\"relevant_checks: %s\" % local_checks)\n\n for name, check in get_failed_cluster_checks(con, local_checks).items():\n if check[\"Node\"] in whitelist:\n continue\n\n LOG.error(\"There were failed consul checks. 
Exit\")\n sys.exit(EXIT_CONSUL_CHECKS_FAILED)\n\n LOG.info(\"All checks passed\")", "def register_startup_checks_extra():\n startup_checks_extra = getattr(settings, RELATE_STARTUP_CHECKS_EXTRA, None)\n if startup_checks_extra is not None:\n if not isinstance(startup_checks_extra, (list, tuple)):\n raise ImproperlyConfigured(\n INSTANCE_ERROR_PATTERN\n % {\"location\": RELATE_STARTUP_CHECKS_EXTRA,\n \"types\": \"list or tuple\"\n }\n )\n for c in startup_checks_extra:\n try:\n check_item = import_string(c)\n except Exception as e:\n raise ImproperlyConfigured(\n GENERIC_ERROR_PATTERN\n % {\n \"location\": RELATE_STARTUP_CHECKS_EXTRA,\n \"error_type\": type(e).__name__,\n \"error_str\": str(e)\n })\n else:\n register(check_item, RELATE_STARTUP_CHECKS_EXTRA_TAG)", "def load_test_users():\n return [load_test_angel(), load_test_troublemaker(), load_test_rebel()]", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()", "def _load_tests(self):\n tests = {\"enabled\":defaultdict(list),\n \"disabled\":defaultdict(list)}\n\n for test_path, test_type, test in self.iter_tests():\n enabled = not test.disabled()\n if not self.include_https and test.environment[\"protocol\"] == \"https\":\n enabled = False\n key = \"enabled\" if enabled else \"disabled\"\n tests[key][test_type].append(test)\n\n self.tests = tests[\"enabled\"]\n self.disabled_tests = tests[\"disabled\"]", "def _load(name, paths):\n for base_path in paths:\n parts = name.split('.')\n number_of_parts = len(parts)\n\n for folder_parts in range(number_of_parts):\n folder = os.path.join(base_path, *parts[:folder_parts])\n filename = '.'.join(parts[folder_parts:]) + '.json'\n json_path = os.path.join(folder, filename)\n\n if os.path.isfile(json_path):\n with open(json_path, 'r') as json_file:\n LOGGER.debug('Loading %s from %s', name, json_path)\n return json.load(json_file)", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def test_fake_health_get(self):\n pass", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def __load__(self, name):\n raise KeyError(name)", "def _load_services(self) -> None:\n # load default services\n self.service_errors = ServiceManager.load_locals()\n # load custom services\n service_paths = self.config.get(\"custom_services_dir\")\n logger.debug(\"custom service paths: %s\", service_paths)\n if service_paths is not None:\n for service_path in service_paths.split(\",\"):\n service_path = Path(service_path.strip())\n custom_service_errors = ServiceManager.add_services(service_path)\n self.service_errors.extend(custom_service_errors)\n # load default config services\n 
self.service_manager.load_locals()\n # load custom config services\n custom_dir = self.config.get(\"custom_config_services_dir\")\n if custom_dir is not None:\n custom_dir = Path(custom_dir)\n self.service_manager.load(custom_dir)", "def _load_jobs(self):\n\n jobs = self.app.config.get('SCHEDULER_JOBS')\n job_stores = self.app.config.get('SCHEDULER_JOBSTORES')\n\n if not jobs:\n jobs = self.app.config.get('JOBS')\n\n if jobs:\n if job_stores:\n self.reload_jobs(jobs=jobs)\n else:\n for job in jobs:\n self.add_job(**job)", "def load(steps):\n loaded = []\n for s in steps:\n try:\n s.load()\n loaded.append(s)\n except:\n logging.warn('Error during step load:\\n%s' %\n util.indent(traceback.format_exc()))\n pass\n return loaded", "def load_plugins(self, plugin_list):\n for plugin_name in plugin_list:\n self.load_plugin(plugin_name)", "def bulk_load(self, usernames):\n\n # First we make sure the list is unique\n usernames = set(usernames)\n users = []\n for user in self.db_session.query(User).filter(User.username.in_(usernames)):\n try:\n usernames.remove(user.username)\n except:\n log.exception('Exception caught while removing {0} from the usernames list'.format(user.username))\n self.data[user.username] = user\n users.append(user)\n\n for username in usernames:\n # New user!\n user = User(username=username)\n self.db_session.add(user)\n self.data[username] = user\n users.append(user)\n\n return users", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def load_existing_names(self, region_name, types=None):\n if types:\n existing_places = self._request_typed_existing(region_name, types)\n else:\n existing_places = self._request_existing(region_name)\n place_names = []\n for place in existing_places:\n place_names.extend(self._find_names(place))\n return [self._sanitise_name(place) for place in place_names]", "def _load_support(name):\n curr = P.dirname(P.abspath(__file__))\n with open(P.join(curr, \"data\", \"%s.yml\" % name)) as fin:\n return yaml.full_load(fin)", "def load_scenario(self, name):\n if name[:-5] != \".json\":\n name += \".json\"\n scr_path = os.path.dirname(os.path.abspath(__file__))\n f_path = os.path.join(scr_path, \"scenarios\", name)\n if not os.path.exists(f_path):\n raise IOError\n f = open(f_path, 'r')\n sc = Json(f)\n self._loaded_sc = sc\n self._scenario_script_elements = len(sc[\"script\"])\n self._scenario_script_cur = 0", "def do_test_discovery(classes):\n _tests = []\n _loader = loader.TestLoader()\n for classname in classes:\n if \":\" in classname:\n cls, method = classname.split(\":\")\n getattr(importutils.import_class(cls), method)\n else:\n suite_class = _loader.loadTestsFromTestClass(\n importutils.import_class(classname))\n for test_case in suite_class._precache:\n _tests.append(classname + ':' + str(\n test_case).split(' ')[0])\n return _tests", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config files.'):\n pool.LoadConfigs()", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def load(self):\n del self[0:len(self)]\n\n if not os.path.isfile(self.path):\n self.log.debug('No such file: {}'.format(self.path))\n return\n\n for line 
in [l.rstrip() for l in open(self.path, 'r').readlines()]:\n if line.startswith('#') or line.strip() == '':\n continue\n\n # Strip list of hosts from line\n hosts, key = line.split(None, 1)\n hosts = hosts.split(',')\n\n try:\n key = KnownHostsEntry(key)\n if key not in self:\n self.append(key)\n else:\n key = self[self.index(key)]\n key.add_hosts(hosts)\n except SSHKeyError:\n pass", "def _load_data_tracks(file_names, load_func=load_gpx_file):\n tracks = {}\n with concurrent.futures.ProcessPoolExecutor() as executor:\n future_to_file_name = {\n executor.submit(load_func, file_name): file_name\n for file_name in file_names\n }\n for future in concurrent.futures.as_completed(future_to_file_name):\n file_name = future_to_file_name[future]\n try:\n t = future.result()\n except TrackLoadError as e:\n log.error(f\"Error while loading {file_name}: {e}\")\n else:\n tracks[file_name] = t\n return tracks", "def load_devices_names_file(self):\n try:\n with open(self._path, \"r\") as infile:\n logger.debug(\"Loading devices names from <%s>\", self._path)\n self._devices_names = yaml.safe_load(infile) or {}\n except yaml.YAMLError as error:\n logger.error(\"In devices file %s: %s\", self._path, error)\n raise DevicesNamesConfigLoadError()\n except Exception:\n logger.error(\n \"Could not load device names config <%s>, a new one will be created after successfull start\",\n self._path,\n )", "def test_list_healthmonitors_sort(self):\r\n resources = \"health_monitors\"\r\n cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def shelve_load(file_name: str, *args):\n res = {}\n with shelve.open(os.path.splitext(file_name)[0]) as db:\n for k, v in db.items():\n res[k] = v\n return res", "def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')", "def loadTestsFromTestCase(self, testCaseClass):\n suite = super(HealthCheckLoader, self).loadTestsFromTestCase(\n testCaseClass)\n return self.filter_suite(suite)", "def get_all(self, name=None, label_selector=None):\n # type: (Optional[str], Optional[str]) -> List[BoundLoadBalancer]\n return super(LoadBalancersClient, self).get_all(\n name=name, label_selector=label_selector\n )", "def load_category_names(category_names_file): \n\n with open(category_names_file, 'r') as f:\n return json.load(f)", "def load_probes(probe_file):\n probes = common.read_file(probe_file)\n probe_list = list(filter(None, probes))\n return probe_list", "def load_test_bots(self):\n records = [\n ('0.0.0.0', self.create_past_date(5), self.create_past_date(4), self.create_past_date(3), 32, 'A status message'),\n ('0.0.0.1', self.create_past_date(0), self.create_past_date(0), self.create_past_date(0), 32, 'A status message'),\n ('0.0.0.2', self.create_past_date(7), self.create_past_date(6), self.create_past_date(5), 32, 'A status message'),\n ]\n for record in records:\n self.db_mgr.exec_cmd('''insert into bot_status (ip, last_startup_time, \n last_activity_time, last_shutdown_time,\n port, message) VALUES (%s, %s, %s, %s, %s, %s)''',\n *record)\n return [r[0] for r in records]", "def pre_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def load(self, name: str):\n result = self.l2.load(name)\n if result is not 
None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def _load(mapping, **keys):\n return keys[\"loader\"](mapping, **keys)", "def load_data(self, inventory, group_names = None, site_names = None, dataset_names = None):\n\n raise NotImplementedError('load_data')", "def serialize(healthchecks):\n data = {}\n for hc in healthchecks:\n data[hc.name] = hc._HealthCheck__data\n data[hc.name][\"Script\"] = hc.script\n data[hc.name][\"Interval\"] = hc.interval\n data[hc.name][\"Timeout\"] = hc.timeout\n data[hc.name][\"KillCountLimit\"] = hc.kill_count_limit\n data[hc.name][\"KillExitCodes\"] = hc.kill_exit_codes\n return data", "async def check_health():\n return {\"healthy\": True}", "async def _load_hsm_status(self) -> None:\n hsm: Dict[str, str] = await self._api_request(\"hsm\")\n _LOGGER.debug(\"Loaded hsm status\")\n self._hsm_status = hsm[\"hsm\"]", "def load_resources(resource_filename):", "def load_status_table():", "def health_checks(self) -> Dict[str, str]:\n try:\n self.get_object_information('/')['ResponseMetadata']['HTTPStatusCode']\n except Exception:\n return dict(clouddirectory_health_status='unhealthy')\n else:\n return dict(clouddirectory_health_status='ok')", "def testLoadConfigs(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('cluster1'))\n self.assertIsNotNone(pool.GetHostConfigs('cluster2'))", "def load_services_files(yaml_files):\n loaded = {}\n for yaml_file in yaml_files:\n try:\n loaded[yaml_file] = load_yaml(yaml_file)\n except FileNotFoundError:\n loaded[yaml_file] = {}\n\n return loaded", "def reloadcheck(self, event):\n plugloaded = []\n target = event.cbtype or event.cmnd\n try:\n from boot import getcallbacktable \n p = getcallbacktable()[target]\n except KeyError:\n logging.debug(\"botbase - can't find plugin to reload for %s\" % event.cmnd)\n return\n logging.debug(\"%s - checking %s\" % (self.name, unicode(p)))\n for name in p:\n if name in self.plugs: continue\n if name in default_plugins: pass\n elif self.cfg.blacklist and name in self.cfg.blacklist: continue\n elif self.cfg.loadlist and name not in self.cfg.loadlist: continue\n logging.warn(\"%s - on demand reloading of %s\" % (self.name, name))\n try:\n mod = self.plugs.reload(name, force=True, showerror=False)\n if mod: plugloaded.append(mod) ; continue\n except Exception, ex: handle_exception(event)\n return plugloaded", "def load_vacancies(headers, filters):\r\n in_tests.test_dict_data_type(headers)\r\n print (\" Loading vacancies from hh...\")\r\n\r\n url = \"https://api.hh.ru/vacancies\"\r\n response = requests.get(url, headers=headers, params=filters)\r\n vacancies = response.json()\r\n out_tests.test_load_vacancies(response, vacancies, filters)\r\n return (vacancies)", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", 
"def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]", "def load_verify_locations(self, cafile: Optional[Any] = ..., dummy: Optional[Any] = ...):\n ...", "def run(self, fnames):\n logging.info(\"Starting\")\n for fname in fnames:\n linted = self.run_pylint(fname=fname)\n if linted:\n custom_ok, override_standard = self.check_custom_rules()\n override = custom_ok and override_standard\n success = self.check_no_silent_crash(override=override)\n if success:\n self.eval_results(custom_ok, override)\n exit_code = self.report_results()\n if not self.keep_results:\n self.clean_up()\n sys.exit(exit_code)", "def update_health(self, health):\n session = db_api.get_session()\n\n # We need to see if all of the listeners are reporting in\n expected_listener_count = 0\n lbs_on_amp = self.amphora_repo.get_all_lbs_on_amphora(session,\n health['id'])\n for lb in lbs_on_amp:\n listener_count = self.listener_repo.count(session,\n load_balancer_id=lb.id)\n expected_listener_count += listener_count\n\n listeners = health['listeners']\n\n # Do not update amphora health if the reporting listener count\n # does not match the expected listener count\n if len(listeners) == expected_listener_count:\n\n # if the input amphora is healthy, we update its db info\n self.amphora_health_repo.replace(session, health['id'],\n last_update=(datetime.\n datetime.utcnow()))\n else:\n LOG.warning('Amphora %(id)s health message reports %(found)i '\n 'listeners when %(expected)i expected',\n {'id': health['id'], 'found': len(listeners),\n 'expected': expected_listener_count})\n\n # We got a heartbeat so lb is healthy until proven otherwise\n lb_status = constants.ONLINE\n\n # update listener and nodes db information\n for listener_id, listener in listeners.items():\n\n listener_status = None\n # OPEN = HAProxy listener status nbconn < maxconn\n if listener.get('status') == constants.OPEN:\n listener_status = constants.ONLINE\n # FULL = HAProxy listener status not nbconn < maxconn\n elif listener.get('status') == constants.FULL:\n listener_status = constants.DEGRADED\n if lb_status == constants.ONLINE:\n lb_status = constants.DEGRADED\n else:\n LOG.warning(('Listener %(list)s reported status of '\n '%(status)s'), {'list': listener_id,\n 'status': listener.get('status')})\n\n try:\n if listener_status is not None:\n self._update_status_and_emit_event(\n session, self.listener_repo, constants.LISTENER,\n listener_id, listener_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Listener %s is not in DB\", listener_id)\n\n pools = listener['pools']\n for pool_id, pool in pools.items():\n\n pool_status = None\n # UP = HAProxy backend has working or no servers\n if pool.get('status') == constants.UP:\n pool_status = constants.ONLINE\n # DOWN = HAProxy backend has no working servers\n elif pool.get('status') == constants.DOWN:\n pool_status = constants.ERROR\n lb_status = constants.ERROR\n else:\n LOG.warning(('Pool %(pool)s reported status of '\n '%(status)s'), {'pool': pool_id,\n 'status': pool.get('status')})\n\n members = pool['members']\n for member_id, status in members.items():\n\n member_status = None\n if status == constants.UP:\n member_status = constants.ONLINE\n elif status == constants.DOWN:\n member_status = constants.ERROR\n if pool_status == constants.ONLINE:\n pool_status = constants.DEGRADED\n if lb_status == constants.ONLINE:\n lb_status = constants.DEGRADED\n elif status == constants.NO_CHECK:\n member_status = constants.NO_MONITOR\n else:\n LOG.warning('Member %(mem)s 
reported status of '\n '%(status)s', {'mem': member_id,\n 'status': status})\n\n try:\n if member_status is not None:\n self._update_status_and_emit_event(\n session, self.member_repo, constants.MEMBER,\n member_id, member_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Member %s is not able to update \"\n \"in DB\", member_id)\n\n try:\n if pool_status is not None:\n self._update_status_and_emit_event(\n session, self.pool_repo, constants.POOL,\n pool_id, pool_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Pool %s is not in DB\", pool_id)\n\n # Update the load balancer status last\n # TODO(sbalukoff): This logic will need to be adjusted if we\n # start supporting multiple load balancers per amphora\n lb_id = self.amphora_repo.get(\n session, id=health['id']).load_balancer_id\n if lb_id is not None:\n try:\n self._update_status_and_emit_event(\n session, self.loadbalancer_repo,\n constants.LOADBALANCER, lb_id, lb_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Load balancer %s is not in DB\", lb_id)", "def load_targets(self):\n ldap_services = []\n if self.ldap:\n ldap_services = self.search.get_services(ports=[389], up=True)\n\n self.ldap_strings = [\"ldap://{}\".format(service.address) for service in ldap_services]\n self.services = self.search.get_services(tags=['smb_signing_disabled'])\n self.ips = [str(service.address) for service in self.services]" ]
[ "0.6379476", "0.59809893", "0.58267564", "0.5661063", "0.56127495", "0.5603886", "0.5571648", "0.55102366", "0.5353344", "0.5339407", "0.53231657", "0.5272784", "0.5235252", "0.5182474", "0.5097241", "0.5064218", "0.5057405", "0.504696", "0.5042437", "0.5037706", "0.49992388", "0.49555063", "0.4928204", "0.49271446", "0.49230534", "0.49207217", "0.49136704", "0.4906005", "0.49029723", "0.49004737", "0.48986575", "0.48986575", "0.48884487", "0.4843654", "0.4839728", "0.4837064", "0.48321226", "0.4818554", "0.48178712", "0.48136267", "0.48018217", "0.47827038", "0.4761589", "0.47562054", "0.4749361", "0.4744186", "0.47376427", "0.47248834", "0.47189116", "0.4717096", "0.4708128", "0.4706801", "0.47051847", "0.4685169", "0.46784642", "0.4667245", "0.4657661", "0.46532598", "0.4643211", "0.46402058", "0.4637782", "0.4614442", "0.46065864", "0.4605385", "0.4604491", "0.45995113", "0.4599488", "0.45921567", "0.45872396", "0.45740882", "0.45737237", "0.457122", "0.45667553", "0.45605677", "0.4559671", "0.45596114", "0.45581165", "0.4557089", "0.45549762", "0.45528412", "0.45469502", "0.4538305", "0.4538236", "0.45332006", "0.45230785", "0.452217", "0.4521761", "0.45100057", "0.4506156", "0.4502715", "0.44945854", "0.44940528", "0.4489445", "0.4489343", "0.44846871", "0.44767857", "0.44691244", "0.44654492", "0.4461839", "0.44571498" ]
0.7157677
0
Validate that the public key belongs to the given EC curve and format the public key as an uncompressed byte string. Afterwards the function creates a hash value of the uncompressed public key.
def get_public_key_fingerprint(curve: object, temp_public_key: object) \
        -> object:
    vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)
    uncompressed_pub_key = vk.to_string('uncompressed')
    pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)
    return pub_key_hash_fingerprint.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def get_public_compressed_curve_point(private_key):\n encoded_point = private_key.public_key().public_numbers().encode_point()\n return base64.b64encode(encoded_point)", "def convert_public_key_to_ecdsa(self, public_key):\n return PublicKey.fromPem('\\n-----BEGIN PUBLIC KEY-----\\n'+public_key+'\\n-----END PUBLIC KEY-----\\n')", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "def test_public_key_ec(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_EC)", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def import_public_key(self, hex_bytes: str) -> str:\n return self.context.post(\n \"/dsum/public_key\", {\"key\": hex_bytes}, None, \"DSum: failed importing a Curve 25519 public key\")['uid']", "def public_key_to_address(public_key):\n\toutput = []\n\talphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n\tvar = hashlib.new('ripemd160')\n\tencoding = binascii.unhexlify(public_key.encode())\n\tvar.update(hashlib.sha256(encoding).digest())\n\tvar_encoded = ('00' + var.hexdigest()).encode()\n\tdigest = hashlib.sha256(binascii.unhexlify(var_encoded)).digest()\n\tvar_hex = '00' + var.hexdigest() + hashlib.sha256(digest).hexdigest()[0:8]\n\tcount = [char != '0' for char in var_hex].index(True) // 2\n\tn = int(var_hex, 16)\n\twhile n > 0:\n\t\tn, remainder = divmod(n, 58)\n\t\toutput.append(alphabet[remainder])\n\tfor i in range(count):\n\t\toutput.append(alphabet[0])\n\treturn ''.join(output[::-1])", "def _ecdsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n\n public_key = private_key.public_key()\n serialized = public_key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n\n # The SSH agent format somehow combines the elliptic curve's\n # `x` and `y` values (in `numbers.public_numbers`) into a 
single\n # `Q` value. I couldn't figure the specifics out exactly, but\n # the format is used exactly the same way int the OpenSSH\n # public key format, so we'll just reuse that one instead.\n\n pk_data = b64decode(serialized.split(None,2)[1])\n content.data.extend(pk_data)\n\n # nist = self._ecdsa_nists[private_key.curve.name]\n # content.write_string('ecdsa-sha2-{}'.format(nist))\n # content.write_string(nist)\n #\n # buffer = bytearray()\n # buffer.extend(b'0x04')\n #\n # x = numbers.public_numbers.x\n # y = numbers.public_numbers.y\n # for number in [x,y]:\n # tmp = WriteMessage()\n # tmp.write_mpint(number)\n # buffer.extend(tmp.data[4:])\n\n content.write_mpint(numbers.private_value)\n return content.data", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def q_hashpubkey(abe, page, chain):\n pubkey = wsgiref.util.shift_path_info(page['env'])\n if pubkey is None:\n return \\\n \"Returns the 160-bit hash of PUBKEY.\\n\" \\\n \"For example, the Bitcoin genesis block's output public key,\" \\\n \" seen in its transaction output scriptPubKey, starts with\\n\" \\\n \"04678afdb0fe..., and its hash is\" \\\n \" 62E907B15CBF27D5425399EBF6F0FB50EBB88F18, corresponding\" \\\n \" to address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa.\\n\" \\\n \"/q/hashpubkey/PUBKEY\\n\"\n try:\n pubkey = pubkey.decode('hex')\n except Exception:\n return 'ERROR: invalid hexadecimal byte string.'\n return util.pubkey_to_hash(pubkey).encode('hex').upper()", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def h160_from_pubkey(Q: Point, compressed: bool) -> bytes:\n\n # also check that the Point is on curve\n pubkey = octets_from_point(ec, Q, compressed)\n return h160(pubkey)", "def from_public_parts(self, x: bytes, y: bytes):\n return asymmetric.ec.EllipticCurvePublicNumbers(\n int.from_bytes(x, 'big'),\n int.from_bytes(y, 'big'),\n asymmetric.ec.SECP256R1()\n ).public_key()", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def public_key_to_address(public_key: PublicKey) -> Address:\n key_bytes = public_key.format(compressed=False)\n return Address(keccak(key_bytes[1:])[-20:])", "def save_ecc_public_key(ec_public_key: EllipticCurvePublicKey, file_path: str,\n encoding: Encoding = Encoding.PEM) -> None:\n pem_data = ec_public_key.public_bytes(encoding=encoding, format=serialization.PublicFormat.SubjectPublicKeyInfo)\n with open(file_path, 'wb') as f:\n f.write(pem_data)", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def get_public_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/public_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def public_key(self):", "def private_key_to_public_key(private_key):\n\tpk = 
PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, 
format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]", "def fingerprint_public_key_blob(blob):\n hash = sha256(blob).digest()\n encoded = b64encode(hash).decode('UTF-8').rstrip('=')\n return 'SHA256:{}'.format(encoded)", "def public_key(self) -> PublicKey:\n pass", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)", "def verify_curve(curve):\n # What follows is the implementation of the verification algorithm\n # described in \"The Elliptic Curve Digital Signature Algorithm (ECDSA)\",\n # from Certicom. There just a few difference between the original algorithm\n # and the implementation:\n #\n # * a few variable names have been changed for the sake of clarity;\n # * the document from Certicom allows arbritrary seeds with bit length\n # >= 160; here we only care about seeds that are exactly 160-bit long.\n\n if curve.seed.bit_length() > 160:\n raise VerificationFailed('seed too long')\n\n seed_bytes = curve.seed.to_bytes(length=160 // 8, byteorder='big')\n\n # Define t, s and v as specified on the document.\n t = curve.p.bit_length()\n s = (t - 1) // 160\n v = t - 160 * s\n\n # 1. Compute h = SHA-1(seed_bytes) and let c0 denote the bit string of\n # length v bits obtained by taking the v rightmost bits of h.\n h = hashlib.sha1(seed_bytes).digest()\n h = int.from_bytes(h, byteorder='big')\n\n c0 = h & ((1 << v) - 1)\n\n # 2. Let w[0] denote the bit string of length v bits obtained by setting\n # the leftmost bit of c0 to 0.\n #\n # Note: here we use 160 bit instead of v bits, as required by the document.\n # We do so to make the code easier, and because it does not make any\n # difference (see the step 6).\n w0 = c0 & ((1 << v - 1) - 1)\n w = [w0.to_bytes(length=160 // 8, byteorder='big')]\n\n # 3. Let z be the integer whose binary expansion is given by 160-bit string\n # seed_bytes.\n z = curve.seed\n\n # 4. For i from 1 to s do:\n for i in range(1, s + 1):\n # 4.1 Let s_i be 160-bit string which is the binary expansion of the\n # integer (z + i) % (2 ** g).\n z_i = ((z + i) % (2 ** 160))\n s_i = z_i.to_bytes(length=160 // 8, byteorder='big')\n\n # 4.2 Compute w_i = SHA-1(s_i).\n w_i = hashlib.sha1(s_i).digest()\n w.append(w_i)\n\n # 5. Let w be the bit string obtained by concatenating w_0,w_1,...,w_s.\n w = b''.join(w)\n\n # 6. Let c be the integer whose integer expansion is given by w.\n #\n # On step 2, we said that we used a longer bit length for the first element\n # of w. 
This is correct because the resulting c does not change: using 160\n # bits instead of v bits is equivalent to add some zeroes to the left of c.\n c = int.from_bytes(w, 'big')\n\n # If b ** 2 * c == a ** 3 (mod p) then accept; otherwise reject.\n if (curve.b * curve.b * c - curve.a * curve.a * curve.a) % curve.p != 0:\n raise VerificationFailed('curve verification failed')", "def validate(msg, pubkey: dict, signature):\n if signature is None:\n print(\"Signature is None. probably cause something other than a string or byte being passed to signer\")\n return False\n try:\n x_int = base64.b85decode(pubkey[\"x\"].encode())\n x_int = int.from_bytes(x_int, \"big\")\n\n y_int = base64.b85decode(pubkey[\"y\"].encode())\n y_int = int.from_bytes(y_int, \"big\")\n except KeyError:\n return False\n\n signature = signature.encode()\n signature = base64.b85decode(signature)\n\n # if it a string\n try:\n hash_of_message = SHA256.new(msg)\n except TypeError:\n hash_of_message = SHA256.new(msg.encode())\n\n try:\n pubkey = ECC.construct(point_x=x_int, point_y=y_int, curve=\"P-256\").public_key()\n verifier = DSS.new(pubkey, mode=\"fips-186-3\")\n verifier.verify(hash_of_message, signature=signature)\n except ValueError:\n return False\n else:\n return True", "def extractUncompressedPubKey(script):\n # A pay-to-compressed-pubkey script is of the form:\n # OP_DATA_65 <65-byte uncompressed pubkey> OP_CHECKSIG\n\n # All non-hybrid uncompressed secp256k1 public keys must start with 0x04.\n if (\n len(script) == 67\n and script[66] == opcode.OP_CHECKSIG\n and script[0] == opcode.OP_DATA_65\n and script[1] == 0x04\n ):\n\n return script[1:66]\n return None", "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n params = key_object['private_key_algorithm']['parameters']\n public_key = Integer(pow(\n params['g'].native,\n key_object['private_key'].parsed.native,\n params['p'].native\n ))\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n public_key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key['public_key'].native\n if public_key is None:\n # This is gross, but since the EC public key is optional,\n # and we need to load the private key and use the crypto lib\n # to get the public key, we have to import the platform-specific\n # asymmetric implementation. 
This is the reason a bunch of the\n # imports are module imports, so we don't get an import cycle.\n public_key_object = load_private_key(key_object).public_key\n public_key = public_key_object.asn1['public_key'].parsed.native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n if isinstance(key_object, PublicKeyInfo):\n if key_object.algorithm == 'rsa':\n key = key_object['public_key'].parsed\n\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n key = key_object['public_key'].parsed\n params = key_object['algorithm']['parameters']\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key_object['public_key'].native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n raise ValueError(pretty_message(\n '''\n key_object must be an instance of the\n asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n classes, not %s\n ''',\n type_name(key_object)\n ))", "async def client_public_key(self) -> bytes:\n raise NotImplementedError", "def extractPubKey(script):\n pubkey = extractCompressedPubKey(script)\n if pubkey:\n return pubkey\n return extractUncompressedPubKey(script)", "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def load_received_public_key_bytes(self, public_key_str):\n return self.load_received_public_key(\n VerifyingKey.from_string(public_key_str, self.curve))", "def p2pkh_address(Q: Point,\n compressed: bool,\n version: bytes = b'\\x00') -> bytes:\n\n vh160 = version + h160_from_pubkey(Q, compressed)\n return base58.encode_check(vh160)", "def _get_keyidv2(pubkey: SupportedKeyTypes) -> int:\n if isinstance(pubkey, RSAPublicKey):\n fmt = serialization.PublicFormat.PKCS1\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n elif isinstance(pubkey, EllipticCurvePublicKey):\n fmt = serialization.PublicFormat.UncompressedPoint\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962, format=fmt)\n else:\n raise UnsupportedAlgorithm(f\"Unsupported public key type {type(pubkey)}\")\n\n default_be = backends.default_backend()\n digest = hashes.Hash(hashes.SHA1(), backend=default_be)\n digest.update(pubbytes)\n keydigest = digest.finalize()\n return int.from_bytes(keydigest[16:], \"big\")", "def update_public_key(self, uid: str, hex_bytes: str) -> str:\n return self.context.put(\n \"/dsum/public_key\", {\"uid\": uid, \"key\": hex_bytes}, None,\n \"DSum: failed updating the Curve 25519 public key with uid: %s\" % uid)['uid']", "def make_public_key(prime, base, rnumber):\n\n pub_key = (base ** rnumber) % prime\n return pub_key", "def load_received_public_key(self, public_key):\n if not self.curve:\n self.curve = public_key.curve\n if self.curve != public_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.public_key = public_key", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to 
bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def pubkey(self, address : str) -> str:\n account_map = \"13456789abcdefghijkmnopqrstuwxyz\"\n account_lookup = {}\n for i in range(0,32): #make a lookup table\n account_lookup[account_map[i]] = BitArray(uint=i,length=5)\n acrop_key = address[-60:-8] #leave out prefix and checksum\n number_l = BitArray() \n for x in range(0, len(acrop_key)): \n number_l.append(account_lookup[acrop_key[x]]) \n number_l = number_l[4:] # reduce from 260 to 256 bit\n result = number_l.hex.upper()\n return result", "def text2PublicKey(text:str):\n return RSA.importKey(b58decode(text))", "def extractPubKeyHash(script):\n # A pay-to-pubkey-hash script is of the form:\n # OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG\n if (\n len(script) == 25\n and script[0] == opcode.OP_DUP\n and script[1] == opcode.OP_HASH160\n and script[2] == opcode.OP_DATA_20\n and script[23] == opcode.OP_EQUALVERIFY\n and script[24] == opcode.OP_CHECKSIG\n ):\n\n return script[3:23]\n return None", "def export_public_key(self, public_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_public_key(self.ctx, public_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPublicKey.take_c_ctx(result)\n return instance", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def public_key(self): # pragma: no cover\n raise NotImplementedError()", "def extractCompressedPubKey(script):\n # pay-to-compressed-pubkey script is of the form:\n # OP_DATA_33 <33-byte compresed pubkey> OP_CHECKSIG\n\n # All compressed secp256k1 public keys must start with 0x02 or 0x03.\n if (\n len(script) == 35\n and script[34] == opcode.OP_CHECKSIG\n and script[0] == opcode.OP_DATA_33\n and (script[1] == 0x02 or script[1] == 0x03)\n ):\n return script[1:34]\n return None", "def _ecssa_pubkey_recovery(ec: EC, hf, e: int, sig: ECSS) -> Point:\n\n r, s = to_ssasig(ec, sig)\n\n # could be obtained from to_ssasig...\n K = r, ec.yQuadraticResidue(r, True)\n\n if e == 0:\n raise ValueError(\"invalid (zero) challenge e\")\n e1 = mod_inv(e, ec.n)\n P = DblScalarMult(ec, e1*s, ec.G, -e1, K)\n assert P[1] != 0, \"how did you do that?!?\"\n return P", "async def server_public_key(self) -> bytes:\n raise NotImplementedError", "def test_get_public_key(self) -> None:\n\n expected = self.pem_public_key\n\n encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key.decode())\n\n actual = encryptor.get_public_key()\n\n self.assertEqual(expected, actual)", "def get_public_key_in_pem(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k", "def __init__(self, public_key=None):\n 
self.public_key = self.convert_public_key_to_ecdsa(public_key) if public_key else public_key", "def sign_ECDSA_msg(private_key, msg, curve=ecdsa.SECP256k1):\n bmessage = msg.encode()\n sk = ecdsa.SigningKey.from_string(bytes.fromhex(private_key), curve=curve)\n signature = base64.b64encode(sk.sign(bmessage))\n return signature, message", "def test_private_key_ec(self):\n priv = \"\"\"-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIJZ57L6f6ywtZa7VhsvthAShxjdrL9EIrVwVgxnmD5b3oAoGCCqGSM49\nAwEHoUQDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALsbu2xNCDBXJ0IJ4Sd\n3u4G1qvrKX0mBHd7yUPGui+7bvp084mNag==\n-----END EC PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_EC)", "def recipient_public_key(self):", "def gen_public_key(g, private, p):\n return pow(g, private, p)", "def get_public_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def payToPubKeyScript(serializedPubKey):\n if not isStrictPubKeyEncoding(serializedPubKey):\n raise DecredError(f\"serialized pubkey has incorrect encoding\")\n script = ByteArray(\"\")\n script += addData(serializedPubKey)\n script += opcode.OP_CHECKSIG\n return script", "def derive_classic_address(public_key: str) -> str:\n account_id = get_account_id(bytes.fromhex(public_key))\n return addresscodec.encode_classic_address(account_id)", "def ecdsa_verify(G, pub_verify, message, sig):\n plaintext = message.encode(\"utf8\")\n digest = sha256(plaintext).digest()\n res = do_ecdsa_verify(G,pub_verify,sig,digest) \n\n return res", "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def public_key():\n if not Authorizer.__public_key:\n Authorizer.__public_key = download_public_key()\n return Authorizer.__public_key", "def rsa_string_to_publickey(mystr):\r\n if len(mystr.split()) != 2:\r\n raise ValueError, \"Invalid public key string\"\r\n \r\n return {'e':long(mystr.split()[0]), 'n':long(mystr.split()[1])}", "def check_public_key(N, e):\n # type: (int, int) -> List[RE]\n results = [check_composite(N), check_modulus_size(N), check_prime(e)]\n results.extend(check_public_rsa_exponent(N, e))\n\n return results", "def parse_signature(data: bytes):\n return base58_encode(data, b'sig').decode()", "def serializePublicKey(public_key):\n\treturn 
public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)", "def rsa_publickey_to_string(publickey):\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n\r\n return str(publickey['e'])+\" \"+str(publickey['n'])", "def __init__(self, public_key):\n self._pk = ed25519.Ed25519PublicKey.from_public_bytes(public_key.bytes)", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def from_base58(cls, address: str) -> 'PublicKey':\n return cls(base58.b58decode(address))", "def ecssa_sign(ec: EC, hf, m: bytes, d: int,\n k: Optional[int] = None) -> Tuple[int, int]:\n\n # the bitcoin proposed standard is only valid for curves\n # whose prime p = 3 % 4\n if not ec.pIsThreeModFour:\n errmsg = 'curve prime p must be equal to 3 (mod 4)'\n raise ValueError(errmsg)\n\n # This signature scheme supports 32-byte messages.\n # Differently from ECDSA, the 32-byte message can be\n # a digest of other messages, but it does not need to.\n\n # The message m: a 32-byte array\n if len(m) != hf().digest_size:\n errmsg = f'message of wrong size: {len(m)}'\n errmsg += f' instead of {hf().digest_size}'\n raise ValueError(errmsg)\n\n # The secret key d: an integer in the range 1..n-1.\n if not 0 < d < ec.n:\n raise ValueError(f\"private key {hex(d)} not in (0, n)\")\n P = pointMult(ec, d, ec.G)\n\n # Fail if k' = 0.\n if k is None:\n k = rfc6979(ec, hf, m, d)\n if not 0 < k < ec.n:\n raise ValueError(f\"ephemeral key {hex(k)} not in (0, n)\")\n\n # Let R = k'G.\n R = pointMult(ec, k, ec.G)\n\n # Let k = k' if jacobi(y(R)) = 1, otherwise let k = n - k'.\n # break the simmetry: any criteria might have been used,\n # jacobi is the proposed bitcoin standard\n if legendre_symbol(R[1], ec._p) != 1:\n # no need to actually change R[1], as it is not used anymore\n # let just fix k instead, as that is used later\n k = ec.n - k\n\n # Let e = int(hf(bytes(x(R)) || bytes(dG) || m)) mod n.\n e = _ecssa_e(ec, hf, R[0], P, m)\n\n s = (k + e*d) % ec.n # s=0 is ok: in verification there is no inverse of s\n # The signature is bytes(x(R)) || bytes(k + ed mod n).\n return R[0], s", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type 
is None or key_type == 'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def format_public_key(unformated_pk):\n return unformated_pk.replace(':', '')", "def _ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # the bitcoin proposed standard is only valid for curves\n # whose prime p = 3 % 4\n if not ec.pIsThreeModFour:\n errmsg = 'curve prime p must be equal to 3 (mod 4)'\n raise ValueError(errmsg)\n\n # Let r = int(sig[ 0:32]); fail if r is not [0, p-1].\n # Let s = int(sig[32:64]); fail if s is not [0, n-1].\n r, s = to_ssasig(ec, sig)\n\n # The message m: a 32-byte array\n if len(m) != hf().digest_size:\n errmsg = f'message of wrong size: {len(m)}'\n errmsg += f' instead of {hf().digest_size}'\n raise ValueError(errmsg)\n\n # Let P = point(pk); fail if point(pk) fails.\n ec.requireOnCurve(P)\n if P[1] == 0:\n raise ValueError(\"public key is infinite\")\n\n # Let e = int(hf(bytes(r) || bytes(P) || m)) mod n.\n e = _ecssa_e(ec, hf, r, P, m)\n\n # Let R = sG - eP.\n R = DblScalarMult(ec, s, ec.G, -e, P)\n\n # Fail if infinite(R).\n if R[1] == 0:\n raise ValueError(\"sG - eP is infinite\")\n\n # Fail if jacobi(y(R)) ≠ 1.\n if legendre_symbol(R[1], ec._p) != 1:\n raise ValueError(\"y(sG - eP) is not a quadratic residue\")\n\n # Fail if x(R) ≠ r.\n return R[0] == r", "def sha256_p(value):\n # check if the value has the expected type\n string_p(value)\n\n # SHA-256 hash has 64 hexadecimal characters\n if not re.fullmatch(r\"^[a-fA-F0-9]{64}$\", value):\n raise Invalid(\"the value '{value}' does not seem to be SHA256 hash\".format(value=value))", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def getG(compressed=True):\n priv = \"\\x00\"*31 + \"\\x01\"\n G = secp256k1.PrivateKey(priv, ctx=ctx).pubkey.serialize(compressed)\n return G", "def __init__(self, key_bytes, public=True):\n self.G = _globalECG\n if public:\n self.sec = None\n self.pub = EcPt.from_binary(key_bytes, self.G)\n self.optim = None\n else:\n self.sec = Bn.from_binary(sha256(key_bytes).digest())\n self.pub = self.sec * self.G.generator()\n self.optim = do_ecdsa_setup(self.G, self.sec)", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def test_private_public():\n\n alice_priv = ECScalar(\n bytes.fromhex(\"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a\")\n )\n alice_public = ECPoint(\n bytes.fromhex(\"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a\")\n )\n\n assert x25519_scalarmult_base(alice_priv) == alice_public\n\n bob_priv = ECScalar(\n bytes.fromhex(\"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb\")\n )\n bob_public = ECPoint(\n bytes.fromhex(\"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f\")\n )\n\n assert x25519_scalarmult_base(bob_priv) == bob_public\n\n k = ECPoint(bytes.fromhex(\"4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742\"))\n\n 
alice_k = x25519_scalarmult(alice_priv, bob_public)\n bob_k = x25519_scalarmult(bob_priv, alice_public)\n\n assert alice_k == bob_k\n assert alice_k == k", "def privatekey_to_publickey(private_key_bin: bytes) -> bytes:\n if not ishash(private_key_bin):\n raise ValueError('private_key_bin format mismatch. maybe hex encoded?')\n private_key = PrivateKey(private_key_bin)\n return private_key.public_key.format(compressed=False)", "def _get_pubickey_sha1_hash(cert):\n pkey = cert.get_pubkey()\n pkey_asn1 = dump_publickey(FILETYPE_ASN1, pkey)\n decoded_pkey, _ = der_decoder.decode(\n pkey_asn1, rfc2459.SubjectPublicKeyInfo())\n pubkey = bit_string_to_bytearray(decoded_pkey['subjectPublicKey'])\n # algorithm = decoded_pkey['algorithm'] # RSA encryption\n sha1_hash = hashlib.sha1()\n sha1_hash.update(pubkey)\n return sha1_hash", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n .replace(\"-----BEGIN PUBLIC KEY-----\", \"\").replace(\n \"-----END PUBLIC KEY-----\", \"\")", "def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)" ]
[ "0.70852506", "0.69108117", "0.6854899", "0.68222594", "0.6751205", "0.65805644", "0.65697986", "0.65697986", "0.64765847", "0.6456603", "0.6373267", "0.6364895", "0.6352023", "0.63497204", "0.6304584", "0.6302101", "0.6292437", "0.620116", "0.6189922", "0.6148473", "0.61129415", "0.60908484", "0.6088895", "0.6088762", "0.60707957", "0.60512316", "0.6021014", "0.600546", "0.60008925", "0.5997902", "0.5988555", "0.5964921", "0.5963314", "0.595422", "0.593375", "0.5930802", "0.5928485", "0.5927439", "0.5916052", "0.59158957", "0.5909741", "0.59077054", "0.59062076", "0.5897344", "0.5867437", "0.5860847", "0.58363366", "0.5833674", "0.5792306", "0.5789955", "0.57792985", "0.5766808", "0.57638544", "0.57600737", "0.57390016", "0.57380515", "0.5730993", "0.5719251", "0.5717288", "0.5709513", "0.5693961", "0.5691206", "0.5660822", "0.56601685", "0.56570053", "0.5639738", "0.56304115", "0.5628486", "0.56196815", "0.5588012", "0.5587675", "0.5576285", "0.55741596", "0.5571782", "0.5571765", "0.55696297", "0.55615723", "0.5545834", "0.5545336", "0.5540665", "0.5539365", "0.55321497", "0.5531393", "0.5517515", "0.55159366", "0.5511371", "0.5489802", "0.5484453", "0.54801553", "0.5477988", "0.5477415", "0.5475891", "0.54721117", "0.54627186", "0.5461496", "0.5457402", "0.54558784", "0.54539686", "0.54532987", "0.5448549" ]
0.70131516
1
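A minimal, self-contained usage sketch for the get_public_key_fingerprint document above, assuming the ecdsa package (>= 0.14, for the point-encoding arguments); the NIST256p curve and the freshly generated key pair are illustrative assumptions, not values taken from the record.

import hashlib

from ecdsa import NIST256p, SigningKey, VerifyingKey


def get_public_key_fingerprint(curve, temp_public_key):
    # Same steps as the document above: parse the hex-encoded point,
    # re-encode it uncompressed, then SHA-256 the uncompressed bytes.
    vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)
    uncompressed_pub_key = vk.to_string('uncompressed')
    return hashlib.sha256(uncompressed_pub_key).hexdigest()


# Generate a throwaway key pair, hex-encode the raw public point,
# and fingerprint it.
sk = SigningKey.generate(curve=NIST256p)
pub_hex = sk.get_verifying_key().to_string().hex()
print(get_public_key_fingerprint(NIST256p, pub_hex))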
Raised when the parameter u is given
def format_public_key(unformated_pk):
    return unformated_pk.replace(':', '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def h(self, u=None, v=None):\n pass", "def error(self, *args, **kwargs):", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def test_user_argument_not_passed(self):\n @converters.wrap\n def inner_test(user: models.User):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(inner_test, 1301)", "def _arg(self, t):\n self.RaiseError(t, \"Arguments should already have been processed\")", "def UserInput(self, username, userinput):\n pass", "def WarpSetU(u):\n MSG(\"WarpSetU\")\n warp.set_u(u)\n return", "def __call__(self, u, t):\n S, I, R, V = u\n return [-self.beta(t)*S*I-self.p(t)*S,\n self.beta(t)*S*I-self.nu(t)*I,self.nu(t)*I,\n self.p(t)*S]", "def _validate_user(_):\n pass", "def _handleInput(self, paramInput):\n pass", "def username_error(self, msg):\n raise NotImplementedError('username_error')", "def error(self, msg, *args, **kwargs):\n pass", "def _illegal_parameter(self, name):\r\n raise ValueError(\r\n 'parameter \"%s\" is not registered.\\nLegal '\\\r\n 'parameters are\\n%s' %\r\n (name, ' '.join(list(self.prm.keys()))))", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n muFind = paramInput.findFirst('mu')\n if muFind != None:\n self.mu = muFind.value\n else:\n self.raiseAnError(IOError,'mu value needed for poisson distribution')\n self.initializeDistribution()", "def add_vertex(self, u, val):\n raise NotImplementedError()", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n lambdaFind = paramInput.findFirst('lambda')\n if lambdaFind != None:\n self.lambdaVar = lambdaFind.value\n else:\n self.raiseAnError(IOError,'lambda (scale) value needed for Weibull distribution')\n kFind = paramInput.findFirst('k')\n if kFind != None:\n self.k = kFind.value\n else:\n self.raiseAnError(IOError,'k (shape) value needed for Weibull distribution')\n lowFind = paramInput.findFirst('low')\n if lowFind != None:\n self.low = lowFind.value\n else:\n self.low = 0.0\n self.initializeDistribution()", "def exception(self, *args, **kwargs):", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def __call__(self, u, t):\n S, I, R = u\n return [-self.beta(t)*S*I, self.beta(t)*S*I - self.nu(t)*I, self.nu(t)*I]", "def handle_invalid_arguments(e):\n errors = e.message\n return 
generic_errors(errors, code=400)", "def check_params(self, name, fs_in, fs_out, window):\n if not isinstance(name, str):\n raise TypeError('name must be a string, not %s' % name)\n if fs_in <= 0:\n raise ValueError('fs_in should not be less than 0.')\n if fs_out <= 0:\n raise ValueError('fs_out should not be less than 0.')\n if window <= 0:\n raise ValueError('window must be greater than than 0.')", "def add(self, user: U) -> None:\n ...", "def validate_input(self, *args):\n return", "def check_params(self):\n raise NotImplementedError", "def ipsi(self, u, log=False): # pragma: no cover\n pass", "def validate_params(self) -> None:\n if isinstance(self.hamiltonian, PauliSumOp) and isinstance(\n self.hamiltonian.coeff, ParameterExpression\n ):\n raise ValueError(\"A global parametrized coefficient for PauliSumOp is not allowed.\")", "def check_argument(self, struct_class, item, keyword, value):\n pass", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n if not self.upperBoundUsed or not self.lowerBoundUsed:\n self.raiseAnError(IOError,'the Uniform distribution needs both upperBound and lowerBound attributes. Got upperBound? '+ str(self.upperBoundUsed) + '. Got lowerBound? '+str(self.lowerBoundUsed))\n self.range = self.upperBound - self.lowerBound\n self.initializeDistribution()", "def _check_params(self):\n pass", "def _is_valid_input(self, parameter_name):\n raise NotImplementedError()", "def position(self, u, v):\n raise NotImplementedError", "def test_query_user_info_with_extra_argument(self):\n with pytest.raises(TypeError) as typeError:\n self.client.admin_query_user_info(\"foo\", None, \"\")\n\n assert \"admin_query_user_info() takes at most 2 arguments (3 given)\" in str(typeError.value)", "def update(self, user: U) -> None:\n ...", "def uop(*val):\r\n cost = float(input(\"please Enter The Cost Of Asset: \"))\r\n rv = float(input(\"please Enter Estimated Residual Value Of Asset: \"))\r\n lifeinunits = float(input(\"please Enter Estimated Life in Units: \"))\r\n rrr = (float(cost) - float(rv)) / float(lifeinunits)\r\n print \">> Your Depreciation per Unit is \",rrr\r\n for i in val:\r\n print \">> Depreciation for \",i,\"Units is \",i * rrr", "def expt(e):\n # print(e)\n print(\"An error has occurred\")\n if 'float division by zero' in e:\n print(\"Unable to divide by zero\")\n cls()\n menu()\n elif 'factorial() argument should not exceed 2147483647' in e:\n print('That number is too large, sorry')\n cls()\n menu()\n elif 'generic' in e:\n print(\"Please try again\")\n cls()\n menu()\n elif 'could not convert string' or \"'NoneType' object is not subscriptable\" in e:\n print('Incorrect input, please try again')\n cls()\n menu()", "def user_exception(self, frame, exc_info):\n pass", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n self.base = paramInput.findFirst('base').value\n if self.base not in ['natural','decimal']:\n self.raiseAnError(IOError,' base parameter is needed for LogUniform distribution (either natural or decimal)')", "def u2fkn( self , u ):", "def update_user(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\t\tval, msg = self.add_user(u, p)\r\n\t\t\r\n\t\tif val:\r\n\t\t\tmsg = \"%s has been updated.\" % u\r\n\t\t\r\n\t\tlogger.debug(\"Exiting\")\r\n\t\treturn val, msg", "def test_incorrect_arg_type(self):\n\n with pytest.raises(TypeError) as exc_info:\n upper_incomplete_gamma(a='A', z=0.3)\n\n expected_error_msg = (\n 'type of argument \"a\" must be one of (int, float); got str instead'\n )\n assert 
str(exc_info.value) == expected_error_msg", "def check_user_parameters(user_information):\n\n email = user_information.get('email', '')\n sub_end_date = user_information.get('subenddate', '')\n vineyards = user_information.get('vineyards', '')\n current_date = datetime.date.today().strftime('%Y-%m-%d')\n new_user_id = user_information.get('userid', '')\n is_admin = user_information.get('admin', '')\n is_enable = user_information.get('enable', '')\n\n try:\n message = (\n 'Validating submitted user parameters.'\n )\n logger.info(message)\n check_user_id(new_user_id)\n if email != '':\n if re.match(r\"[^@]+@[^@]+\\.[^@]+\", email) is None:\n raise PlantalyticsDataException(EMAIL_INVALID)\n check_subscription_end_date(sub_end_date, current_date)\n if is_admin != '':\n if not isinstance(is_admin, bool):\n raise PlantalyticsDataException(DATA_INVALID)\n if is_enable != '':\n if not isinstance(is_enable, bool):\n raise PlantalyticsDataException(DATA_INVALID)\n if vineyards != '':\n for vineyard_id in vineyards:\n if int(vineyard_id) < 0:\n raise PlantalyticsDataException(VINEYARD_BAD_ID)\n message = (\n 'Submitted user parameters successfully validated.'\n )\n logger.info(message)\n except PlantalyticsException as e:\n raise e\n except ValueError as e:\n raise e\n except Exception as e:\n raise e", "def test_no_user_argument_but_passed(self):\n @converters.wrap\n def inner_test():\n \"\"\"If this runs without error, the test has passed.\"\"\"\n pass\n inner_test(user=object())", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)", "def test_uparforvarg(self):", "def _arguments(self, t):\n self.RaiseError(t, \"Arguments should already have been processed\")", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n pFind = paramInput.findFirst('p')\n if pFind != None:\n self.p = pFind.value\n else: self.raiseAnError(IOError,'p value needed for Geometric distribution')\n self.initializeDistribution()", "def __call__(self, param, xyz=False):\n pass", "def _raise_performing_request_error(self, *args, **kwargs):", "def UParameters(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_UParameters(self, *args)", "def __init__(self, (u, v)):\r\n self.u = u\r\n self.v = v", "def error(self, *args):\n self.mylog.error(*args)", "def user():\n pass", "def exc_to_catch(request):\n return request.param", "def check_valid_params(cls, **user_params):\n # Check that the appropriate number of params are provided\n if not all(key in user_params for key in cls.param.keys()):\n raise ValueError(f\"Missing parameter! Expected {cls.param.keys()} but was given {user_params.keys()}\")\n\n # Check parameter units and values\n for (key, allowed_params), user_param in zip(cls.param.items(), user_params.values()):\n\n # If both have units, check that the user param value is valid. If valid, continue. 
Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n\n elif np.isin(user_param.to(allowed_params.unit).value, allowed_params.value):\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # Check Combinations (Logic lives inside model subclasses under model.isvalid_param_combo)\n if user_params not in cls.get_param_combinations():\n raise ValueError(\n f\"Invalid parameter combination. See {cls.__class__.__name__}.get_param_combinations() for a \"\n \"list of allowed parameter combinations.\")", "def getValue(self, data):\r\n raise Exception(\"Exception via Error Parameter\")", "def test_validate_unnamed_fparameter_raises(self):\n arg = forge.arg()\n fsig = FSignature([arg], __validate_parameters__=False)\n with pytest.raises(ValueError) as excinfo:\n fsig.validate()\n assert excinfo.value.args[0] == \\\n \"Received unnamed parameter: '{}'\".format(arg)", "def is_uN(self, argno: Union[int, Tuple[int, ...]]) -> '_Checker': # pylint: disable=invalid-name\n if isinstance(argno, tuple):\n for a in argno:\n self.is_uN(a)\n return self\n\n assert isinstance(argno, int), argno\n t = self.arg_types[argno]\n if not isinstance(t, BitsType) or t.signed:\n raise XlsTypeError(\n self.span, t, None,\n 'Want argument {} to be unsigned bits; got {}'.format(argno, t))\n return self", "def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n mock_pwnam.side_effect = KeyError()\n self.configuration.hgst_space_user = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_user = 'Fred!`'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def get_uint_arg(name, default, **kwargs):\n try:\n val = int(kwargs.get(name, default))\n if val < 0:\n logger.error('Parameter %s must not be negative')\n val = default\n return val\n except:\n logger.error('Parameter %s is not an integer' % name)\n return default", "def _input_as_parameter(self, data):\r\n self.Parameters['-i'].on(data)\r\n return ''", "def invaild_param_name_error(name, availables):\n msg = 'cannot find param with name: {}, availables are {}'.format(\n name, ','.join(availables))\n return ValueError(msg)", "def set(self, U):\n pass", "def set(self, U):\n pass", "def execute(self, user):\n pass", "def _check_parameter(h, i, j, v, integral=False, name=None, sym=None):\n if integral:\n try:\n v = integralize(v)\n except TypeError:\n raise 
InfeasibleError(\"%s %s[%d, %d, %d] is nonintegral\"\n % (name, sym, h, i, j))\n assert checkNonneg(v), \\\n \"%s %s[%d, %d, %d] is negative\" % (name, sym, h, i, j)\n return v", "def C(self, u, v):\n pass", "def error_bad_value(user: discord.User, value_type: str, value: Any) -> str:\n return (\n f\"Hmmmm. {user.mention}, I'm having a hard time understanding the {value_type}\"\n f\" '{value}'.\"\n )", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n) -> None:\r\n ...", "def _ps_error(e):\n\n error(None, str(e))", "def __init__(self, (u, v, o)):\r\n self.u = u\r\n self.v = v\r\n self.o = o", "def exception(self, e):\n pass", "def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. Use \"@{s.BOT_NAME} help\" for help.',\n )", "def modify_user(user_data):\r\n raise NotImplementedError()", "def verify_args(self, params):\n for key in self.signup_args:\n if key not in params.keys():\n details = \"we don't have %s key\" % key\n logging.info(details)\n raise CustomException(details=details, error_code='')\n\n try:\n user_type = params.get('userType')\n user_types = Config.get('users', 'user_types')\n if user_type not in user_types:\n details = \"user type: %s is not supported\" % params['userType']\n logging.info(details)\n raise CustomException(details=details, error_code='')\n except KeyError as kex:\n details = \"User types undefined, moving on %s\" % kex\n logging.info(details)\n return True", "def user():", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def validate_arguments(self,args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. 
Exiting.')\n\t\t\tsys.exit(0)", "def error(self):\n ...", "def test_single_arg_missing(self):\n from plone.api.exc import MissingParameterError\n _func = required_parameters('arg1')(undecorated_func)\n with self.assertRaises(MissingParameterError):\n _func()", "def user(self):", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n locationFind = paramInput.findFirst('location')\n if locationFind != None:\n self.location = locationFind.value\n else:\n self.raiseAnError(IOError,'location value needed for Logistic distribution')\n scaleFind = paramInput.findFirst('scale')\n if scaleFind != None:\n self.scale = scaleFind.value\n else:\n self.raiseAnError(IOError,'scale value needed for Logistic distribution')\n self.initializeDistribution()", "def test_unnamed_parameter(self):\n\n m = Mothur(**self.init_vars)\n m.help('summary.seqs')\n\n return", "def test_missing_argument(self):\n @converters.wrap\n def inner_test(param: int):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(inner_test, 3102)", "def ee_err_1(sav, *args):\n w, r, labor_supply, b_init, s, t = args\n\n error = u_prime(w[t] * labor_supply[s] + (1 + r[t]) * b_init[s] - sav) - beta * (1 + r[t+1]) * u_prime((1 + r[t+1]) * sav + w[t+1] * labor_supply[s+1])\n\n return error", "def __check_args_val(self):\n if self.__num_prev_scans < 0:\n error_msg = \"num_prev_scans must be greater than or equal to zero\"\n raise ValueError(error_msg)", "def raise_not_enough_arguments(self, string):\n\n\t\trequested = errors.number(self.counter + 1)\n\n\t\tnumber = len(self.positional)\n\n\t\tverb = \"was\" if number == 1 else \"were\"\n\n\t\twhat = \"Requested {} formatting argument for \"\\\n\t\t\t \"'{}' but only {} {} supplied!\"\n\n\t\twhat = what.format(requested, string, number, verb)\n\n\t\traise errors.ArgumentError(what)", "def estimate(self, U, mu=None):\n raise NotImplementedError", "def refu(self, refu):\n\n self._refu = refu", "def test_wrong_argument_for_encoding(self):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.encode(4.5, 10)", "def test_failed_parameter_verification() -> None:\n name = create_random_alphanumeric(10)\n # The lower bound must be smaller than the upper bound!\n parameters = [5, 1, 3]\n\n with pytest.raises(ValueError):\n UnivDist(\n name=name, distribution=DISTRIBUTION_NAME, parameters=parameters\n )\n\n # The mid-point value must be between lower and upper bounds!\n parameters = [3, 4, 10]\n\n with pytest.raises(ValueError):\n UnivDist(\n name=name, distribution=DISTRIBUTION_NAME, parameters=parameters\n )", "def cmd_user(args):", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n apexFind = paramInput.findFirst('apex')\n if apexFind != None:\n self.apex = apexFind.value\n else:\n self.raiseAnError(IOError,'apex value needed for Triangular distribution')\n minFind = paramInput.findFirst('min')\n if minFind != None:\n self.min = minFind.value\n else:\n self.raiseAnError(IOError,'min value needed for Triangular distribution')\n maxFind = paramInput.findFirst('max')\n if maxFind != None:\n self.max = maxFind.value\n else:\n self.raiseAnError(IOError,'max value needed for Triangular distribution')\n # check if lower or upper bounds are set, otherwise default\n if not self.upperBoundUsed:\n self.upperBoundUsed = True\n self.upperBound = self.max\n if not self.lowerBoundUsed:\n self.lowerBoundUsed = True\n self.lowerBound = self.min\n self.initializeDistribution()", "def _sim_step(self, u):\n 
raise NotImplementedError", "def set_uvals(self, uvals: List[float]) -> None:\n self.uvals = uvals\n if uvals[2] == 0.0: # 0 is Uiso and 1 q-peak hight\n for n, u in enumerate(uvals):\n if abs(u) > 4.0:\n fvar, uval = split_fvar_and_parameter(u)\n # self.uvals[n] = uval\n self.shx.fvars.set_fvar_usage(fvar)\n else:\n if abs(uvals[0]) > 4.0:\n fvar, uval = split_fvar_and_parameter(uvals[0])\n self.shx.fvars.set_fvar_usage(fvar)", "def f(self,x,*params):\n raise NotImplementedError", "def test_special_U(self):\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, -0.1).to_matrix(), \"U\", {})\n self.check_oneq_special_cases(U3Gate(0.0, 0.1, 0.2).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.2, 0.0).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(np.pi / 2, 0.0, 0.2).to_matrix(), \"U\", {\"u\": 1})\n self.check_oneq_special_cases(U3Gate(0.1, 0.2, 0.3).to_matrix(), \"U\", {\"u\": 1})", "def action1(self, e, arg1, arg2, arg3):\n print \"%s.action1() invoked with args (%s:%s, %s:%s, %s:%s)\" %\\\n (self.__machine_name, arg1, type(arg1), arg2, type(arg2), arg3, type(arg3))", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def cmd_missing_arg(self):\n self.respond(\"501 Syntax error: command needs an argument.\")", "def validate(name, args, required, typ):\n value = args.get(name)\n if required and value is None:\n raise errors.Error(\"{0} is required argument\".format(name))\n if value is not None and not isinstance(value, typ):\n raise errors.Error(\"{0} should be {1}\".format(name, typ))", "def enter_username(self):" ]
[ "0.5714732", "0.5587336", "0.5585282", "0.5577008", "0.5479378", "0.54428875", "0.5332927", "0.52931833", "0.5276477", "0.52281755", "0.5210547", "0.5198676", "0.51869273", "0.5171002", "0.51632804", "0.515921", "0.51172", "0.5115", "0.5114484", "0.51063645", "0.5105348", "0.50796896", "0.5073339", "0.5061606", "0.5055695", "0.50522935", "0.50149995", "0.501047", "0.5002593", "0.49935445", "0.49915984", "0.4986849", "0.49836296", "0.49720782", "0.4965391", "0.49624062", "0.4956231", "0.49481037", "0.49388018", "0.49386644", "0.4937836", "0.49326912", "0.4923282", "0.49215654", "0.4920137", "0.4915338", "0.48891628", "0.48802337", "0.487164", "0.48658645", "0.48615018", "0.4858537", "0.48553342", "0.48472023", "0.4840316", "0.48362106", "0.48310837", "0.48307082", "0.48301008", "0.48284298", "0.48167378", "0.48046678", "0.48046678", "0.47927794", "0.47897324", "0.4789228", "0.4788534", "0.4777752", "0.47771296", "0.47752285", "0.47639376", "0.4762868", "0.47589025", "0.47580156", "0.47568786", "0.4755125", "0.47530818", "0.47499582", "0.4741898", "0.47360227", "0.47264928", "0.47241864", "0.4715585", "0.47135213", "0.470831", "0.47067687", "0.47036245", "0.4700862", "0.47008234", "0.4695862", "0.46891603", "0.46867624", "0.4684178", "0.46777335", "0.46688944", "0.4661722", "0.46597937", "0.46592647", "0.46546793", "0.4653914", "0.46528283" ]
0.0
-1
Create a directory to write output to.
def make_output_dir(experiment_dir, identifier):
    output_dir = Path(experiment_dir, identifier).resolve()
    output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def create_dir(output_path):\n if not os.path.exists(output_path) and is_directory(output_path):\n os.makedirs(output_path)", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def _make_output_directory(output_dir: str) -> None:\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n logging.info(f\"output directory does not exist, made '{output_dir}'\")", "def create_output_dir(output_dir, dir_name):\n try:\n os.mkdir(os.path.join(output_dir, dir_name))\n except OSError:\n print(os.path.join(output_dir, dir_name) + \" exits... :(\")", "def create_output_dir(output_dir):\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n for folder in [CHECKPOINT_DIR, LOG_DIR]:\n folder_path = os.path.join(output_dir, folder)\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)", "def create_out_dir(out): \n out_path = os.path.join(out,out_dir_name)\n try:\n os.stat(out_path)\n except:\n os.mkdir(out_path)", "def createPath(self, outPath):\n # Create new directory for output path\n try:\n os.mkdir(outPath)\n except OSError:\n print (\"Creation of the directory %s failed\" % outPath)\n else:\n print (\"Successfully created the directory %s \" % outPath)", "def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' 
not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path", "def create_out_dir(self):\n\n logging.debug('create_out_dir called \\n'\n 'Output directory to be created: '\n '%s', self.out_dir)\n\n access_rights = 0o755\n list_outs = ['docs/downloaded', 'docs/edited_csv', 'docs/graphics']\n for address in list_outs:\n path = self.out_dir + address\n if os.path.exists(path):\n logging.debug('Path: %s :already exists', path)\n else:\n try:\n os.makedirs(path, access_rights)\n except OSError:\n logging.debug('Creation of directory has failed '\n 'at: %s', path)\n else:\n logging.debug('Successfully created the '\n 'directory path at: %s', path)\n return self.out_dir, list_outs", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)", "def _create_result_directory(self):\n\t\tFileSystem.create_dir(self._result_directory_name)\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Log\")\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Dump\")", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def create_dirs(_log, output_dir, overwrite):\n _log.info(\"Create model directory\")\n\n if output_dir is None:\n raise ValueError(\"Config `output_dir` has to be set!\")\n\n if os.path.exists(output_dir) and not overwrite:\n raise ValueError(\n \"Output directory already exists (set overwrite flag?):\", output_dir\n )\n\n if os.path.exists(output_dir) and overwrite:\n rmtree(output_dir)\n\n if not os.path.exists(output_dir) and output_dir not in [\"\", \".\"]:\n os.makedirs(output_dir)", "def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)", "def create(self, basedir, outdir, name, prefix=None):", "def createDir(self, dir_name):\n os.mkdir(os.path.join(self.user[\"Save\"], dir_name))", "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)", "def create_dir(dir_path):\n\n if not path.exists(dir_path):\n log('Creating directory: {0}'.format(dir_path))\n run(sh.mkdir, dir_path, p=True)", "def create_dir_structure():\n LOG.info('In create_dir_structure')\n 
OutputWrite.change_to_script_directory(__file__)\n path = os.path.abspath(os.path.join('..', 'results',\n global_constants.TEXT_BOARD,\n global_constants.TEXT_INTERFACE,\n global_constants.TEXT_DEVICE,\n global_constants.TEST_EXECUTION_NAME\n ))\n LOG.debug('Path to be Created = {0}'.format(path))\n os.makedirs(path, exist_ok=True, mode=0o755)\n for item in global_constants.TEST_CASE_LIST_NAMES:\n in_path = os.path.exists(os.path.join(path, item))\n if not os.path.exists(in_path):\n LOG.debug('Path with Test Case name = {0}'.format(in_path))\n os.mkdir(in_path)\n LOG.debug('Path = {0}'.format(path))\n return path", "def create_output_dir(self, cfg: dict) -> str:\n output_dir = cfg.get(\"output\").get(\"output_dir\")\n time_sfx = cfg.get(\"output\").get(\"time_suffix\", True)\n if not os.path.isabs(output_dir):\n output_dir = os.path.join(self.repo_path, output_dir)\n subdir = self.project_name\n if time_sfx:\n cur_time = get_cur_time_str()\n subdir = f\"{subdir}_{cur_time}\"\n output_dir = os.path.join(output_dir, subdir) # type: str\n if check_dir(output_dir, make_if_not=True):\n logger.info(\"Results will be in {}\".format(output_dir))\n else:\n exit(ErrorCode.PATH_ERROR)\n return output_dir", "def create_dir(_dir):\n if not os.path.exists(_dir):\n os.makedirs(_dir)", "def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def SetupOutDir(out_dir):\n logging.info('entering ...')\n assert re.match(r'^[a-zA-Z_\\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir\n\n if os.path.exists(out_dir):\n subprocess.check_call(['rm', '-rf', out_dir])\n os.mkdir(out_dir)\n logging.info('... done')", "def getOutputDir():\n directory = os.path.join(Configurations.getProjectRootDir(), OUTPUT_DIR_NAME)\n if not os.path.exists(directory):\n logger.warning('Directory %s not exist, CREATE!', directory)\n os.makedirs(directory)\n\n return directory", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def CreateDirectory(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, 0777)", "def create_directory(directory_name):\n directory = \"./\" + directory_name + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def create_dir(working_dir):\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)", "def ensure_out_dir(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)", "def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass", "def make_output_dir(directory):\r\n if os.path.exists(directory):\r\n try:\r\n shutil.rmtree(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Removing the existing output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Existing output directory removed\")\r\n\r\n try:\r\n 
os.mkdir(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Creation of the output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Successfully created output directory\")\r\n return True", "def create_directory(self, directory):\n mgm, directory = self._safe_split_mgm(directory)\n logger.warning('Creating directory on SE: {0}'.format(self._join_mgm_lfn(mgm, directory)))\n cmd = [ 'xrdfs', mgm, 'mkdir', '-p', directory ]\n svj.core.utils.run_command(cmd)", "def __setup_output_directory(self):\n print('Setting up output directory')\n time_stamp = datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n self.output_path = os.path.join(self.output_base_path, '%s_%s' % (self.execution_name, time_stamp))\n print('- Creating output directory: %s' % self.output_path)\n os.makedirs(self.output_path)\n print('- Output directory created')", "def create_out_dir_name(params):\n\n current_timestamp = timestamp()\n out_dir = os.path.join('out', current_timestamp)\n return out_dir", "def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path", "def create_directories(self, path):\n os.makedirs(path)\n print('Directory created at:', path)\n return path", "def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path, exist_ok=True)", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def createTargetDir():\n sLogDir = getConfig('system', 'log_dir')\n if not os.path.isdir(sLogDir):\n usageMsg(\"log directory does not exist: \" + sLogDir)\n\n # Target directory combines all pieces including the date/time\n sTargetDir = \"%s/%s\" % (sLogDir, getDates()['gmt'].strftime(\"%Y_%m_%d-%H_%M_%S_%Z\"))\n\n try:\n os.makedirs(sTargetDir)\n except OSError:\n errorMsg(\"unable to create target directory: \" + sTargetDir)\n return sTargetDir", "def mkdir(path):", "def createFolder(self):\n\n self.directory = \"D:\\\\CompositionHelper\"\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n print ('Created new folder')", "def create_dir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def _makeDir(self):\n try:\n os.mkdir(self.dir)\n # log('created directory: %s\\n' % self.dir)\n except OSError, err:\n if err.errno != errno.EEXIST:\n raise", "def create_dir(directory):\n if not os.path.isdir(directory):\n os.makedirs(directory)", "def create_working_directory(self):\n os.makedirs(self.working_directory, exist_ok=True)", "def createDirectory(self, summary_handle,directory,mode,role =\"\",summary_var_dict={}):\n if role:\n directory = directory + \"/\" + role\n \n tmp_var = \"mkdir -p %s%s%s\" %(directory,self,role)\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n return\n\n self.pushMode(CLI_MODES.shell)\n if role:\n self.removePath(directory)\n\n logger.info (\"Directory is %s\" %directory)\n output = self.sendCmd(\"mkdir -p %s\" % directory)\n status = self.command_execution_status()\n if status == \"true\":\n summary_handle.write(\"mkdir -p %s,%s,%s,pass \\n\" %(directory,self,role))\n else:\n summary_handle.write(\"mkdir -p %s,%s,%s,fail \\n\" %(directory,self,role)) \n\n self.popMode()\n 
return output", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def make_dir(self, path):\n import os\n if not os.path.exists(path):\n os.makedirs(path)", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def make_run_directory(output_root: Union[str, Path]) -> Path:\n run_directory = get_run_directory(output_root)\n mkdir(run_directory)\n return run_directory", "def create_folder(output_directory: str, fldrname: str):\n\n os.makedirs(output_directory, exist_ok=True)\n tstmp = datetime.now().strftime('%Y%m%d_%H%M%S')\n try:\n fldr_path = os.path.join(output_directory, fldrname)\n os.mkdir(fldr_path)\n except FileExistsError:\n fldr_path = os.path.join(output_directory, fldrname + '_{}'.format(tstmp))\n os.mkdir(fldr_path)\n return fldr_path", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_dir(dir_):\n try:\n os.makedirs(dir_)\n logger.debug(\"Creating directory %s\", dir_)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise", "def create_directory(dirname):\n\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)", "def make_directory(path,dirname):\n\n\tdirectory_path = path + '/' + dirname\n\tcommand = 'mkdir ' + directory_path\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def makeDir(dirPath):\n os.makedirs(dirPath)", "def mkdir(name):\n\tspeech.speak(\"Executing 'mkdir \" + name + \"' command to create a directory.\")\n\tsubprocess.call([\"mkdir\", name])", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def createDirectory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def setOutputDir(self, outputdir):\n self.outputdir = outputdir\n if not os.path.isdir(outputdir):\n os.makedirs(outputdir)", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")", "def create_dir(newdir):\n if not os.path.isdir(newdir):\n try:\n os.mkdir(newdir)\n print(newdir)\n except IOError:\n print(\"cannot create %s directoy\" % newdir)", "def make_dir(file_name): # output_file_loc = des\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if not os.path.exists(f'{output_path}/{fld}'):\n os.makedirs(f'{output_path}/{fld}')\n # print(\"MAKE_DIR completed...\") \n return", "def create_directory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def create_directory(directory, name_folder=None):\n if name_folder is not None:\n path = op.join(directory, name_folder)\n 
try_makedirs(path)\n else:\n path = directory\n return path", "def make_directory(dir_path):\n abs_dir_path = os.path.abspath(dir_path)\n if not os.path.exists(abs_dir_path):\n os.makedirs(abs_dir_path)", "def Create_my_dir(new_path): #Create paths (windows os)\r\n if not os.path.exists(new_path):\r\n os.makedirs(new_path);", "def fs_mkdir(self, dirname: str) -> None:\n self.exec_(\"import uos\\nuos.mkdir('%s')\" % dirname)", "def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)", "def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')", "def create_dir(dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except OSError:\n print('Error: Cannot create directory named \\\"' + dir + '\\\"')", "def make_new_dir(path):\n\n if(not(os.path.isdir(path))):\n os.makedirs(path)\n\n return path", "def create_directory(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def make_directory(name: str):\n try:\n os.mkdir(name)\n except:\n pass", "def create_save_folder(self):\n absolute_output = os.path.abspath(self.output).replace(\"\\\\\", \"/\")\n if self.paddle_length_factor is not None:\n self.save_folder = f\"{absolute_output}/{self.env_name}/PaddleLength_\" \\\n f\"{self.paddle_length_factor}/session{self.session}\"\n else:\n self.save_folder = f\"{absolute_output}/{self.env_name}/StandardEnv/session{self.session}\"\n tmp_folder = self.save_folder\n\n folder_tree = []\n while True:\n if not os.path.exists(self.save_folder):\n folder_tree.insert(0, self.save_folder)\n self.save_folder = self.save_folder[:self.save_folder.rindex(\"/\")]\n else:\n self.save_folder = tmp_folder\n break\n for folder in folder_tree:\n os.mkdir(folder)", "def create_dir(newdir):\n if not os.path.isdir(newdir):\n try:\n os.makedirs(newdir)\n print(newdir)\n except IOError:\n print(\"cannot create %s directoy\" % newdir)\n return 0", "def make_test_dir(path, test_name):\n LOG.info('In make_test_dir')\n OutputWrite.change_to_script_directory(__file__)\n path_with_test_name = os.path.join(path, test_name)\n LOG.debug('Path with Test name = {0}'.format(path_with_test_name))\n os.makedirs(path_with_test_name, exist_ok=True)\n print('path with test name {0}'.format(path_with_test_name))\n return path_with_test_name", "def mkdir_p(cls, path):\n os.makedirs(path)", "def PrepareOutputDir(dirname, preserve=False):\n global outdir, preserve_outdir\n\n preserve_outdir = dirname or preserve\n if dirname:\n outdir = dirname\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError as err:\n raise CmdError(\"Cannot make output directory '%s': '%s'\" %\n (outdir, err.strerror))\n tout.Debug(\"Using output directory '%s'\" % outdir)\n else:\n outdir = tempfile.mkdtemp(prefix='binman.')\n tout.Debug(\"Using temporary directory '%s'\" % outdir)", "def MakeDir(self, dirname):\n if os.path.exists(dirname):\n return\n try:\n os.umask(UMASK_DIR)\n os.makedirs(dirname)\n except OSError:\n self.errors = True\n errstr = '\\nCould not create directory: %s ... 
' % dirname\n self.LogErrors(errstr)\n raise OSError(errstr)\n os.umask(UMASK_FILE)", "def __mkdir(self, output_directory):\n try:\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n return True\n except Exception as e:\n print e\n return False", "def make_directory(directory):\n logger.info(\"Create directory %s\", directory)\n if not os.path.exists(directory):\n os.mkdir(directory)\n else:\n logger.warning(\"Cannot create directory %s. Directory already exists\", directory)", "def mkdir(dirName):\r\n raw = 'mockaroo_data/raw/' + dirName\r\n cln = 'mockaroo_data/cln/' + dirName\r\n call(['mkdir', raw])\r\n call(['mkdir', cln])\r\n\r\n return", "def createDir(self, dirName):\n\n if not os.path.exists(dirName):\n self.createDir(os.path.dirname(dirName))\n try:\n os.mkdir(dirName)\n except:\n print 'Current directory =', os.getcwd()\n raise" ]
[ "0.8209889", "0.8200543", "0.81390864", "0.80648106", "0.79630965", "0.7929463", "0.78599143", "0.77202404", "0.76051563", "0.7550654", "0.7502131", "0.73690355", "0.73690355", "0.73546755", "0.72924715", "0.7292125", "0.7285806", "0.7255322", "0.7252684", "0.72381806", "0.72234184", "0.7216353", "0.7207965", "0.7164206", "0.71585506", "0.7132202", "0.7123396", "0.71226233", "0.7118792", "0.7093302", "0.7085902", "0.70756954", "0.7070938", "0.7070854", "0.7070817", "0.70668584", "0.7065357", "0.7063123", "0.7053542", "0.7052582", "0.7045845", "0.7040155", "0.70326495", "0.7023773", "0.7020508", "0.7018334", "0.7016901", "0.70131695", "0.70122695", "0.70122695", "0.70109767", "0.70082074", "0.6987058", "0.6969887", "0.6969677", "0.6969519", "0.6968047", "0.69671357", "0.69663167", "0.6960883", "0.6955757", "0.69551647", "0.69499964", "0.69342554", "0.69325525", "0.69325525", "0.6914116", "0.6899475", "0.68953663", "0.68830496", "0.68714124", "0.685232", "0.6852001", "0.68483216", "0.68427205", "0.6838611", "0.6800846", "0.6800758", "0.67998594", "0.6797524", "0.6796579", "0.6789464", "0.67847997", "0.67832893", "0.6781026", "0.67788076", "0.6776444", "0.6758693", "0.67393374", "0.6737227", "0.6723534", "0.67203516", "0.6704462", "0.6704278", "0.6698349", "0.6693687", "0.66884464", "0.6680832", "0.6680296", "0.66589624" ]
0.75952923
9
Construct a filename from varying experimental parameters.
def construct_filename(output_dir, file_descriptor, extension, *args, **kwargs):
    if len(args) == 0 and len(kwargs) == 0:
        return Path(output_dir, '{}{}'.format(file_descriptor, extension))
    elif len(args) == 0:
        return Path(output_dir, '{}_{}{}'.format('_'.join([f'{k}{v}' for k, v in kwargs.items() if v is not None]), file_descriptor, extension))
    elif len(kwargs) == 0:
        return Path(output_dir, '{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]), file_descriptor, extension))
    else:
        return Path(output_dir, '{}_{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]), '_'.join([f'{k}{v}' for k, v in kwargs.items() if v is not None]), file_descriptor, extension))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_filename(self):\n expansion_string = '_'.join(sorted(args.exp)) if args.exp else 'noexp'\n return 'quad--{}--{}{}{}_{}{}_{}{}_{}{}{}{}{}_{}{}--{:02}_{:02}--{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}--{}.log'.format(self.pts_total, hex(self.cnt_T)[-1:], self.cnt_S, self.cnt_U, self.cnt_P, self.cnt_G, self.cnt_F, self.cnt_A, self.cnt_1, self.cnt_2, self.cnt_3, self.cnt_4, self.cnt_5, hex(self.cnt_O)[-1:], self.cnt_M, self.popula, self.energy, self.pts_tower, self.pts_shop, self.pts_public, self.pts_park, self.pts_factory, self.pts_harbor, self.pts_office, self.pts_monument, self.pts_expansion, expansion_string)", "def filename(N, Dr, g, launch):\n\n return 'N%s_R%s_G%s_E%s.datR' % tuple(map(float_to_letters,\n (N, Dr, g, launch)))", "def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)", "def file_name(product, ext='json'):\n return f\"./output/{product}_{datetime.now().strftime('%Y-%m-%d_%H%M%S')}_transformed_{version}.{ext}\"", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def outfigname(num, ext, char=\"\"):\n return \"f{}{}{}\".format(num, char, ext)", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename", "def generate_filename_from_options(opt):\n fs = '{}_emb_{}_hid_{}_de_{}_dd_{}_n_lyrs_{}_lr_{}'.format(\n opt.rnn_cell,\n opt.embedding_size, opt.hidden_size,\n opt.dropout_p_encoder, opt.dropout_p_decoder,\n opt.n_layers, opt.lr)\n\n if opt.optim is not None:\n fs += '_{}'.format(opt.optim)\n if opt.attention:\n fs += '_att_{}'.format(opt.attention_method)\n if opt.positional_attention:\n fs += '_pos_att_{}'.format(opt.positioning_generator_size)\n if opt.attention and opt.positional_attention:\n fs += '_mix_{}'.format(opt.attention_mixer)\n if opt.bidirectional:\n fs += '_bidirect'\n if opt.mini:\n fs += '_mini'\n\n return fs", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def output_filename(phase, debug=False, append=''):\n suffix = ''\n if append:\n suffix = '_{0}'.format(append)\n\n if debug:\n filename = os.path.abspath(config.output_path + tst_map[phase])\n else:\n filename = os.path.abspath(config.output_path + csv_map[phase])\n\n return filename + suffix + '.csv'", "def filename_creator(value, format_str=None, start_date=None, stop_date=None):\n\n 
estr = ''.join(('This feature has not been implemented yet and is here ',\n 'to support experimentation by the pysat team. If you are ',\n 'here intentionally, please contact the pysat developers ',\n 'at pysat.developers@gmail.com or pysat.slack.com and let ',\n 'us know what you are trying to accomplish so we can ',\n 'evaluate supporting the desired use case.'))\n raise NotImplementedError(estr)\n\n return", "def generate_filename(playlist_or_album_name, user_id_or_artist_id=None):\n filename = ''\n if user_id_or_artist_id:\n filename += user_id_or_artist_id + '_'\n filename += playlist_or_album_name + '_' + str(time_ns())\n return filename", "def outputFilename(name=\"\", ext=\"\", time=True):\n # get the date in the format specifed\n dateTime = datetime.now()\n dateTimeFormat = \"%Y-%m-%d__%H-%M-%S\" if time else \"%Y-%m-%d\"\n fileName = dateTime.strftime(dateTimeFormat)\n\n # construct the filename\n fileName = fileName + \"_\" + name if fileName != \"\" else fileName\n ext = \".\" + ext if ext != \"\" else \"\"\n\n return fileName + ext", "def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def filename_from_kwargs(kwargs_dict, ext='.png'):\n excluded_kws = [\n 'data',\n 'estimator',\n 'hue_order',\n 'palette',\n 'ax',\n 'hue_order',\n 'order',\n 'line_kws',\n 'scatter_kws']\n filename = '_'.join([f'{key}={val}' for key, val in kwargs_dict.items() if key not in excluded_kws])\n filename = f'{filename}{ext}'\n return filename", "def get_filename(self, path, params, type_=None):\n phase = self.phase\n\n if type_:\n phase += ('_' + type_)\n\n filename = self.FILENAME_TEMPLATES[phase].format(**params)\n\n return os.path.join(path, filename)", "def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )", "def test_generate_filename():\n params = dict(id='TEST', db='nucleotide', rettype='gbwithparts')\n\n filename = core._generate_filename(params, 'foo')\n assert filename == 'foo.gbk'\n\n params['rettype'] = 'fasta'\n filename = core._generate_filename(params, 'foo')\n assert filename == 'foo.fa'\n\n params['rettype'] = 'ft'\n filename = core._generate_filename(params, 'foo')\n assert filename == 'foo.ft'\n\n del params['rettype']\n params['report'] = 'gff3'\n filename = core._generate_filename(params, 'foo')\n assert filename == 'foo.gff'\n\n params = dict(id='TEST', 
db='protein', rettype='fasta')\n filename = core._generate_filename(params, None)\n assert filename == 'TEST.fa'", "def generate_filename(extension, with_path=True, base_folder=None):\n name = get_md5(str(uuid4()))\n # if not extension:\n # extension = get_file_extension()\n if base_folder is not None:\n base_folder = \"%s/\" % base_folder.rstrip(\"/\")\n else:\n base_folder = \"\"\n\n if with_path:\n return \"%s%s/%s/%s/%s.%s\" % (base_folder, name[0], name[1], name[2], name, extension)\n else:\n return \"%s%s.%s\" % (base_folder, name, extension)", "def make_filename(key, extension):\n key = unicode(key.strip())\n return '{}.{}'.format(slugify(key), extension)", "def get_filename(\n self,\n name,\n ext=\".npz\",\n map_tag=None,\n iter_index=None,\n extra_tag=None,\n bp_opts=False,\n ):\n if self.output_root is None:\n return None\n\n if bp_opts:\n if self.ensemble_mean:\n name = \"{}_mean\".format(name)\n elif self.ensemble_median:\n name = \"{}_median\".format(name)\n elif self.sim_index is not None:\n name = \"{}_sim{:04d}\".format(name, self.sim_index)\n if self.signal_type_sim:\n name = \"{}_{}\".format(name, self.signal_type_sim)\n if self.noise_type_sim:\n name = \"{}_{}\".format(name, self.noise_type_sim)\n else:\n if self.data_type != \"raw\":\n name = \"{}_{}\".format(name, self.data_type)\n if getattr(self, \"template_cleaned\", False):\n name = \"{}_clean_{}\".format(name, self.template_type)\n if getattr(self, \"planck_sub\", False):\n name = \"{}_planck_sub\".format(name)\n if self.weighted_bins:\n name = \"{}_wbins\".format(name)\n if getattr(self, \"return_cls\", False):\n name = \"{}_cl\".format(name)\n\n if map_tag is not None:\n name = \"{}_map_{}\".format(name, map_tag)\n if iter_index is not None:\n name = \"{}_iter{:03d}\".format(name, iter_index)\n if extra_tag is not None:\n name = \"{}_{}\".format(name, extra_tag)\n\n tag = \"_{}\".format(self.output_tag) if self.output_tag else \"\"\n if not ext.startswith(\".\"):\n ext = \".{}\".format(ext)\n return os.path.join(self.output_root, \"{}{}{}\".format(name, tag, ext))", "def get_file_name(x, feature_name, ext='npy'):\n # this is kind-of standard\n name = '.'.join(x.split('.')[:-1])\n filename = '{}.{}.{}'.format(name, feature_name, ext)\n return filename", "def getExperimentName(notebook, datalength, epochs, init_lr, *args):\n notebook = os.path.basename(notebook).split(\".\")[0]\n name = \"{}_datalen-{}_epochs-{}_init_lr-{}\".format(\n notebook, datalength, epochs, init_lr\n )\n return name + \"_\" + \"_\".join(args)", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename", "def make_img_name(file_ext='.png'):\r\n fn = []\r\n # format seqs and write out to temp file\r\n for i in range(0, 30):\r\n fn.append(choice(ALPHABET))\r\n return ''.join(fn) + file_ext", "def ez_filename(self, ez):\n return ez.index + '_' + ez['TAXPER'] + '_990EZ'", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def generate_filename(\n radar, field, sweep, ext=\"png\", datetime_format=\"%Y%m%d%H%M%S\", use_sweep_time=False\n):\n name_s = 
generate_radar_name(radar).replace(\" \", \"_\")\n field_s = field.replace(\" \", \"_\")\n if use_sweep_time:\n time_s = generate_radar_time_sweep(radar, sweep).strftime(datetime_format)\n else:\n time_s = generate_radar_time_begin(radar).strftime(datetime_format)\n sweep_s = str(sweep).zfill(2)\n return f\"{name_s}_{field_s}_{sweep_s}_{time_s}.{ext}\"", "def generate_glider_filename(description):\n filename = (\n \"{glider}-{year:d}-{day:03d}-{mission:d}-{segment}.{type}\".format(**description)\n )\n return os.path.join(description['path'], filename)", "def generate_filename(ext,sha512base16_hash=None):\n## # Timestamp filename\n## timestamp = str(get_current_unix_time())\n## filename = timestamp+\".\"+ext\n # Base16 hash filename\n filename = sha512base16_hash+\".\"+ext\n return filename", "def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname", "def filename(self, age, metal, imf=None):\n imf = 1.3 if imf is None else imf\n msign = \"p\" if metal >= 0. else \"m\"\n azero = \"0\" if age < 10. else \"\"\n fname = \"Ebi{0:.2f}Z{1}{2:.2f}T{3}{4:02.4f}_iTp0.00_baseFe.fits\".format(\n imf, msign, abs(metal), azero, age)\n return os.path.join(self.data_dir, fname)", "def _get_parameter_based_output_prefix(self):\n\n # As you can see the generation of the output filename prefix is\n # straigthforward but pretty tireingsome.\n filename_prefix = \"sequential_alignment_\"\n\n filename_prefix += \"s-%d_e-%d_r-%d_\" % tuple(self.options.sliceRange)\n\n try:\n filename_prefix += \"ROI-%s\" % \"x\".join(map(str, self.options.registrationROI))\n except:\n filename_prefix += \"ROI-None\"\n\n try:\n filename_prefix += \"_Resize-%s\" % \"x\".join(map(str, self.options.registrationResize))\n except:\n filename_prefix += \"_Resize-None\"\n\n filename_prefix += \"_Color-%s\" % self.options.registrationColor\n\n try:\n filename_prefix += \"_Median-%s\" % \"x\".join(map(str, self.options.medianFilterRadius))\n except:\n filename_prefix += \"_Median-None\"\n\n filename_prefix += \"_Metric-%s\" % self.options.antsImageMetric\n filename_prefix += \"_MetricOpt-%d\" % self.options.antsImageMetricOpt\n filename_prefix += \"_Affine-%s\" % str(self.options.useRigidAffine)\n\n filename_prefix += \"_eps-%d_lam%02.2f\" % \\\n (self.options.graphEdgeEpsilon, self.options.graphEdgeLambda)\n\n try:\n filename_prefix += \"outROI-%s\" % \"x\".join(map(str, self.options.outputVolumeROI))\n except:\n filename_prefix += \"outROI-None\"\n\n return filename_prefix", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def file_name(self) -> str:\n if self.service == \"all\":\n service_string = \"\"\n else:\n service_string = f\"_{self.service.lower().strip()}\"\n\n if self.no_params:\n return f\"no_params{service_string}.tf\"\n elif self.params_optional:\n return f\"params_optional{service_string}.tf\"\n elif self.params_required:\n return f\"params_required{service_string}.tf\"", "def get_output_filename(item: str, root: str, i: int) -> str:\n element_split = item.split(\"/\")\n item, ext = element_split[-1].split(\".\")\n if i < 0:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}.{ext}\"\n else:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}_aug{i}.{ext}\"", "def filename(self):\n return '%s%s' % 
(self.identifier, self.extension)", "def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n # if self.suffix_time != \"\":\n # index = self.baseFilename.find(\".\" + self.suffix_time)\n # if index == -1:\n # index = self.baseFilename.rfind(\".\")\n # self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self._get_format_filename()\n\n self.mode = 'a'\n if not self.delay:\n self.stream = self._open()", "def _file_name(self, dtype_out_time, extension='nc'):\n out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,\n dtype_vert=self.dtype_out_vert)\n in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,\n self.dtype_in_vert)\n ens_lbl = utils.io.ens_label(self.ens_mem)\n yr_lbl = utils.io.yr_label((self.start_date.year, self.end_date.year))\n return '.'.join(\n [self.name, out_lbl, in_lbl, self.model.name,\n self.run.name, ens_lbl, yr_lbl, extension]\n ).replace('..', '.')", "def _generate_overlay_file_name(self, well, channel, desc):\n \n return \"c\" + channel + \"_w\" + well + \"_\" + desc + \".png\"", "def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)", "def create_filename(value):\n return '%s.mp3' % slugify(value, u'_')", "def generateModelFilename(args, type):\n opt = []\n if args.letters:\n opt.append('l')\n if args.symbols:\n opt.append('s')\n if args.digits:\n opt.append('d')\n opt.sort()\n return \"models/model_{0}_{1}.yml\".format(type, ''.join(opt))", "def dataset_part_filename(dataset_part, num_data):\n if num_data >= 0:\n return '{}_data_{}.npz'.format(dataset_part, str(num_data))\n return '{}_data.npz'.format(dataset_part)", "def _standardized_filename(self, election, bits=None, **kwargs):\n reporting_level = kwargs.get('reporting_level')\n jurisdiction = kwargs.get('jurisdiction')\n office = kwargs.get('office')\n office_district = kwargs.get('office_district')\n extension = kwargs.get('extension')\n if extension is None:\n extension = self._filename_extension(election)\n\n if bits is None:\n bits = []\n\n bits.extend([\n election['start_date'].replace('-', ''),\n self.state,\n ])\n\n if election['special']:\n bits.append('special')\n\n bits.append(election['race_type'].replace('-', '_'))\n\n if jurisdiction:\n bits.append(slugify(jurisdiction))\n\n if office:\n bits.append(slugify(office))\n\n if office_district:\n bits.append(slugify(office_district))\n\n if reporting_level:\n bits.append(reporting_level)\n\n return \"__\".join(bits) + extension", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def get_parcels_output_name(w_10, w_rise, diffusion_type, boundary, alpha, mld=settings.MLD, dt=settings.dt_int.seconds,\n theta=1.0, wave_roughness=False, gamma=1.0):\n name = settings.output_dir + '{}_{}_w10_{}_w_rise_{}_MLD_{}'.format(diffusion_type, boundary, w_10, w_rise, mld)\n if 'Markov' in boundary:\n name += '_alpha_list={}'.format(alpha)\n # Relic when I had already run the dt = 1s simulation and didn't want to redo 
them due to how long it took\n if dt != 1:\n name += '_dt={}'.format(dt)\n if diffusion_type == 'KPP':\n name += '_theta={}'.format(theta)\n if wave_roughness:\n name += '_wave_roughness'\n elif diffusion_type == 'SWB':\n name += '_gamma={}'.format(gamma)\n return name + '.nc'", "def create_html_filename(coord_filename, name_ending):\r\n outpath = coord_filename.split('/')[-1] + name_ending\r\n return outpath", "def generate_report_file_name(args: Dict[str, Any]) -> str:\n return (\n f\"{args.get('report_type', '').lower().replace(' ', '_')}_fireeye_\"\n f\"{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.\"\n f\"{args.get('type', REPORT_TYPE_ALLOWED_FORMAT[args.get('report_type', '')][0])}\"\n )", "def data_filename_create(movie_filename):\n path, filename = os.path.split(movie_filename)\n filename_stub, ext = os.path.splitext(filename)\n if os.path.splitext(movie_filename)[1] in ['.png','.jpg','.tiff','.JPG']: \n data_filename = os.path.join(path, ''.join([letter for letter in filename_stub if letter.isalpha()]) + '.hdf5')\n else:\n data_filename = os.path.join(path, filename_stub + '.hdf5')\n return data_filename", "def get_filename_with_datetime(name, extension):\n now = datetime.datetime.now()\n\n # truncated version datetime ISO format (withput microseconds and and timezone)\n datetime_ISO_format = now.strftime('%Y-%m-%d %H:%M:%S')\n\n return '{0} {1}.{2}'.format(name, datetime_ISO_format, extension)", "def _get_target_name(self, n, k, att, pol, emb_dim):\n threshold = str(int(self.threshold * 10))\n agg_name = \"_{}_{}_{}_{}_{}_{}\".format(n, k, att, pol, emb_dim, threshold)\n target_file = self.source_file[:-4] + agg_name + \".csv\"\n return target_file", "def generate_datetime_filename(label=\"data\", extension=\"csv\", postfix=None, fractional=True, dt=None):\n if dt is None:\n dt = datetime.now()\n filename = \"\" if label is None else f\"{label}-\"\n fractional_str = f\"-{dt.microsecond:06d}\" if fractional is True else \"\"\n filename += f\"{dt.year}-{dt.month:02d}-{dt.day:02d}_{dt.hour:02d}-{dt.minute:02d}-{dt.second:02d}{fractional_str}\"\n if postfix is not None:\n filename += f\"-{postfix}\"\n if extension is not None:\n filename += f\".{extension}\"\n return filename", "def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name", "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def get_name_from_experiment(name, d):\n name += \"_\"\n for v in d.values():\n fname = v.name\n if fname == \"params.json\":\n continue\n name += \"\".join(fname.split(\".\")[:-1])\n name += \"_\"\n return name[:-1]", "def get_filename(problem, width=3):\n return '{0:0{w}d}.py'.format(problem, w=width)", "def get_filename(self, file_object):\n\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n\n result = \"<show> <season>x<episode> <name>.mp4\"\n result = result.replace(\"<show>\", file_object.show.name)\n result = result.replace(\"<season>\", \"%.2d\" % \\\n int(file_object.season.number))\n result = result.replace(\"<episode>\", \"%s\" % \\\n str(file_object.number))\n result = result.replace(\"<name>\", file_object.name)\n return result", "def make_tex_name(self, end):\n tex_name = \"\"\n if 
hasattr(self, 'labels'):\n if self.labels.dict['data_name'] == '':\n tex_name += \"data_\"\n else:\n tex_name += \"true_%s_\"%self.labels.dict['data_name']\n if self.detector is not None:\n tex_name += \"%s_\"%self.detector\n if self.selection is not None:\n tex_name += \"%s_\"%self.selection\n tex_name += end\n tex_name += \".tex\"\n return tex_name", "def make_file_name(name):\n expanded_path = os.path.expandvars(make_fp_rel(name))\n return expanded_path", "def comm_filename(S, N, ncomm, bisec, transect=False, abu=None, comm_name=None):\n \n if not comm_name:\n comm_name = 'S%s_N%s' % (S, N)\n if abu:\n empir = '_empirSAD'\n else:\n empir = ''\n if transect:\n runtype = 'transect'\n else:\n runtype = 'grid'\n return './comms/simulated_comms_%s%s_C%s_B%s_%s.txt' % (comm_name,\n empir, ncomm,\n bisec, runtype)", "def generate_raw_filename(self, source_name, table_name, environment, seq_number, upload_time, load_type,\n file_format):\n file_date = upload_time.strftime(\n \"%Y-%m-%d-%H-%M-%S-%f\")[:-3] # [:-3] => Removing the 3 last characters as %f is for millis.\n res = f'{source_name}/{source_name}_{table_name}/' \\\n f'{source_name}_{environment}_{table_name}_{str(seq_number).zfill(3)}_' \\\n f'{file_date}_utc_{load_type}.{file_format}'\n res = res.lower()\n\n # Check if no illegal chars were passed\n #test = FileNameStandardConvention(res)\n #test.check_naming_convention()\n return res", "def _generate_output_name(extension):\n output_name = 'TDG_{:%Y-%m-%d_%H-%M-%S}.{}'.format(datetime.now(), extension)\n return output_name", "def py_simple_output_filename(filename, tag, ending):\n\n py_simple_output_filename = (py_output_dir(tag, ending) + \"/\"\n + filename + \".\"\n + ending)\n\n return py_simple_output_filename", "def generate_grid_filename(grid, field, level, ext=\"png\"):\n name_s = generate_grid_name(grid).replace(\" \", \"_\")\n field_s = field.replace(\" \", \"_\")\n time_s = generate_grid_time_begin(grid).strftime(\"%Y%m%d%H%M%S\")\n level_s = str(level).zfill(2)\n return f\"{name_s}_{field_s}_{level_s}_{time_s}.{ext}\"", "def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):\n if not basename:\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n\n if cwd is None:\n cwd = os.getcwd()\n if ext is None:\n ext = Info.output_type_to_ext(self.inputs.outputtype)\n if change_ext:\n suffix = \"\".join((suffix, ext)) if suffix else ext\n\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname", "def generate_file_name(old_file_name: str) -> str:\r\n return old_file_name.split(\".\")[0] + '_features' + '.npy'", "def params_to_filename(experiment_name, architecture, epoch_val=None):\n\n if isinstance(epoch_val, int):\n return '.'.join([experiment_name, architecture, '%06d' % epoch_val,\n 'path', 'tar'])\n\n\n\n glob_prefix = os.path.join(*[CHECKPOINT_DIR,\n '%s.%s.*' % (experiment_name, architecture)])\n re_prefix = '%s\\.%s\\.' 
% (experiment_name, architecture)\n re_suffix = r'\\.path\\.tar'\n\n valid_name = lambda f: bool(re.match(re_prefix + r'\\d{6}' + re_suffix,f))\n select_epoch = lambda f: int(re.sub(re_prefix, '',\n re.sub(re_suffix, '', f)))\n valid_epoch = lambda e: (e >= (epoch_val or (0, 0))[0] and\n e <= (epoch_val or (0, float('inf')))[1])\n\n filename_epoch_pairs = []\n for full_path in glob.glob(glob_prefix):\n filename = os.path.basename(full_path)\n if not valid_name(filename):\n continue\n\n epoch = select_epoch(filename)\n if valid_epoch(epoch):\n filename_epoch_pairs.append((filename, epoch))\n\n\n return [_[0] for _ in sorted(filename_epoch_pairs, key=lambda el: el[1])]", "def generate_namefile(pathfolder, methodvalues):\n datestr = datetime.datetime.now().date().strftime('%F')\n paramsstr = str(hash(str(methodvalues)))\n namefile = datestr + '-' + methodvalues['codename'] + '_' + paramsstr\n namefile = os.path.join(pathfolder, namefile)\n return namefile", "def get_full_filename(dirname, name, ext, tmstamp=False):\n fill = '_' + str_current_time() if tmstamp else ''\n fmt = '/{}{}{}' if ext.startswith('.') else '/{}{}.{}'\n return resolve(dirname) + fmt.format(name, fill, ext)", "def create_results_string(params):\n name = params.data.subject_id\n name += '-' + get_hypothesis(params)\n name += '-' + params['data']['word_embedding_type']\n\n # Data preprocessing.\n if params['data']['space_downsample']:\n name += '-spaceDown'\n if params['data']['normalize_inputs']:\n name += '-normInp'\n if params['data']['normalize_outputs']:\n name += '-normOut'\n if params['data']['brain_scaling']:\n name += '-scale_' + str(params['data']['brain_scaling'])\n if params['data']['avg_time_window_length']:\n name += '-avgTime_' + str(params['data']['avg_time_window_length'])\n if params['data']['num_unique_train_words_to_keep']:\n name += '-keep%dWords-seedRemove_%d' % \\\n (params['data']['num_unique_train_words_to_keep'], params['data']['seed_remove_words'])\n\n # Zero-shotness.\n if params.learning.zero_shot_words:\n name += '-zeroShotWords'\n if params.learning.zero_shot_questions:\n name += '-zeroShotQuest'\n\n name += '-' + params.output.output_dir_suffix\n\n return name", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def generate_filename(self, evla_conf, subband):\n\n node = os.uname()[1]\n node_idx = node.split('-')[-1] # Assumes cbe-node-XX naming\n\n # This is the old pulsar version:\n #self.data_dir = \"/lustre/evla/pulsar/data\"\n #self.outfile_base = \"%s.%s.%s.%s\" % (evla_conf.source,\n # evla_conf.projid, evla_conf.seq, node)\n\n # New version, 'normal' VLA data sets (SDM+BDF) are stored\n # using datasetId as the main folder name. 
Store here using\n # node-specific subdirs because there are lots of files..\n # Could make a subdir for each datasetId..\n self.data_dir = \"/lustre/evla/pulsar/data/%s\" % node\n #self.outfile_base = \"%s.%d.%s.%s\" % (evla_conf.datasetId,\n # int(evla_conf.seq),evla_conf.source,node_idx)\n #self.outfile_base = \"%s.%d.%s.%s-%02d\" % (evla_conf.datasetId,\n # int(evla_conf.seq), evla_conf.source,\n # subband.IFid, subband.swIndex-1)\n # New-new version, use scan+subscan number rather than seq number, \n # remove source name from filename\n self.outfile_base = \"%s.%d.%d.%s-%02d\" % (evla_conf.datasetId,\n evla_conf.scanNo, evla_conf.subscanNo,\n subband.IFid, subband.swIndex-1)", "def get_validation_file_name(self):\n name = self.test_name + \" (T\" + str(self.test_index) + \"_P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + \\\n str(self.parameters_helper_index) + \".\" + \\\n str(self.parameters_incremental_index)\n\n if self.replay_source is not None:\n name = name + \"_\"+ self.replay_source\n\n if self.helper_decoders_one_class:\n name = name + \"_1\"\n\n name = name + \")\"\n\n return name", "def normalized_export_filename(title, extension):\n filename = timezone.localtime().strftime('%Y-%m-%d_%H-%M-%S__') + slugify(title)\n if extension.startswith(os.path.extsep):\n filename += extension\n else:\n filename += os.path.extsep + extension\n return filename", "def gen_result_fname(fs, q_idx=0, p_idx=0, v_idx=None):\n\n appendix = gen_appendix(q_idx, p_idx, v_idx)\n fname = fs['identifier']+'_' + appendix +'.csv'\n\n full_fname = os.path.join(fs['dir']['res'], fname)\n\n return full_fname", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def _get_variation_filename(variation, filename):\n splitted_filename = list(os.path.splitext(filename))\n splitted_filename.insert(1, '.%s' % variation['name'])\n return ''.join(splitted_filename)", "def generate_file_name(hour, minute):\n hour = str(hour)\n if len(hour) == 1:\n hour = \"0\" + hour\n minute = str(minute)\n if len(minute) == 1:\n minute = \"0\" + minute\n file_name = date + \"--\" + hour + special_char + minute + special_char + \"00,00.mvol\"\n\n # print \"filename: \",file_name\n return file_name", "def get_exp_identifier(dataset_type, FOLD, AR, AUTHOR, POST):\n global FEATURES_STR\n global AR_TYPE\n return dataset_type + '-' + AR_TYPE + '-fo' + str(FOLD) + '-fe' +\\\n FEATURES_STR + '-ar' + str(AR) + '-a' + str(AUTHOR) + '-p' + str(POST)", "def _PartName(self,partindex,nparts):\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\n\t\tp = '%d_%d' % (partindex, nparts * nparts - 1)\n\t\tfd,name = mkstemp(prefix = p, suffix = '.image')\n\t\tos.close(fd)\n\t\tself.result = name\n\t\treturn name", "def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'", "def create_name(name, epochs, lr, lr_decay_step, dilation, batch_size):\n\treturn '{}_ep-{}_lr-{}_de-{}_di-{}_bs-{}'.format(name, epochs, lr, lr_decay_step, sum(dilation), batch_size)", "def generate_outname(self, keep_name='False', fileappend=''):\n if not keep_name:\n\n if len(self.file_) == 0:\n outname = self.file_[0]\n else:\n outname = self.file_[0] + 
' PlusOther'\n else:\n outname = '{}_{}_{}{}'.format(self._attrs['project'], self._attrs['trip_recovered'], self._attrs['site'], fileappend)\n\n return outname", "def generateFileName(self):\n return 'Covid' + self.map_type + '.html'", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def format_filename(self, s):\n valid_chars = \"-_ %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename", "def expid(val,expt_name=None):\n global experiment_name\n if not expt_name:\n assert experiment_name, \"Must set experiment name\"\n expt_name = experiment_name\n return \"{}_{}\".format(expt_name, val)", "def generateFilename(self, name):\n return self.context.generateUniqueId(type_name='Module')", "def create_motion_name(test_name, sensor_code, code_suffix=\"\"):\n return \"%s-%s-%s\" % (test_name, sensor_code, code_suffix)", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def makeTimeFilename(prefix, ext): \n suffix = time.strftime(\"%b%d_%H%M\") + ext\n return prefix + suffix" ]
[ "0.7097925", "0.66764444", "0.6525119", "0.64324546", "0.62918216", "0.6271143", "0.62647027", "0.6250804", "0.6229975", "0.6168056", "0.6167285", "0.61520386", "0.6138422", "0.6122441", "0.61129963", "0.6110849", "0.6091121", "0.60664004", "0.6061048", "0.60583425", "0.5980936", "0.5978534", "0.597843", "0.596222", "0.5961443", "0.5947317", "0.59439194", "0.5939773", "0.5938466", "0.59309757", "0.5930414", "0.5919461", "0.5901457", "0.5895069", "0.589179", "0.5891213", "0.5876906", "0.5868833", "0.58567256", "0.58419394", "0.58407146", "0.5828079", "0.5818968", "0.58149385", "0.58128834", "0.580262", "0.57924426", "0.5790317", "0.57775426", "0.57769555", "0.5771581", "0.57660884", "0.5754684", "0.57537436", "0.5750785", "0.57405835", "0.5710748", "0.5703461", "0.56979746", "0.56876373", "0.5683278", "0.56801623", "0.567256", "0.5669998", "0.566748", "0.56587696", "0.5658744", "0.56546247", "0.5649178", "0.5645589", "0.56452596", "0.5644644", "0.56415933", "0.5641476", "0.5640937", "0.5639238", "0.56378233", "0.5631567", "0.56284565", "0.5622442", "0.56175524", "0.5603047", "0.55997336", "0.55903995", "0.5588377", "0.5585291", "0.55801564", "0.5574206", "0.5574144", "0.5568138", "0.55653876", "0.55647767", "0.55521864", "0.5545741", "0.554253", "0.5530028", "0.55290985", "0.5525933", "0.55217606", "0.5519481" ]
0.59908104
20
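Several of the higher-scoring negatives in the list above share one pattern: build an output filename from a label plus a formatted timestamp. A minimal sketch of that shared pattern follows; the function name, arguments, and strftime format are illustrative assumptions, not taken from any single entry above.

    import os
    from datetime import datetime

    def timestamped_filename(out_dir, label, ext="csv"):
        # Assumed format: e.g. results/run_2024-01-31_17-05-09.csv
        stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        return os.path.join(out_dir, f"{label}_{stamp}.{ext}")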
Save model hyperparameters/metadata to output directory. model_options is an argparse Namespace, and is converted to a dictionary and pickled.
def save_model_options(output_dir, model_options, predictor='classify'): if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data output_file = construct_filename(output_dir, 'model_options', '.pkl', training_data, predictor, model_options.model, s=model_options.seed) with open(output_file, 'wb') as f: pkl.dump(vars(model_options), f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(output_dir, model, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir,\n 'model',\n '.pkl',\n gene,\n training_data,\n model_options.model,\n predictor,\n s=model_options.seed)\n\n with open(output_file, 'wb') as f:\n pkl.dump(model, f)", "def save_model(self, output_model: ModelEntity):\n logger.info(\"called save_model\")\n buffer = io.BytesIO()\n hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))\n labels = {label.name: label.color.rgb_tuple for label in self._labels}\n model_ckpt = torch.load(self._model_ckpt)\n modelinfo = {\n \"model\": model_ckpt,\n \"config\": hyperparams_str,\n \"labels\": labels,\n \"VERSION\": 1,\n }\n\n torch.save(modelinfo, buffer)\n output_model.set_data(\"weights.pth\", buffer.getvalue())\n output_model.set_data(\n \"label_schema.json\",\n label_schema_to_bytes(self._task_environment.label_schema),\n )\n output_model.precision = self._precision", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(self, model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def save_model(model, filename):\n with open(filename, 'wb') as f:\n joblib.dump(model, f)", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def save(self, path=None):\n path = self.opt.get('model_file', None) if path is None else path\n\n if path and hasattr(self, 'model'):\n model = {'model': self.model.state_dict(),\n 'longest_label': self.model.longest_label,\n 'optimizer': self.optimizer.state_dict(),\n 'optimizer_type': self.opt['optimizer']}\n\n with open(path, 'wb') as write:\n torch.save(model, write)\n\n # save opt file as json\n with open(path + \".opt\", 'wb') as handle:\n pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save_model(model, model_filepath):\n\n outfile = open('model_filepath','wb')\n pickle.dump(model, outfile)\n outfile.close()", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_model(model, model_filepath):\n\n logging.info(\"run save_model\")\n\n # save model with jolib library\n joblib.dump(model, model_filepath)", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def save_model(model, model_filepath): \n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self, model):\n # serialize model to JSON\n model_json = model.to_json()\n os.makedirs(os.path.dirname(self.model_json_path), exist_ok=True)\n with open(self.model_json_path, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(self.model_weights_path)\n print(\"Saved model to disk\")", "def save_model(model, model_path):\n pickle.dump(model.best_estimator_,open(model_path,'wb'))", "def save(self, 
model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))", "def export_model(self, output_model_dir):\n logger.info(\"Exporting model to directory : {}\".format(output_model_dir))\n self.model.export(output_model_dir=output_model_dir)", "def save(self, model_name):\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Save the Keras models\n if self.mol_to_latent_model is not None:\n self.mol_to_latent_model.save(dirpath + \"/mol_to_latent_model.h5\")\n\n self.latent_to_states_model.save(dirpath + \"/latent_to_states_model.h5\")\n self.batch_model.save(dirpath + \"/batch_model.h5\")\n\n # Exclude unpicklable and unwanted attributes\n excl_attr = [\n \"_DDC__mode\",\n \"_DDC__train_gen\",\n \"_DDC__valid_gen\",\n \"_DDC__mol_to_latent_model\",\n \"_DDC__latent_to_states_model\",\n \"_DDC__batch_model\",\n \"_DDC__sample_model\",\n \"_DDC__multi_sample_model\",\n \"_DDC__model\",\n ]\n\n # Cannot deepcopy self.__dict__ because of Keras' thread lock so this is\n # bypassed by popping and re-inserting the unpicklable attributes\n to_add = {}\n # Remove unpicklable attributes\n for attr in excl_attr:\n to_add[attr] = self.__dict__.pop(attr, None)\n\n # Pickle metadata, i.e. almost everything but the Keras models and generators\n pickle.dump(self.__dict__, open(dirpath + \"/metadata.pickle\", \"wb\"))\n\n # Zip directory with its contents\n shutil.make_archive(model_name, \"zip\", dirpath)\n\n # Finally, re-load the popped elements for the model to be usable\n for attr in excl_attr:\n self.__dict__[attr] = to_add[attr]\n\n print(\"Model saved.\")", "def save_model(self, suffix: str = '', unwrap_parallel: bool = True) -> None:\n # TODO: Logging\n model = self.model\n # We do this awkard check because there are too many different\n # parallel wrappers in PyTorch and some of them have changed names\n # in different releases (DataParallel, DistributedDataParallel{,CPU}).\n is_wrapped = (\n hasattr(model, 'module') and\n 'parallel' in str(type(model)).lower() and\n isinstance(model.module, torch.nn.Module)\n )\n if is_wrapped and unwrap_parallel:\n # If a parallel wrapper was used, the only thing we should save\n # is the model.module, which contains the actual model and params.\n # If we saved the wrapped module directly, deserialization would\n # get unnecessarily difficult.\n model = model.module\n\n state_dict_path = os.path.join(self.save_path, f'state_dict{suffix}.pth')\n model_path = os.path.join(self.save_path, f'model{suffix}.pt')\n\n torch.save(model.state_dict(), state_dict_path)\n torch.save(model, model_path)", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def saveModel(model, outfile, train_opts, model_opts, view_names=None, sample_names=None, feature_names=None):\n\n # QC checks\n assert model.trained == True, \"Model is not trained yet\"\n assert len(np.unique(view_names)) == len(view_names), 'View names must be unique'\n assert len(np.unique(sample_names)) == len(sample_names), 'Sample names must be unique'\n\n # Create output directory\n if not os.path.isdir(os.path.dirname(outfile)):\n print(\"Output directory does not exist, creating it...\")\n os.makedirs(os.path.dirname(outfile))\n\n # For some reason h5py orders the datasets alphabetically, so we have to sort the likelihoods accordingly\n idx = 
sorted(range(len(view_names)), key=lambda k: view_names[k])\n tmp = [model_opts[\"likelihood\"][idx[m]] for m in range(len(model_opts[\"likelihood\"]))]\n model_opts[\"likelihood\"] = tmp\n\n # Open HDF5 handler\n hdf5 = h5py.File(outfile,'w')\n\n # Save expectations\n saveExpectations(model,hdf5,view_names)\n\n # Save parameters\n # saveParameters(model,hdf5,view_names)\n\n # Save training statistics\n saveTrainingStats(model,hdf5)\n\n # Save training options\n saveTrainingOpts(train_opts,hdf5)\n\n # Save model options\n saveModelOpts(model_opts,hdf5)\n\n # Save training data\n saveTrainingData(model, hdf5, view_names, sample_names, feature_names, model_opts[\"likelihood\"])\n\n # Close HDF5 file\n hdf5.close()", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger.info('Saving model')\n dst_config_file = os.path.join(output_dir, self.CONFIG_FILE)\n if self.fullpath_input_configfile != dst_config_file:\n shutil.copy(self.fullpath_input_configfile, dst_config_file)\n\n pickle.dump(self.word_det_rfc,\n open(os.path.join(output_dir, self.WORD_DET_RFC), 'wb'))\n pickle.dump(self.reg_coeffs, open(\n os.path.join(output_dir, self.REGRESSION_PARAMS), 'wb'))", "def _save_model_and_checkpoint(self, save_model_class=False):\n import os\n\n try:\n import cloudpickle\n except ImportError:\n cloudpickle = None\n\n logger.info(\"Saving model...\")\n output_dir = os.path.join(\n self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n )\n\n # save model parameters\n self._save_checkpoint(self.model, trial=None, metrics=None)\n # save the serialized model\n if save_model_class:\n # TODO : fix serialization of DatasetSchema object\n if cloudpickle is None:\n raise ValueError(\"cloudpickle is required to save model class\")\n\n with open(os.path.join(output_dir, \"model_class.pkl\"), \"wb\") as out:\n cloudpickle.dump(self.model.module, out)", "def save_model(model, model_filepath):\n # save the classifier\n with open(model_filepath, 'wb') as fid:\n pkl.dump(model, fid)", "def save_model(model, model_filepath):\n pickle.dump( model, open( model_filepath, \"wb\" ) )", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n 
to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)", "def save_model(model, filename):\n model_dir = \"models\"\n os.makedirs(model_dir,exist_ok=True) #create only if model directory dosent exists\n filePath = os.path.join(model_dir, filename)\n logging.info(filePath)\n joblib.dump(model, filePath)", "def save(model: nn.Module, path):\n save_model(model, path)", "def save_model(self, model_path: str):", "def save_model(model, model_filepath):\n\n with open(model_filepath , 'wb') as file:\n pickle.dump(model, file)", "def save_model(model, filepath):\n try:\n dump(model, filepath)\n except Exception as e:\n print(e)\n print('Failed to pickle model.')", "def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')", "def save_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n saved_path.mkdir(parents=True, exist_ok=True)\n self.model.save_weights(str(saved_path / 'model.vec'))", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name in [\"encoder\", \"decoder\"]:\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n if model_name == 'encoder':\n to_save = self.encoder.state_dict()\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n else:\n to_save = self.decoder.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.optimizer.state_dict(), save_path)", "def save_opts(self):\n models_dir = os.path.join(self.log_path, \"models\")\n if not os.path.exists(models_dir):\n os.makedirs(models_dir)\n to_save = self.opt.__dict__.copy()\n\n with open(os.path.join(models_dir, 'opt.json'), 'w') as f:\n json.dump(to_save, f, indent=2)", "def dump_model(model, filename):\n import pickle\n logging.info(\"Dumping model into model.pkl\")\n with open(filename, 'w') as dump_file:\n pickle.dump(model, dump_file)", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass", "def save_model(model, model_path, model_name):\n config_dict = model.config\n os.makedirs(model_path, exist_ok=True)\n config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)\n with open(config_file, \"w\") as f:\n json.dump(config_dict, f)\n torch.save(model.state_dict(), model_file)", "def save_model(self, model_file):\n net_params = self.get_policy_param() # get model params\n torch.save(net_params, model_file)", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, 'params')\n with open(filename, 'w+') as f:\n pickle.dump(data, f)", "def save(self, save_path):\n # params\n model_params = {\n \"batch_size\": self.batch_size,\n \"lr\": self.lr,\n \"epsilon\": self.epsilon,\n \"gamma\": self.gamma,\n \"epsilon_min\": self.epsilon_min,\n \"epsilon_decay\": self.epsilon_decay,\n 
\"memory\": self.memory,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"_seed\": self._seed,\n }\n\n serialized_params = data_to_json(model_params)\n self.policy.save(save_path + \".h5\")\n\n # Check postfix if save_path is a string\n if isinstance(save_path, str):\n _, ext = os.path.splitext(save_path)\n if ext == \"\":\n save_path += \".zip\"\n\n # Create a zip-archive and write our params\n # there. This works when save_path\n # is either str or a file-like\n with zipfile.ZipFile(save_path, \"w\") as file_:\n # Do not try to save \"None\" elements\n file_.writestr(\"parameters\", serialized_params)", "def save_model(model, model_filepath): \n \n model_file = open(model_filepath,\"wb\")\n pickle.dump(model, model_file)\n model_file.close()", "def save_model(model, filename=\"model.json\"):\n model_json = model.to_json()\n with open(filename, \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(\"model.h5\")\n print(\"Saved model to disk\")", "def save_model(model, model_filepath):\n\n with open(model_filepath, 'wb') as f:\n pickle.dump(model, f)", "def save_model(model, file_name):\n with open(file_name, 'wb') as file:\n pickle.dump(model, file)", "def save_model(self):\n\n print('Save model')\n self.feature_extractor.save_weights(\n self.path_save_model + self.name_model + '.h5')\n\n print('Mean and std')\n np.save(self.path_save_model + 'mean.npy', self.mean)\n np.save(self.path_save_model + 'std.npy', self.std)", "def save_model(self, model, save_path):\n fname = save_path + '/model.joblib'\n self.save_scikit_file(model, fname)", "def save(model, filename):\n print(\"... saving model in {}\".format(filename))\n f = open(filename, \"wb\")\n pickle.dump(model, f)\n f.close()", "def save_model(model, model_filepath):\n try:\n filename = f'{model_filepath}.pkl'\n joblib.dump(model,\n open(filename, 'wb'),\n compress=3)\n except:\n raise Exception(\"Could not save model.\")", "def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, PARAM_EXTENSION)\n with open(filename, 'w') as f:\n pickle.dump(data, f)", "def save_model(self, model):\n # get model file name\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', self._this_party, self._task_chain_id)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n model_file_name = os.path.join(model_path, self._task_id + '.model')\n\n # save model to disk\n dump(model, model_file_name)", "def saveModel(model, file_name):\n with open(SAVE_PATH + file_name, \"wb\") as out_file:\n # wo do not want to save redundant data, so keys and vals are excluded\n pickle.dump(model, out_file)\n print(\"model save to\", SAVE_PATH + file_name)", "def _dump_keras(self, key, model=None, model_path=None, **kwargs):\n kwargs = self._get_basic_kwargs(**kwargs)\n kwargs['model_type'] = 'keras'\n kwargs['timestamp'] = logger.get_now()\n\n if model is not None:\n if model_path is None:\n model_path = f'{self._save_dir}/{key}'\n logger.info(f'keras model saved path = {model_path}')\n model.save_weights(model_path, save_format='tf')\n # model.save(model_path, save_format='tf') # not work if the model is complicated\n kwargs['model_path'] = model_path\n\n self[key] = kwargs", "def save_model(self, model_name):\n\n # Set up the main destination 
folder for the model\n dst_root = './data/LinearSVM/{0:s}'.format(model_name)\n if not os.path.exists(dst_root):\n os.makedirs(dst_root)\n print(f'No folder for LinearSVM model {model_name} storage found')\n print(f'Make folder to store model at')\n\n # Dump the model into the designated folder\n file_name = \"{0:s}_{1:s}.pkl\".format(model_name, self.amine)\n with open(os.path.join(dst_root, file_name), \"wb\") as f:\n pickle.dump(self, f)", "def _dump_pytorch(self, key, model=None, model_path=None, **kwargs):\n kwargs = self._get_basic_kwargs(**kwargs)\n kwargs['model_type'] = 'pytorch'\n kwargs['timestamp'] = logger.get_now()\n\n if model is not None:\n if model_path is None:\n model_path = f'{self._save_dir}/{key}'\n logger.info(f'pytorch model saved path = {model_path}')\n import torch\n torch.save(model.state_dict(), model_path)\n kwargs['model_path'] = model_path\n\n kwargs.pop('_dataloaders', None)\n self[key] = kwargs", "def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)", "def save_model(path: Path, model, config: dict):\n with open(path / \"model.pkl\", \"wb\") as p:\n pickle.dump(model, p)", "def persist_model_utilities(self, model_path: Path) -> None:\n model_filename = self._metadata_filename()\n rasa.utils.io.json_pickle(\n model_path / f\"{model_filename}.priority.pkl\", self.priority\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.meta.pkl\", self.config\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.data_example.pkl\", self.data_example\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.fake_features.pkl\", self.fake_features\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.label_data.pkl\",\n dict(self._label_data.data) if self._label_data is not None else {},\n )\n entity_tag_specs = (\n [tag_spec._asdict() for tag_spec in self._entity_tag_specs]\n if self._entity_tag_specs\n else []\n )\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n model_path / f\"{model_filename}.entity_tag_specs.json\", entity_tag_specs\n )", "def save_model(args,model,epoch):\n path='./model_'+args.name\n if not os.path.exists(path):\n os.mkdir(path)\n model_name='checkpoint_epoch={}'.format(epoch)\n filepath=os.path.join(path,model_name)\n torch.save(model.state_dict(), filepath)", "def dump(self, model_path):\n pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)\n# pickle.dump(self.mapper, gzip.open(os.path.join(model_path, 'mapper.pkl.gz'),'w'),\n# protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, folder):\n if self.is_predict_only:\n raise Exception(\"Model is predict only! 
save not supported!\")\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"depth\": self.depth,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"nr_labels\": self.nr_labels,\n }\n param = self.append_meta(param)\n open(f\"{folder}/param.json\", \"w\", encoding=\"utf-8\").write(json.dumps(param, indent=True))\n for d in range(self.depth):\n local_folder = f\"{folder}/{d}.model\"\n self.model_chain[d].save(local_folder)", "def save_model(model, output):\n\n # model.save(os.path.join(output))\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n\n # tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved at: {}\".format(output))", "def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"model already exists\")\n\t\treturn\n\telse:\n\t\tos.mkdir(path)\n\t\tjoblib.dump(model, path + filename)", "def save_models(\n output_path,\n asv_model,\n asv_preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n):\n asv_state_dict = asv_model.state_dict()\n # Add preprocessing data for Xvectors (if any)\n asv_state_dict.update(asv_preprocessing_parameters)\n torch.save(asv_state_dict, output_path + \"_asv_model\")\n\n # Use existing function to save CM model\n save_cm_model(\n cm_feature_network,\n cm_model,\n bonafide_cm_features,\n output_path + \"_cm_model\"\n )", "def save(self, filename):\n # serialize model to JSON\n model_json = self._model.to_json()\n with open('models/' + filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n self._model.save_weights('models/' + filename + \".h5\")\n print(\"Saved model to disk\")", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return", "def serialize_model(model,model_dir): \n # Make output directory to store model\n pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)\n model_json = model.to_json()\n \n # Serialize model to JSON\n with open(model_dir + '/' + model_dir + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n \n # Serialize weights to HDF5\n model.save_weights(model_dir +'/' + model_dir + \".h5\")\n print(\"Saved model to disk\")\n print(\"Model info stored within local directory: {model_name}/\")\n print(\"Model weights stored as: {model_name}/{model_name}.h5\")\n print(\"Model structure stored as: {model_name}/{model_name}.json\")", "def save_model(args, user_map, item_map, row_factor, col_factor):\n \n model_dir = os.path.join(args.output_dir, 'model')\n \n # write model files to /tmp, then copy to GCS\n gs_model_dir = model_dir\n model_dir = '/tmp/{0}'.format(args.job_name)\n \n os.makedirs(model_dir)\n np.save(os.path.join(model_dir, 'user'), user_map)\n np.save(os.path.join(model_dir, 'item'), item_map)\n np.save(os.path.join(model_dir, 'row'), row_factor)\n np.save(os.path.join(model_dir, 'col'), col_factor)\n \n sh.gsutil('cp', '-r', os.path.join(model_dir, '*'), gs_model_dir)", "def save_model(self, dir=\"\", **kwargs):\n ckpt_fn = os.path.join(dir, f\"model.pkl\")\n torch.save(\n {\n \"global_step\": self.global_step_,\n \"epoch\": self.epoch_,\n \"model\": self.net_.state_dict(),\n \"optimizer\": self.optimizer_.state_dict(),\n \"sampler_state\": self.sampler.state_dict(),\n 
\"model_samples\": list(self.model_samples_),\n \"ais_state\": self.ais_loss.state_dict(),\n \"replay_prob\": self.replay_prob,\n \"max_replay\": self.max_replay,\n },\n ckpt_fn,\n )\n return ckpt_fn", "def save(self, model_path: str) -> None:\n metadata_string = json.dumps({ \"classes\": self.classes })\n with open(os.path.join(model_path, \"metadata.json\"), \"w\") as metadata_file:\n metadata_file.write(metadata_string)\n with self.graph.as_default():\n with self.session.as_default():\n self.model.save_weights(os.path.join(model_path, \"weights.h5\"))", "def save_model(model, model_path) -> None:\n try:\n createFolder(model_path)\n exportJSON(model.to_json(), model_path+'/model.json')\n model.save_weights(model_path+'/model_weights.h5')\n except Exception as error:\n print(f\"Error: save_model(model, {model_path}) -> {error}\")", "def save_model(self, epoch=None, out_tag='my_lstm'):\n\n Utils.check_dir('./models/')\n if epoch is not None:\n self.model.save_weights('{}/models/{}_model_epoch_{}.hdf5'.format(os.getcwd(), out_tag, epoch))\n with open(\"{}/models/{}_model_architecture_epoch_{}.json\".format(os.getcwd(), out_tag, epoch), \"w\") as f_out:\n f_out.write(self.model.to_json())\n else: \n self.model.save_weights('{}/models/{}_model.hdf5'.format(os.getcwd(), out_tag))\n with open(\"{}/models/{}_model_architecture.json\".format(os.getcwd(), out_tag), \"w\") as f_out:\n f_out.write(self.model.to_json())", "def save_model(self, epoch, model):\n filename = 'model-epoch-' + str(epoch) + '.pth'\n model_path = os.path.join(self.models_path, filename)\n torch.save(model.state_dict(), model_path)", "def save_model(path_name, model):\n\n # Specify a path\n PATH = path_name\n \n # Save\n torch.save(model.state_dict(), PATH)", "def model_save(model, name):\n extension = \".pickle\"\n\n save_model_name = os.path.join(MODEL_SAVE_DIR, name + extension)\n pk.dump(model, save_model_name)", "def save_model(self, path):\n try:\n # os.makedirs(osp.dirname(path), exist_ok=1)\n joblib.dump(self.model, path)\n except Exception as e:\n print(e)\n print(\"Couldn't save scikit learn model on path {}!\".format(path))", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def save_model(self, output_dir, epoch=0, iteration=0, losses=None): \n \n saved_filename = 'model_{}_{}.pth'.format(epoch, iteration) \n saved_path = os.path.join(output_dir, saved_filename) \n print('Saving model to {}'.format(saved_path))\n cp = {'epoch': epoch, \n 'iteration': iteration,\n 'loss': losses, \n 'state_dict': self.network.cpu().state_dict()\n }\n self.network.to(device)\n torch.save(cp, saved_path)", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def saveSvmModel(self, model):\n # Create the save dialog box\n name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Model Parameters',\n '', 'pkl files (*.pkl)', 'pkl file (*.pkl)')\n\n if not name:\n return\n # Check the extension when saving\n if self.joblibExt in name:\n joblib.dump(model, name)\n else:\n message = 'Error saving file {}.'.format(name)\n self.messageBox(message)", "def save_best_params(output_dir, best_params, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir,\n 'params',\n '.pkl',\n gene,\n training_data,\n model_options.model,\n predictor,\n 
s=model_options.seed)\n\n with open(output_file, 'wb') as f:\n pkl.dump(best_params, f)", "def save_model(model):\n json_string = model.to_json()\n with open(MODEL_SAVE_JSON, 'w') as fp:\n fp.write(json_string)" ]
[ "0.7630544", "0.7089318", "0.7027596", "0.6957321", "0.6957321", "0.6957321", "0.6866317", "0.68396884", "0.6824873", "0.6792376", "0.6774526", "0.66822237", "0.6664956", "0.6664538", "0.66604686", "0.6630448", "0.66119593", "0.6600588", "0.6593003", "0.65876275", "0.6582033", "0.6567448", "0.6563555", "0.6555261", "0.65513647", "0.65513647", "0.654198", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6537048", "0.6533324", "0.65180856", "0.65157765", "0.6514526", "0.651067", "0.65065104", "0.64978", "0.6490635", "0.6489628", "0.64883596", "0.6481033", "0.64810205", "0.6479417", "0.64508784", "0.644516", "0.644512", "0.6438561", "0.64372456", "0.64325154", "0.64223075", "0.64082795", "0.6407556", "0.64009166", "0.63874316", "0.6382343", "0.6381169", "0.63785166", "0.63678753", "0.6364943", "0.6361879", "0.635422", "0.634844", "0.63477725", "0.6342407", "0.6342303", "0.6336831", "0.6332157", "0.6318925", "0.63062215", "0.62980175", "0.629507", "0.6291618", "0.629076", "0.628828", "0.62798613", "0.62756205", "0.6258248", "0.62557197", "0.6244877", "0.62421316", "0.62357384", "0.62352043", "0.6233829", "0.6227411", "0.6224215", "0.62191606", "0.62161994", "0.6206132", "0.61971766", "0.61886185", "0.6183757", "0.618294", "0.61821795" ]
0.7801818
0
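The document in the record above pickles vars(model_options), i.e. the argparse Namespace flattened to a plain dict. A minimal round-trip sketch under that convention; the Namespace fields and the fixed output path here are illustrative assumptions (the document itself derives its path via a construct_filename helper not shown in full).

    import argparse
    import pickle as pkl

    opts = argparse.Namespace(training_data="expression", model="elasticnet", seed=42)

    # save: Namespace -> dict -> pickle, matching the record's pkl.dump(vars(...), f)
    with open("model_options.pkl", "wb") as f:
        pkl.dump(vars(opts), f)

    # load: pickle -> dict -> Namespace
    with open("model_options.pkl", "rb") as f:
        restored = argparse.Namespace(**pkl.load(f))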
Save serialized (pickled) classifier to output directory.
def save_model(output_dir, model, gene, model_options, predictor='classify'): if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data output_file = construct_filename(output_dir, 'model', '.pkl', gene, training_data, model_options.model, predictor, s=model_options.seed) with open(output_file, 'wb') as f: pkl.dump(model, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump(cls, classifier, filename=None):\n filename = filename or Configuration.get_instance().classifier_file\n with open(filename, 'w') as output_file:\n pickle.dump(classifier, output_file)", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/badlymappedfinder/badlymappedfinder.joblib\",\n )", "def pickle_clf(self, path='models/SentimentClassifier.pkl'):\n with open(path, 'wb') as f:\n pickle.dump(self.clf, f)\n print(\"Pickled classifier at {}\".format(path))", "def save_model(clf, save_folder, filename):\n import pickle\n path = save_folder + filename\n with open(path, 'wb') as handle:\n pickle.dump(clf, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def persist_classifier(clf, X_test, y_test, pickle_file):\n joblib.dump((clf, X_test, y_test), pickle_file)", "def saveModel(self, save_path):\n if not os.path.exists('/'.join(os.path.split(save_path)[:-1])):\n os.makedirs('/'.join(os.path.split(save_path)[:-1]))\n with open(save_path, 'wb') as fw:\n pickle.dump(self.clf, fw)", "def save_to_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.save_to_disk(file_name)", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/repeatsfinder/repeatsfinder.joblib\",\n )", "def save(self, model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))", "def save(self, path: utils.URLPath):\n save_somclassifier_config(self.config, path / \"config.json\")\n self.model.save(str(path / \"model.h5\"))\n io_functions.save_joblib(self.binarizer, path / \"binarizer.joblib\")\n\n io_functions.save_json(self.data_ids[\"validation\"], path / \"ids_validate.json\")\n io_functions.save_json(self.data_ids[\"train\"], path / \"ids_train.json\")", "def save(self, prefix_file):\n self.save_encoder(prefix_file)\n sklearn.externals.joblib.dump(\n self.classifier,\n prefix_file + '_' + self.architecture + '_classifier.pkl'\n )", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def save(self, save_dir='models'):\n with open(os.path.join(save_dir, 'model_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.model, f)\n with open(os.path.join(save_dir, 'vectorizer_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.vectorizer, f)\n with open(os.path.join(save_dir, 'userid2name.pkl'), 'wb') as f:\n pickle.dump(self.userid2name, f)\n with open(os.path.join(save_dir, 'name2userid.pkl'), 'wb') as f:\n pickle.dump(self.name2userid, f)", "def save_model(model, model_filepath):\n # save the classifier\n with open(model_filepath, 'wb') as fid:\n pkl.dump(model, fid)", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)", "def save(self, pickle_fp):\n if isinstance(self.model, KerasClassifier) and self.model_trained:\n 
model_fp = os.path.splitext(pickle_fp)[0]+\".h5\"\n self.model.model.save(model_fp)\n current_model = self.model.__dict__.pop(\"model\", None)\n with open(pickle_fp, \"wb\") as fp:\n dill.dump(self, fp)\n setattr(self.model, \"model\", current_model)\n else:\n dill.dump(self, fp)", "def save(self, filename):\n import pickle\n if path.dirname(filename) and not path.exists(path.dirname(filename)):\n makedirs(path.dirname(filename))\n pickle.dump(self, open(filename, 'wb'), protocol=-1)", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def serialize(cl, filename):\n file = filename+'.pickle'\n makedir(file)\n with open(file, 'wb') as f:\n pickle.dump(cl, f, 2)\n return file", "def save(self, filename):\n path = Path(filename)\n root = Path(*path.parts[:-1])\n root.mkdir(parents=True, exist_ok=True)\n with path.open('wb') as f:\n pickle.dump(self.model, f)", "def save(self):\n\n try:\n joblib.dump(self._clf, self._modelFile)\n except:\n return False\n\n return True", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def save_model(clf, scaler, pickle_file):\n f = open(pickle_file, \"wb\")\n to_dump = {\n 'clf': clf,\n 'scaler': scaler\n }\n pickle.dump(to_dump, f)\n f.close()", "def save_model(self, path):\n try:\n # os.makedirs(osp.dirname(path), exist_ok=1)\n joblib.dump(self.model, path)\n except Exception as e:\n print(e)\n print(\"Couldn't save scikit learn model on path {}!\".format(path))", "def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass", "def save(self, model_name=None, verbose=True):\n with open(self._get_fname(model_name or self.name), 'wb') as f:\n pickle.dump(self.classifier, f, 2)\n if verbose:\n print(\"[{0}] Model saved. To load, use name\\n\\t\\t{1}\".format(\n self.name, model_name\n ))", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def save(self, folder):\n if self.is_predict_only:\n raise Exception(\"Model is predict only! 
save not supported!\")\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"depth\": self.depth,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"nr_labels\": self.nr_labels,\n }\n param = self.append_meta(param)\n open(f\"{folder}/param.json\", \"w\", encoding=\"utf-8\").write(json.dumps(param, indent=True))\n for d in range(self.depth):\n local_folder = f\"{folder}/{d}.model\"\n self.model_chain[d].save(local_folder)", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(path))\n with open(path, 'w') as f:\n pickle.dump(obj, f)", "def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)", "def save(self, filename):\n if '.pkl' not in filename:\n filename = filename + '.pkl'\n with open(filename, 'wb') as f:\n pickle.dump(self, f)", "def dump(self, model_path):\n pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)\n# pickle.dump(self.mapper, gzip.open(os.path.join(model_path, 'mapper.pkl.gz'),'w'),\n# protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, folder):\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"nr_labels\": self.nr_labels,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"bias\": self.bias,\n \"pred_kwargs\": self.pred_params.to_dict(),\n }\n param = self.append_meta(param)\n with open(\"{}/param.json\".format(folder), \"w\") as f:\n f.write(json.dumps(param, indent=True))\n smat_util.save_matrix(\"{}/W.npz\".format(folder), self.W)\n smat_util.save_matrix(\"{}/C.npz\".format(folder), self.C)", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)", "def save(self):\n pickle_save(self.results, 'results', self.main_dir)", "def save(self, filename):\n cPickle.dump(self, open(filename, \"wb\"))", "def SaveDecoderAndData(clf, X, X_not_scaled, y, subjID):\n time_to_save = datetime.datetime.now().isoformat()\n time_to_save = time_to_save.replace('T','-')\n time_to_save = time_to_save.replace(':','-')\n \n model = clf\n model_file = 'Models/' + subjID + '_MI_classifier_' + time_to_save[:19] + '.sav'\n pickle.dump(model, open(model_file, 'wb'))\n \n filepath_export_data = 'Models/' + subjID + '_data_for_MI_classifier_' + time_to_save[:19] + '.npz'\n np.savez_compressed(filepath_export_data, subjID=subjID, X=X, X_not_scaled=X_not_scaled, y=y)", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def save_ml_output(arrays, out_fname, force):\n if not force:\n if os.path.isfile(out_fname):\n return\n try:\n os.makedirs(os.path.dirname(out_fname))\n except FileExistsError:\n pass\n np.save(out_fname, arrays, allow_pickle=False)", "def class2json(classifier, filename = \"classifier\"):\n model_json = classifier.to_json()\n with open(filename + \".json\", \"w\") as json_file:\n 
json_file.write(model_json)\n # Serialize weights to HDF5\n classifier.save_weights(filename + \".h5\")\n print(\"Successfully saved the classifier to file \" + filename + \".\")", "def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)", "def save_model(self, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger.info('Saving model')\n dst_config_file = os.path.join(output_dir, self.CONFIG_FILE)\n if self.fullpath_input_configfile != dst_config_file:\n shutil.copy(self.fullpath_input_configfile, dst_config_file)\n\n pickle.dump(self.word_det_rfc,\n open(os.path.join(output_dir, self.WORD_DET_RFC), 'wb'))\n pickle.dump(self.reg_coeffs, open(\n os.path.join(output_dir, self.REGRESSION_PARAMS), 'wb'))", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_to(self, save_path=\"./\", run_flag='', save_method=\"pickle\"):\n # TODO: Finish the save_method parameters\n time_stamp = self.time_stamp\n time_stamp = self.time_stamp + \"_\" + run_flag\n save_path = os.path.join(save_path, time_stamp)\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if self.feature_importance_pool:\n file_path = os.path.join(save_path, \"feature_importances.pkl\")\n save_file(file_path, self.feature_importance_pool)\n\n if self.feature_importance_hist:\n file_path = os.path.join(save_path, \"feature_importances_hist.png\")\n save_file(file_path, self.feature_importance_hist[0])\n\n if self.area_under_curve_pool:\n file_path = os.path.join(save_path, \"auc_fpr_tpr.pkl\")\n save_file(file_path, self.area_under_curve_pool)\n\n if self.receiver_operating_characteristic_curve:\n file_path = os.path.join(save_path, \"roc_curve.png\")\n save_file(file_path, self.receiver_operating_characteristic_curve[0])\n\n if self.training_report_pool:\n file_path = os.path.join(save_path, \"training_report.pkl\")\n save_file(file_path, self.training_report_pool)\n\n if self.learning_line:\n file_path = os.path.join(save_path, \"learning_curve.png\")\n save_file(file_path, self.learning_line[0])\n\n file_path = os.path.join(save_path, time_stamp + \"_object.pkl\")\n with open(file_path, 'wb') as opfh:\n pickle.dump(self, opfh)", "def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))", "def pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))", "def save(self, output_path):\n with open(output_path, \"wb\") as file:\n dill.dump(self, file)", "def save (self, filename) :\n\t\tserialFile = open (filename, \"wb\")\n\t\tpickle.dump (self.production_rules, serialFile)\n\t\tpickle.dump (self.unitrelation, serialFile)\n\t\tpickle.dump (self.labels, serialFile)\n\t\tpickle.dump (self.keeper, serialFile)\n\t\tpickle.dump (self.strnodes, serialFile)\n\t\tpickle.dump (self.tokens, serialFile)\n\t\tserialFile.close()", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save(self, fname):\n with open(fname, \"wb\") as f:\n cloudpickle.dump(self, f)\n # pickle.dump(self, open(fname, 'wb'))", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n 
pickle.dump(obj, file)", "def save(self, output_path):\r\n self.graph.cleanup().toposort()\r\n model = gs.export_onnx(self.graph)\r\n output_path = os.path.realpath(output_path)\r\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\r\n onnx.save(model, output_path)\r\n log.info(\"Saved ONNX model to {}\".format(output_path))", "def persist(self, model_dir):\n\n classifier_file = os.path.join(model_dir, TOPIC_MODEL_FILE_NAME)\n joblib.dump(self, classifier_file)\n\n return {\"topic_file\": TOPIC_MODEL_FILE_NAME}", "def save(self, directory):\n logging.info(f\"Saving to dir {directory}\")\n self.estimator.save(directory)", "def save(self,filename):\n with open(filename,'wb') as f:\n pickle.dump(self,f)", "def save(self, directory=None, fname=None):\n if directory is None:\n directory = './'\n if fname is None:\n fname = self.name.replace(' ', '_') + '.pickle'\n filename = os.path.join(directory, fname)\n \n with open(filename, 'wb') as f:\n pickle.dump(self, f)", "def save(self, filename):\n with open(filename, 'w') as f:\n pickle.dump((self.components, self.mean), f)", "def pickle_nn(clf):\n\n filename = 'nnMLPClass'\n outfile = open(filename, 'wb')\n pickle.dump(clf, outfile)\n outfile.close()", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)", "def save(self,filename): \n with open(filename, 'wb') as f:\n pickle.dump(self,f)", "def save(self, tfidf_vectorizer_path):\n with open(tfidf_vectorizer_path, \"wb\") as fw:\n pickle.dump(self, fw)", "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)", "def save(self, path=\"word2vec_keras.tar.gz\"):\n tokenizer_path = os.path.join(tempfile.gettempdir(), \"tokenizer.pkl\")\n label_encoder_path = os.path.join(tempfile.gettempdir(), \"label_encoder.pkl\")\n params_path = os.path.join(tempfile.gettempdir(), \"params.pkl\")\n keras_path = os.path.join(tempfile.gettempdir(), \"model.h5\")\n w2v_path = os.path.join(tempfile.gettempdir(), \"model.w2v\")\n\n # Dump pickle\n pickle.dump(self.tokenizer, 
open(tokenizer_path, \"wb\"))\n pickle.dump(self.label_encoder, open(label_encoder_path, \"wb\"))\n pickle.dump(self.__attributes__(), open(params_path, \"wb\"))\n pickle.dump(self.w2v_model, open(w2v_path, \"wb\"))\n self.k_model.save(keras_path)\n # self.w2v_model.save(w2v_path)\n\n # Create Tar file\n tar = tarfile.open(path, \"w:gz\")\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n tar.add(name, arcname=os.path.basename(name))\n tar.close()\n\n # Remove temp file\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n os.remove(name)", "def _set_final_model(self, classifier, X, y):\n\n # standardize data if specified\n if self._standardize_data:\n X = self._final_scaler.transform(X)\n\n # set the final model to the fit classifer\n self._final_model = classifier.fit(X, y)\n\n if self._logger: self._logger.info(f'({self.name}) Storing final model to disk.')\n\n # Store the model on disk as .pickle\n pickle.dump((self._final_model, self._standardize_data, self._optimal_threshold), open(self._model_path, \"wb\"))", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def writePickle(self, filename):\n \n assert filename.endswith('.pkl')\n file = open(filename, 'wb')\n cPickle.dump(self, file, cPickle.HIGHEST_PROTOCOL)", "def save_pipeline(model_to_persist):\n\n save_file_name = 'model.pkl'\n save_path = configuracion.TRAINED_MODEL_DIR / save_file_name\n joblib.dump(model_to_persist, save_path)\n\n print('saved pipeline')", "def save_model(self):\n pickle.dump(self, open(\"Logistic_Regression_Model.pkl\", \"wb\"))", "def save(self, obj, filename):\n if not self.enabled:\n return\n\n # get unique filepath and filename\n index = 0\n while True:\n filepath = join(self.path, filename+\"_\"+str(index))\n if os.path.isfile(filepath):\n index = index + 1\n continue\n break\n\n # save object\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n try:\n pickle.dump(obj, f)\n except Exception as e:\n log.exception(e)\n log.warning(f\"save failed for {filename} {type(obj)}\")", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def save(self, path):\n print(\"Warning: Default save used\")\n with open(path, 'wb') as f:\n pickle.dump(self, f)", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def save(self, main_dir):\n with open(f'{main_dir}/models/model_N{self.N}.pkl', 'wb') as f:\n pickle.dump(self.model, f)", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save(self, target, withdP=False):\n from six.moves.cPickle import dump\n data = self.serialize(withdP=withdP)\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target, withdP=False):\n from six.moves.cPickle import dump\n data = self.serialize(withdP=withdP)\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, filename):\n with gzip.open(filename, \"w\") as f:\n f.write(pickle.dumps(self))", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def 
save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)", "def save(self, file):\n if isinstance(file, basestring):\n with open(file, \"wb\") as file:\n self.save(file)\n else:\n to_save = (self.model, self.features, self.labels)\n dump(to_save, file, HIGHEST_PICKLE_PROTOCOL)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save(self, filename='nn_model.pkl'):\n seconds = time.time()\n\n directory = os.path.join(os.curdir, 'models')\n filepath = os.path.join(directory, str(seconds)+'_'+filename)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(filepath, 'wb') as f:\n pickle.dump(self, f)\n f.close()", "def save(self, fname, io=None):\n ckpt_path = self.manager.save()\n logging.info(f'Saved to {ckpt_path}')\n\n print_summary(self.model)\n\n if io is not None:\n io._upload_dir_to_bucket(self.save_path, self.save_path, ['ckpt', 'checkpoint'])", "def save(self, filename=\"matpipe.p\"):\n temp_backend = self.learner.backend\n self.learner._backend = self.learner.backend.fitted_pipeline_\n for obj in [self, self.learner, self.reducer, self.cleaner,\n self.autofeaturizer]:\n obj._logger = None\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n self.learner._backend = temp_backend", "def to_file(self, file_io):\n pickle.dump(self.__object, file_io)", "def save(self, fname):\n\n def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True\n\n with open(fname, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save_pickle(self, path):\n with open(path, 'wb') as f:\n pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, filename, **kwargs):\n with open(filename, 'wb') as fin:\n pickle.dump(self, fin, **kwargs)", "def to_file(self, filename):\n\n output_dict = {'random_forest': self.random_forest,\n 'apply_preprocessing': self.apply_preprocessing,\n 'apply_postprocessing': self.apply_postprocessing}\n pickle.dump(output_dict, open(filename, \"wb\"))", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def write_pk(obj, filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'wb') as fd:\n pickle.dump(obj, fd)", "def save_pickle(obj,path,mkdirs=True): \n if mkdirs:\n ensure_dir(path)\n with open(path,'wb') as file:\n pickle.dump(obj,file,protocol=pickle.HIGHEST_PROTOCOL)" ]
[ "0.76594853", "0.74418044", "0.73540056", "0.73168766", "0.7297143", "0.7217382", "0.7135482", "0.71012557", "0.6987071", "0.69320947", "0.69144404", "0.68949693", "0.67491394", "0.67209446", "0.654902", "0.6546712", "0.6524628", "0.65131825", "0.6482549", "0.6466641", "0.6455011", "0.6447363", "0.6414857", "0.6396994", "0.63957584", "0.63940734", "0.63926977", "0.6353983", "0.63471556", "0.63420314", "0.63420314", "0.63316727", "0.6298629", "0.629389", "0.6292611", "0.62708604", "0.6270235", "0.62668186", "0.6266373", "0.62609655", "0.6243818", "0.6222352", "0.6221961", "0.6214938", "0.62102383", "0.6209754", "0.6205175", "0.6202322", "0.62009656", "0.61931086", "0.6189034", "0.61860013", "0.61843944", "0.6184334", "0.6166533", "0.6164995", "0.61592984", "0.61501896", "0.6147036", "0.61398655", "0.6122405", "0.6121732", "0.6120118", "0.61103487", "0.6103949", "0.6101679", "0.60950446", "0.6091968", "0.60898215", "0.6089774", "0.60896933", "0.6083574", "0.6082947", "0.60763156", "0.6075846", "0.6074506", "0.6070949", "0.6070871", "0.6068138", "0.60543823", "0.6052949", "0.6041897", "0.6041897", "0.6040151", "0.6039345", "0.6029209", "0.60287035", "0.60281587", "0.6024283", "0.60208696", "0.6017649", "0.6015989", "0.6014111", "0.60125405", "0.6011977", "0.6010208", "0.60078025", "0.60067207", "0.60016483", "0.5999282" ]
0.61762613
54
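The negative passages in the row above are dominated by a single recurring idiom: pickling an object to a file path. For comparison, a minimal sketch of that shared pattern, written fresh here; the helper name, the directory-creation step, and the protocol choice are illustrative assumptions, not drawn from any one snippet above:

import os
import pickle

def save_pickle(obj, path):
    # Ensure the parent directory exists; dirname('') falls back to '.'.
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    # Context manager guarantees the handle is closed even on error.
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)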
Save parameter info to output directory.
def save_best_params(output_dir, best_params, gene, model_options,
                     predictor='classify'):
    if not isinstance(model_options.training_data, str):
        training_data = '.'.join(model_options.training_data)
    else:
        training_data = model_options.training_data
    output_file = construct_filename(output_dir, 'params', '.pkl', gene,
                                     training_data, model_options.model,
                                     predictor, s=model_options.seed)
    with open(output_file, 'wb') as f:
        pkl.dump(best_params, f)
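A hedged usage sketch of the function above. `construct_filename` and `pkl` (the pickle import alias) come from the snippet's own context and are not defined here; the options object and all argument values are hypothetical, chosen only to exercise the attributes the snippet reads (training_data as str or list, model, seed):

from types import SimpleNamespace

# Hypothetical options object with the attributes save_best_params reads.
model_options = SimpleNamespace(training_data=['rnaseq', 'methylation'],
                                model='elasticnet', seed=42)
save_best_params('results/', {'alpha': 0.01, 'l1_ratio': 0.5},
                 gene='TP53', model_options=model_options)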
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save_params(self, output_folder: str, checkpoint: int):\n arg_params, aux_params = self.module.get_params() # sync aux params across devices\n self.module.set_params(arg_params, aux_params)\n self.params = arg_params\n params_base_fname = C.PARAMS_NAME % checkpoint\n self.save_params_to_file(os.path.join(output_folder, params_base_fname))", "def on_save_parameters(self):\n obj_points = self.get_object_points()\n cam_pos = self.get_camera_position()\n distortion = self.get_distortion_coeeficients()\n\n d = {\n 'object positions': obj_points,\n 'camera positions': cam_pos,\n 'distortion coefficients': distortion\n }\n\n jsn = json.dumps(d)\n h = hashlib.sha1(jsn.encode('utf-8')).hexdigest()\n fn = f'{h}.json'\n\n with open(fn, 'w') as f:\n f.write(jsn)\n\n self.statusBar().showMessage(f'Parameters have been save to {fn}.')\n self.param_file = fn", "def save_parameters(self):\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n \n params_var = {}\n params_var['eta'] = self.system_param['eta']\n params_var['cov'] = self.system_param['cov']\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)", "def save_settings(self, param_state):\n with open(CONFIG_DIR / self.name_parameters, 'wb') as f:\n pickle.dump(param_state, f)", "def save_params(params):\n with open('params.p', 'wb') as out_file:\n pickle.dump(params, out_file)", "def save_params(params):\r\n pickle.dump(params, open('params.p', 'wb'))", "def write_parameter_sets(self, filename = 'inputparameterfile', *args,\n **kwargs):\n try:\n np.savetxt(filename, self.parset2run, *args, **kwargs)\n print('file saved in directory %s' % os.getcwd())\n except PystanSequenceError:\n print('Parameter sets to run model with not yet setup.')", "def save_params():\n out_json = os.path.join(OUTPUT_DIR, OUT_JSON)\n out_dict = {\n \"librosa_version\": librosa.__version__,\n \"numpy_version\": np.__version__,\n \"SR\": SR,\n \"N_MELS\": N_MELS,\n \"N_FFT\": N_FFT,\n \"HOP_LENGTH\": HOP_LENGTH,\n \"MEL_FMIN\": MEL_FMIN,\n \"MEL_FMAX\": MEL_FMAX\n }\n with open(out_json, 'w') as f:\n json.dump(out_dict, f, indent=4)", "def save_params(self):\n try:\n with open(self.json_file, \"w\") as fl:\n json.dump(self.params, fl, indent=4)\n except KeyError as inst:\n print(inst)", "def save_params(outdir, params):\n sio.savemat(os.path.join(outdir, \"copy_params.mat\"), prepare_save_metadata(params))\n\n return True", "def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")", "def save_model_params(self):\n params_dict = self.get_model_params()\n if self.params_filepath is not None:\n file_params = data_functions.load_json(self.params_filepath)\n if file_params != params_dict: # cheking if the parametes for this\n # session are diffrent then those\n # in the source file\n self.session_number += 1\n\n curr_file_name = (\n self.params_file_name + PARAMS_UPDATE_FORMAT + 'json').format(\n sess=self.session_number,\n steps=self.samples_seen)\n\n data_functions.save_json(params_dict, curr_file_name, self.curr_folder)\n self.params_filepath = 
os.path.join(self.curr_folder, curr_file_name)", "def save(self):\n with open(os.path.join(self.save_path, \"experiment.delira.pkl\"),\n \"wb\") as f:\n pickle.dump(self, f)\n\n self.params.save(os.path.join(self.save_path, \"parameters\"))", "def save_experiment_config(self):\n\n if (self.use_dist and dist.get_rank() == 0) or not self.use_dist:\n logfile = os.path.join(self.experiment_dir, 'parameters.txt')\n log_file = open(logfile, 'w')\n log_file.write('\\n')\n json.dump(self.args.__dict__, log_file, indent=2)\n log_file.write('\\n')\n log_file.close()", "def write_parameters(data, run_dir, is_parallel):\n pkio.write_text(\n run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(\n data,\n run_dir,\n is_parallel,\n ),\n )", "def _log_parameters(self):\n\n # Create file path\n path = self.output / (str(self.run_id) + '__' + self.name + '.json')\n path.parent.mkdir(parents=True, exist_ok=True)\n\n # Make sure parameters are JSON serializable\n parameters = vars(self).copy()\n for key, value in parameters.items():\n try:\n json.dumps(value)\n except TypeError:\n parameters[key] = str(value)\n \n # Save as JSON\n with open(path, 'w') as file:\n json.dump(parameters, file, indent=4, sort_keys=True)\n\n return", "def save(self, folder):\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"nr_labels\": self.nr_labels,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"bias\": self.bias,\n \"pred_kwargs\": self.pred_params.to_dict(),\n }\n param = self.append_meta(param)\n with open(\"{}/param.json\".format(folder), \"w\") as f:\n f.write(json.dumps(param, indent=True))\n smat_util.save_matrix(\"{}/W.npz\".format(folder), self.W)\n smat_util.save_matrix(\"{}/C.npz\".format(folder), self.C)", "def save(self, path, suffix=0):\n if os.path.exists(path) and not self.overwrite:\n raise FileExistsError(\"Overwrite is False!\")\n else :\n os.makedirs(path, exist_ok=True)\n os.makedirs(path + \"/params\", exist_ok=True)\n info = {key: getattr(self, key) for key in self.attr_keys \n if key != \"model_params\"}\n pickle.dump(info, open(path + \"/data.b\", \"wb\"))\n pickle.dump(self.model_params, open(path + \"/params/param_\" + str(suffix) + \".b\", \"wb\"))\n yaml.dump(self.info_view(), open(path + \"/info.yaml\", \"w\"))", "def save(self, filename):\n with open(filename, 'w') as f:\n pickle.dump(self.pca.get_params(deep=True), f)", "def _write_model_parameters(self, param_dir):\n parameters = {\n \"training_epochs\" : self.training_parameters.training_epochs,\n \"learning_rate\" : self.model_parameters.learning_rate,\n \"momentum\" : self.model_parameters.momentum,\n \"model\" : self.model_parameters.model,\n \"input_keep_probability\" : self.model_parameters.input_keep_probability,\n \"output_keep_probability\" : self.model_parameters.output_keep_probability,\n \"sequence_length\" : self.model_parameters.sequence_length,\n \"input_dimension\" : self.model_parameters.input_dimension,\n \"batch_size\" : self.model_parameters.batch_size,\n \"state_size\" : self.model_parameters.state_size,\n \"n_layers\" : self.model_parameters.n_layers,\n \"n_classes\" : self.model_parameters.n_classes,\n \"log_dir\" : self.directories.log_dir,\n \"checkpoint_dir\" : self.directories.checkpoint_dir,\n }\n\n with open(self._parameters_file(param_dir), \"w\") as f:\n json.dump(parameters, f, indent=4)", "def save_params(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/params'))\n sh['params'] = 
self.params\n sh.close()", "def writeparamfile(self,filename_): # 3\n res = self.__obj.writeparamfile(filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def save_parameters(self):\n for env, params in self._parameters_dict.items():\n self.logger.save_params(env, params)", "def save_parms(self, name):\n self._save_parms(name.encode())", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def Save(self,val=0):\n u,p = self.problem.up_next.split(True,**self.extra_kwarg)\n if self.first_save:\n self.u_file = self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val)\n self.p_file = self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val)\n # self.nuT_file = self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val)\n self.first_save = False\n else:\n self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val,file=self.u_file)\n self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val,file=self.p_file)\n # self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val,file=self.nuT_file)", "def save_pkl(self, filename):\n param_dict = {}\n param_dict['learningrate'] = self.learningrate\n param_dict['verbose'] = self.verbose\n param_dict['loadsize'] = self.loadsize\n param_dict['batchsize'] = self.batchsize\n param_dict['momentum'] = self.momentum\n param_dict['epochcount'] = self.epochcount\n param_dict['momentum_batchcounter'] = self.momentum_batchcounter\n param_dict['incs'] = dict(\n [(p.name, self._incs[p].get_value()) for p in self._params])\n if self.rmsprop is not None:\n param_dict['avg_grad_sqrs'] = dict(\n [(p.name, self._avg_grad_sqrs[p].get_value()) for p in self._params])\n pickle.dump(param_dict, open(filename, 'wb'))", "def save(self):\n for name, param in self.components.items():\n param_path = os.path.join(self.model_path, \"%s.mat\" % name)\n if hasattr(param, 'params'):\n param_values = {p.name: p.get_value() for p in param.params}\n else:\n param_values = {name: param.get_value()}\n scipy.io.savemat(param_path, param_values)", "def export_parameters():\n exp_folder = os.path.join(os.getcwd(),'exported')\n if not os.path.exists(exp_folder):\n os.makedirs(exp_folder)\n try:\n json_p = os.path.join(os.path.dirname(__file__), 'parameters.json')\n with open(json_p,\"r\") as d_file:\n para = json.load(d_file)\n para_pd = pd.json_normalize(para[\"cls\"])\n para_pd.to_csv(os.path.join(exp_folder,\"exported_cls_parameters.csv\"),index = False)\n para_pd = pd.json_normalize(para[\"reg\"])\n para_pd.to_csv(os.path.join(exp_folder,\"exported_reg_parameters.csv\"),index = False)\n print('Done with the parameters setting file export.')\n except:\n print('Failed to export the parameters file.')", "def save_params():\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)", "def save_parameters(gp, target):\n pdict = {}\n pdict['likelihood'] = gp.likelihood.get_free_state()[0]\n pdict['kern_variance'] = gp.kern.variance.get_free_state()[0]\n pdict['kern_lengthscale'] = list(gp.kern.lengthscales.get_free_state())\n pdict['log_likelihood'] = gp._objective(gp.get_free_state())[0][0]\n #pdict = {n:list(gp[n].flatten()) for n in gp.parameter_names()}\n with open(target, 'w') as f:\n json.dump(pdict, f)", "def save_params(model_name: str):\n 
with open(model_name + '.params', 'w') as f:\n json.dump(pr.__dict__, f)", "def __openParameterfile(self, filename):\n #TODO: change from pure text file to xml?\n try:\n import CompuCellSetup\n self.__fileHandle, self.__fullFileName = CompuCellSetup.openFileInSimulationOutputDirectory(filename, \"a\")\n except IOError:\n print \"Could not open file \", filename, \\\n \" for writing. Check if you have necessary permissions.\"", "def save(self, output, data):", "def _write_params(self, force=False):\n\t\tif force or not os.path.exists(self._get_params_filepath()):\n\t\t\tf = open(self._get_params_filepath(), 'w')\n\t\t\tf.write(\"\\n\".join(self.params))\n\t\t\tf.write(\"\\n\")\n\t\t\tf.close()\n\t\t\tlogger.debug(\"Wrote %s\" % (self._get_params_filepath()))\n\t\telse:\n\t\t\tlogger.debug(\"The params file already exists, I don't overwrite it.\")", "def cache_parameters(self):\n sw_parameter_dir = self.args.parameter_dir + '/sw'\n waterer = Waterer(self.args, self.input_info, self.reco_info, self.germline_seqs, parameter_dir=sw_parameter_dir, write_parameters=True)\n waterer.run()\n self.sw_info = waterer.info\n self.write_hmms(sw_parameter_dir)\n parameter_out_dir = self.args.parameter_dir + '/hmm'\n self.run_hmm('viterbi', parameter_in_dir=sw_parameter_dir, parameter_out_dir=parameter_out_dir, count_parameters=True)\n self.write_hmms(parameter_out_dir)", "def _write_params_file(model_config: base_model_params.BaseModelParams,\n job_log_dir: str) -> None:\n if jax.process_index() == 0:\n params_fpath = os.path.join(job_log_dir, 'model_params.txt')\n if not tf.io.gfile.exists(job_log_dir):\n tf.io.gfile.makedirs(job_log_dir)\n with tf.io.gfile.GFile(params_fpath, 'w') as params_file:\n datasets = model_config.datasets()\n for dataset in datasets:\n params_file.write(dataset.ToText())\n params_file.write('\\n\\n')\n params_file.write(model_config.task().ToText())", "def write_results_to_file(self, param):\n results_file = open('%stheoretical_results_%s.yml' % (directory, str(param)), 'w')\n results_file.write(yaml.dump(self.mean_time_to_absorbtion, default_flow_style=False))\n results_file.close()", "def _persist_output(self, output, dir):\r\n try:\r\n mkdirp(dir)\r\n filename = os.path.join(dir, 'output.pkl')\r\n numpy_pickle.dump(output, filename, compress=self.compress)\r\n if self._verbose > 10:\r\n print('Persisting in %s' % dir)\r\n except OSError:\r\n \" Race condition in the creation of the directory \"", "def writeSettings(self):\n settings = QtCore.QSettings()\n output_directory = self.ui.outputDirLineEdit.text()\n settings.setValue(\"output_directory\", output_directory)", "def saveToFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n stream = open(path,\"w\")\n yaml.dump(self.parameters(),stream)", "def saveParams(self, trnParams):\n\n SystemIO.save(trnParams.toNpArray(), self.path.model_info_file)", "def saveParams(self, trnParams):\n\n SystemIO.save(trnParams.toNpArray(), self.path.model_info_file)", "def save(self, output, data):\n pass", "def _log_params(self):\n params_path = os.path.join(self._log_dir, self._name + \"params.json\")\n logger.info(\"Writing params to {}\".format(params_path))\n\n params = [(str(k),str(v)) for k,v in self.__dict__.items()]\n\n with open(params_path, 'w') as params_file:\n json.dump(dict(params), params_file, indent=4)", "def save(sans, describer, minParams, minPars, stats, location, fitInfo, description):\n\n while path.exists(location) == False:\n print('error: file path does not exist. 
Please input a valid file path')\n location = input('file path: ')\n\n # for idx, char in enumerate(sans.expData.shear[0]):\n # if char != ' ':\n # continue\n # else:\n # shearIdx = idx\n # break\n\n # Build name for modelled scattering data\n # shear = sans.expData.shear[0][0:shearIdx]\n shear = sans.expData.shear[0]\n\n name = sans.expData.sample[0] + '_' + shear + 'ps'\n post1 = '_sim'\n type1 = '.dat'\n\n saveName1 = name + post1 + describer + '_'\n # versionNum1 = input(\"Input a version number: \" )\n versionNum1 = description\n\n # Write modelled scattering data to 3 column dat file\n write_3_column(location + saveName1 + versionNum1 + type1, sans)\n\n # Build name for modelled scattering data statistics\n post2 = '_simInfo'\n type2 = '.txt'\n\n saveName2 = name + post2 + describer + '_'\n\n output = []\n\n # Build output file\n output.append('qmin = ' + str(sans.qmin))\n output.append('ftol = ' + str(fitInfo[0]))\n output.append('method = ' + str(fitInfo[1]))\n output.append(' ')\n\n for key, val in minParams.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n output.append(' ')\n\n output.append(' static parameters ')\n for key, val in sans.staticPars.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n\n output.append(' ')\n\n output.append('Fitting_performed_over_the_following_parameters:')\n for key in minPars.keys():\n output.append(str(key))\n\n output.append('Returned_the_following_goodness_of_fit_measures:')\n output = output + stats\n output.append(str(datetime.datetime.now()))\n\n # Write output to txt file\n with open(location + saveName2 + versionNum1 + type2, 'w') as file:\n for lines in output:\n file.write(lines)\n file.write(\"\\n\")\n\n print('file was saved with filename: ' + saveName1 + versionNum1 + type1)\n return", "def save_simulation_file(self):\n a = self.ui.inputfile.text()\n a = self.get_root_file_name(a)\n a = a.split('_a.txt')\n output_suffix = self.ui.output_suffix.text()\n simfile_name = self.input_dir+'/'+sgGL.SIMFILES_PATH + a[0] + '_' +\\\n sgcom.create_file_suffix(self.algorithm,output_suffix,self.ciclos)+\\\n '.sim'\n simulation_selected_filename = QtGui.QFileDialog.getSaveFileName(self,\n \"Save simulation parameters\",\n simfile_name)\n if len(simulation_selected_filename)>0:\n simulation_params.write2file(simulation_selected_filename)", "def para_saver(self):\n\n para_list = []\n path = self.lineEdit_params.text()\n # we need the 'image' button to be checked.\n radio_is_circle = self.radioButton_circle.isChecked()\n\n if radio_is_circle:\n self.radioButton_image.setChecked(True)\n\n file = open(path, 'wb')\n # we go over all instances of the sliderclass to save their respective parameters\n for clazz in SliderClass.all_sliders:\n para1 = clazz._params_image\n para_list.append(para1)\n if clazz.radio_image is not None:\n para2 = clazz._params_circle\n para_list.append(para2)\n # manually add the morph state, since it is no slider class yet they are parameters\n para_list.append(self.morph_state)\n para_list.append(self.coords)\n para_list.append(self.checkBox_segment.isChecked())\n para_list.append(radio_is_circle)\n\n pickle.dump(para_list, file)\n file.close()\n\n if radio_is_circle:\n self.radioButton_circle.setChecked(True)", "def save_parms(self, save_to_db=False, parms_file=None):\n _db = {}\n _db['inputs'] = [item.parms['job_name'] 
for item in self.get_renderable_inputs()]\n _db['class_name'] = self.__class__.__name__\n _db['backend_name'] = self.manager.__class__.__name__\n _db['parms'] = self.parms\n\n if not parms_file:\n parms_file = os.path.expandvars(self.parms['script_path'])\n parms_file = os.path.join(parms_file, self.parms['job_name']) + \".json\"\n\n with open(parms_file, 'w') as file:\n result = json.dump(_db, file, indent=2)\n return result, parms_file", "def setup(self, force=False):\n if not os.path.exists(self.expdir):\n logging.info(\"create directory: \"+self.expdir)\n os.makedirs(self.expdir)\n\n pfile = join(self.expdir, XPARAM)\n if os.path.exists(pfile) and not force:\n raise RuntimeError(repr(pfile)+\" param file already exists\")\n self.params.write(join(self.expdir, XPARAM))", "def save(self):\n f = open(os.path.join(self.gui.lnp.init_dir, 'init.txt'), 'w')\n f.write(self.init_text.get('1.0', 'end'))\n f.close()\n f = open(os.path.join(self.gui.lnp.init_dir, 'd_init.txt'), 'w')\n f.write(self.d_init_text.get('1.0', 'end'))\n f.close()\n self.gui.load_params()", "def save(self, directory):\n pass # pragma: no cover", "def save(self, filename, ftype='HDF5'):\n from . import Param\n from ...util.misc import param_to_array\n def gather_params(self, plist):\n if isinstance(self,Param):\n plist.append(self)\n plist = []\n self.traverse(gather_params, plist)\n names = self.parameter_names(adjust_for_printing=True)\n if ftype=='HDF5':\n try:\n import h5py\n f = h5py.File(filename,'w')\n for p,n in zip(plist,names):\n n = n.replace('.','_')\n p = param_to_array(p)\n d = f.create_dataset(n,p.shape,dtype=p.dtype)\n d[:] = p\n if hasattr(self, 'param_array'):\n d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)\n d[:] = self.param_array\n f.close()\n except:\n raise 'Fails to write the parameters into a HDF5 file!'", "def write_model_params(self, file_name, params):\n params_to_save = {}\n for key, value in params.items():\n params_to_save[key.name] = value\n filename = os.getcwd() + file_name + \".txt\"\n print(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, mode='w+', newline='') as params_file:\n #params_writer = csv.writer(params_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n params_file.write(json.dumps(params_to_save))", "def write_initparams(params, outdir, padding_var=7, paramsfn='parameters', skiplat=False, skipglat=False):\n paramfile = outdir + paramsfn + '.txt'\n with open(paramfile, 'w') as myfile:\n myfile.write('# Parameters\\n')\n\n dio.ensure_dir(outdir)\n for key in params:\n if key == 'reg1' or key == 'reg2' or key == 'reg3':\n np.savetxt(outdir + key + '.txt', params[key], fmt='%d', delimiter=',', header=key + ' particle IDs')\n if key == 'xyv0':\n np.savetxt(outdir + 'xyv0.txt', params['xyv0'], delimiter=',',\n header='xy0 (initial positions) v0 (initial velocities)')\n elif key == 'xy':\n if not skiplat:\n np.savetxt(outdir + 'xy.txt', params['xy'], delimiter=',',\n header='xy0 (undeformed lattice positions from mesh)')\n elif key == 'KL':\n if not skiplat:\n np.savetxt(outdir + 'KL.txt', params['KL'], fmt='%i', delimiter=',',\n header='KL (Bond Connectivity List)')\n elif key == 'NL':\n if not skiplat:\n np.savetxt(outdir + 'NL.txt', params['NL'], fmt='%i', delimiter=',', header='NL (Neighbor List)')\n elif key == 'BND':\n np.savetxt(outdir + 'BND.txt', params['BND'], fmt='%i', header='BND (Boundary List)')\n elif key == 'OmK':\n if not skipglat:\n np.savetxt(outdir + 'OmK.txt', 
params['OmK'], fmt='%f', delimiter=',',\n header='OmK (spring frequency array, for Nash limit: (-1)^(c+b)kl^2/Iw')\n elif key == 'OmG':\n if not skipglat:\n np.savetxt(outdir + 'Omg.txt', params['OmG'], fmt='%f', delimiter=',',\n header='Omg (gravitational frequency array, for Nash limit: (-1)^(c+1)mgl/Iw')\n elif key == 'LVUC':\n if not skiplat:\n np.savetxt(outdir + 'LVUC.txt', params['LVUC'], fmt='%i', delimiter=',',\n header='Lattice Vector and Unit cell vector coordinates')\n else:\n with open(paramfile, 'a') as myfile:\n # print 'Writing param ', str(key)\n # print ' with value ', str(params[key])\n # print ' This param is of type ', type(params[key])\n\n if isinstance(params[key], str):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + params[key] + '\\n')\n elif isinstance(params[key], np.ndarray):\n # print params[key].dtype\n if key == 'BIND':\n print 'BIND = ', str(params[key]).replace('\\n', '')\n\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + \", \".join(np.array_str(params[key]).split()).replace('[,', '[') + '\\n')\n # if params[key].dtype == 'float64':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ np.array_str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # elif params[key].dtype == 'int32':\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n # else:\n # myfile.write('{{0: <{}}}'.format(padding_var).format(key)+\\\n # '= '+ str(params[key]).replace('\\n','').replace(' ',',') +'\\n')\n elif isinstance(params[key], list):\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + str(params[key]) + '\\n')\n else:\n # print key, ' = ', params[key]\n myfile.write('{{0: <{}}}'.format(padding_var).format(key) + \\\n '= ' + '{0:.12e}'.format(params[key]) + '\\n')\n\n # elif key == 'LV':\n # np.savetxt(outdir+'LV.txt',params['LV'], fmt='%18e',delimiter=',', header='Lattice Vector coordinates')\n # elif key == 'UC':\n # np.savetxt(outdir+'UC.txt',params['UC'], fmt='%18e',delimiter=',', header='Unit cell vector coordinates')\n #\n # elif key == 'h':\n # with open(outdir+'h.txt', \"w\") as hfile:\n # hfile.write(\"# h (time step) \\n{0:5e}\".format(h) )\n # elif key == 'beta':\n # with open(outdir+'beta.txt', \"w\") as betafile:\n # betafile.write(\"# beta (damping coeff) \\n{0:5e}\".format(beta) )", "def writeParamToFile(self, file, sect):\r\n f = configparser.ConfigParser()\r\n f.add_section(sect)\r\n\r\n for (key, value) in self.m_param.items():\r\n f.set(sect, key, value)\r\n # print(key + ':' + value)\r\n f.write(open(file, 'w'))", "def write_absorb_results_to_file(self, param):\n results_file = open('%sabsorbtion_probabilities_%s.yml' % (directory, str(param)), 'w')\n results_file.write(yaml.dump(self.absorbtion_probabilities, default_flow_style=False))\n results_file.close()", "def save(self, folder):\n if self.is_predict_only:\n raise Exception(\"Model is predict only! 
save not supported!\")\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"depth\": self.depth,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"nr_labels\": self.nr_labels,\n }\n param = self.append_meta(param)\n open(f\"{folder}/param.json\", \"w\", encoding=\"utf-8\").write(json.dumps(param, indent=True))\n for d in range(self.depth):\n local_folder = f\"{folder}/{d}.model\"\n self.model_chain[d].save(local_folder)", "def _write_dataset(self):\n if self.output_file:\n if self.dryrun:\n sys.stdout.write(f\"{self.output_file.resolve()}\\n{self.parameter_study}\\n\")\n else:\n self.output_file.parent.mkdir(parents=True, exist_ok=True)\n self._conditionally_write_dataset(self.output_file, self.parameter_study)\n else:\n for parameter_set_file, parameter_set in self.parameter_study.groupby(_set_coordinate_key):\n parameter_set_file = pathlib.Path(parameter_set_file)\n # If no output file template is provided, print to stdout\n if not self.provided_output_file_template:\n sys.stdout.write(f\"{parameter_set_file.name}\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n # If overwrite is specified or if file doesn't exist\n elif self.overwrite or not parameter_set_file.is_file():\n # If dry run is specified, print the files that would have been written to stdout\n if self.dryrun:\n sys.stdout.write(f\"{parameter_set_file.resolve()}:\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n else:\n self._conditionally_write_dataset(parameter_set_file, parameter_set)", "def save_output(self):\n # Auxiliary functions\n def intro(otype, suffix):\n self.logprint(\"Saving {}...\".format(otype))\n dirname = os.path.join(self.outpath,\\\n self.conf[\"output_prefix\"] + \"_files/{}\".format(suffix))\n if os.path.exists(dirname): # Overwrite existing output\n shutil.rmtree(dirname)\n os.makedirs(dirname)\n return(dirname)\n def save(obj, filename):\n try:\n f = open(filename, \"wb\")\n pickle.dump(obj, f)\n finally:\n f.close()\n def outro(otype): self.logprint(\"{} saved.\".format(otype).capitalize())\n # Saving output\n if self.conf[\"output_mode\"] >= 2: # Save all snapshot pops\n dirname = intro(\"snapshot populations\", \"populations/snapshots\")\n for n in xrange(self.conf[\"n_runs\"]):\n for m in xrange(self.conf[\"n_snapshots\"]):\n pop = self.runs[n].record[\"snapshot_pops\"][m]\n filename = dirname + \"/run{0}_s{1}.pop\".format(n,m)\n save(pop, filename)\n del self.runs[n].record[\"snapshot_pops\"]\n outro(\"snapshot populations\")\n if self.conf[\"output_mode\"] >= 1: # Save final populations\n dirname = intro(\"final populations\", \"populations/final\")\n for n in xrange(self.conf[\"n_runs\"]):\n pop = self.runs[n].record[\"final_pop\"]\n filename = dirname + \"/run{}.pop\".format(n)\n save(pop, filename)\n del self.runs[n].record[\"final_pop\"]\n outro(\"final populations\")\n if self.conf[\"output_mode\"] >= 0: # Save records\n dirname = intro(\"run records\", \"records\")\n for n in xrange(self.conf[\"n_runs\"]):\n rec = self.runs[n].record\n filename = dirname + \"/run{}.rec\".format(n)\n save(rec, filename)\n outro(\"run records\")", "def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = 
self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)", "def save(self, filename):\n ext = os.path.splitext(filename)[1]\n if ext == '.pkl':\n print 'saving trainer params to a pkl file'\n self.save_pkl(filename)\n else:\n print 'saving trainer params to a hdf5 file'\n self.save_h5(filename)", "def writeParams(self, outpath = False, newvals = False, clobber = False, verbose = True):\n\n if newvals is not False and type(newvals) is not dict:\n if verbose: print \"New parameters not in dictionary format - please check!\"\n return False\n\n outparams = self.params\n\n if newvals is not False:\n for key in newvals:\n outparams[key] = newvals[key]\n\n if outpath is not False:\n if os.path.isfile(outpath) and clobber is False:\n if verbose: print \"Out path already exists and clobber is not allowed - please check!\"\n return False\n\n with open(outpath, \"w\") as f:\n for key in outparams:\n f.write(\"{key}{tab}{tab}{value}{newline}\".format(key=key, value=outparams[key],\n tab=\"\\t\", newline=\"\\n\"))\n\n if verbose: print \".params file written successfully!\"\n return True", "def writeto(self,output_file,**kwargs):\n dump_pkl(self._properties,output_file,**kwargs)\n return", "def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path", "def save(self):\n filename = os.path.join(self.directory, 'experiment.json')\n with open(filename, 'w') as f:\n json.dump(self.report, f, indent=2, sort_keys=True)\n filename = os.path.join(self.directory, 'training_progress.csv')\n with open(filename, 'w') as csvfile:\n csv.writer(csvfile).writerows(self.history)\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n parameters = lasagne.layers.get_all_param_values(self.__network)\n parameters = parameters\n numpy.save(filename, parameters)", "def save_opts(self):\n # save code as another folder in log_path\n dst_path = os.path.join(self.log_path, 'code', 'v0')\n iter_yes_or_no = 0\n while 
os.path.exists(dst_path):\n dst_path = os.path.join(self.log_path, 'code', 'v' + str(iter_yes_or_no))\n iter_yes_or_no = iter_yes_or_no + 1\n user_name = expanduser(\"~\")\n try:\n shutil.copytree(os.getcwd(), dst_path, ignore=shutil.ignore_patterns('*.pyc', 'tmp*'))\n except Exception as e_copytree:\n print(e_copytree)\n\n models_dir = os.path.join(self.log_path, \"models\")\n if not os.path.exists(models_dir):\n os.makedirs(models_dir)\n to_save = self.opt.__dict__.copy()\n\n with open(os.path.join(models_dir, 'opt.json'), 'w', encoding='utf-8') as f:\n json.dump(to_save, f, indent=2)", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def write_pars(self, prod_obj):\n new_json_data = {}\n for stepname in self.pars.keys():\n new_json_data[stepname] = self.pars[stepname].outpars\n new_json_data = {prod_obj.product_basename: {\"parameters\": new_json_data, \"default_values\": new_json_data}}\n\n if os.path.exists(self.output_custom_pars_file):\n with open(self.output_custom_pars_file) as f:\n json_data = json.load(f)\n\n json_data.update(new_json_data)\n\n with open(self.output_custom_pars_file, 'w') as f:\n json.dump(json_data, f, indent=4)\n log.info(\"Updated custom pars file {}\".format(self.output_custom_pars_file))\n else:\n with open(self.output_custom_pars_file, 'w') as f:\n json.dump(new_json_data, f, indent=4)\n log.info(\"Wrote custom pars file {}\".format(self.output_custom_pars_file))", "def save_model_params(self, full_path):\n \n file_to_save = file(full_path, 'wb')\n \n print(\"Saving model parameters to %s\"%full_path)\n \n cPickle.dump(self.theta, \n file_to_save, \n protocol=cPickle.HIGHEST_PROTOCOL)\n \n file_to_save.close()", "def save_json(self, path=None):\n if path is None:\n path = self.parampath\n\n params = {str(key): str(value) for key, value in self.__dict__.items()}\n # with open(f'{path}/params_{self.mode}.json', 'w') as f:\n with open(os.path.join(path, f'params_{self.mode}.json'), 'w') as f:\n json.dump(params, f)", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def _get_params_filepath(self):\n\t\treturn os.path.join(self.workdir, \"params.txt\")", "def save(self, savedir='.', savename='savehyperparams.json'):\n\n with open(os.path.join(savedir, savename), mode='w') as f:\n json.dump(self, f, indent=1, sort_keys=True)", "def saveParam(self):\n qApp.emit(QtCore.SIGNAL(\"saveMe\"), self._param)", "def saveParam(self):\n qApp.emit(QtCore.SIGNAL(\"saveMe\"), self._param)", "def save_output(self, output_file_path):\r\n self.output_file.save(output_file_path)", "def _save_parameter_names_file(self, model):\r\n\r\n parameter_names = model.model_component_and_parameter_names\r\n parameter_labels = model.parameter_labels\r\n subscripts = model.superscripts_overwrite_via_config\r\n parameter_labels_with_subscript = [\r\n f\"{label}_{subscript}\"\r\n for label, subscript in zip(parameter_labels, subscripts)\r\n ]\r\n\r\n parameter_name_and_label = []\r\n\r\n for i in range(model.prior_count):\r\n line = formatter.add_whitespace(\r\n str0=parameter_names[i],\r\n str1=parameter_labels_with_subscript[i],\r\n whitespace=70,\r\n )\r\n parameter_name_and_label += [f\"{line}\\n\"]\r\n\r\n formatter.output_list_of_strings_to_file(\r\n file=self._files_path / \"model.paramnames\",\r\n list_of_strings=parameter_name_and_label,\r\n )", "def save(self, dir):\n raise NotImplementedError", "def make_save_path(self):\n if self.paddle_length_factor is not None:\n for gym_env in self.env.envs:\n 
gym_env.scale_paddle_height(self.paddle_length_factor)\n self.save_path = f\"{self.save_folder}/{self.save_name}_paddle_length_{self.paddle_length_factor}\"\n else:\n self.save_path = f\"{self.save_folder}/{self.save_name}\"", "def save(self, directory='saves/'):\n # Create dirpath for temporary dir\n if directory[-1] != '/':\n directory += '/'\n dirpath = directory + self.name + '/'\n\n if not os.path.exists(dirpath): \n os.makedirs(dirpath)\n else:\n raise Exception(f'Path {dirpath} already exists.')\n\n # DQNs & Optimizer\n torch.save(self.policy_net.state_dict(), f'{dirpath}dqn.pth')\n torch.save(self.optimizer.state_dict(), f'{dirpath}optimizer.pth')\n\n # Trainer pamameters\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n\n with open(f'{dirpath}trainer_parameters.pick', 'wb') as file:\n pickle.dump(params, file)\n\n # Zip the saves in one .zip archive\n zippath = f'{directory}{self.name}'\n shutil.make_archive(zippath, 'zip', dirpath)\n\n # Remove the directory dirpath and files inside\n shutil.rmtree(dirpath)\n\n # Display\n print(f'Model saved at {zippath}.zip')", "def save_output(file, option):\n if not os.path.isfile(file):\n raise AssertionError()\n directory = os.path.join(os.getcwd(), 'estimation_output')\n os.rename(file, option)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n os.rename(os.path.join(os.getcwd(), option),\n os.path.join(directory, option))", "def save_parameters():\n\n retval = RP_LIB.rp_SaveLockboxConfig()\n if retval != 0:\n LOG.error(\"Failed to save parameters. Error code: %s\", ERROR_CODES[retval])", "def save_final_config(self, configuration):\n # outfile = 'passes_final.json'\n # print \"Optimal passes written to \" + outfile + \":\", configuration.data\n # self.manipulator().save_to_file(configuration.data, outfile)\n msg = \"Tuned on program {0}, with priority {1}. \\nBest pass ordering found:\\n{2}\".format(\n self.args.makefile, OPT_LVL, self.build_options(configuration.data))\n print msg\n self.make(\"clean\")", "def write_pars(self):\n\t\tself.write_components['pars'] = (self.shock_gm.write_parameters()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_parameters_load(self.shock_gm.database.name))\n\t\treturn self.write_components['pars']", "def save_input(self):\n if not os.path.exists(self.wdir):\n os.makedirs(self.wdir)\n\n with open(self.filepath, \"w\") as f:\n f.write(self.input_string)\n print(f\"-- Input file [{self.filename}] written successfully.\")", "def write_parameter_file( param_file_name, dictionary ):\n # if the file exists, rename it as a backup\n if os.path.exists( param_file_name ):\n os.rename( param_file_name, param_file_name + '.bak.' 
+ str(int(time.time())))\n\n # write a new JSON file\n param_file = open( param_file_name, 'w' )\n param_file.write( json.dumps( dictionary, indent = 1) )\n param_file.close()\n return", "def write_output(self):", "def save(self, output, data):\n return", "def write(self):\n #\n if self.what == 'ecutwfc':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.set_ecutwfc(self.values[i])\n self.pwinput.write()\n #\n elif self.what == 'ecutrho':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.ecutrho = self.values[i]\n self.pwinput.write()\n elif self.what == 'kpoints':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.Nk = self.values[i]\n self.pwinput.write()\n #\n else:\n raise RuntimeError('what = %s is not implemented yet' % (self.what))\n #\n self.inputs_have_been_written = True", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def writeSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.settings.setValue(vol,self.param.vol[i-1])\n info = f\"info{i}\"\n self.settings.setValue(info,self.param.info[i-1])\n ip = f\"ip{i}\"\n self.settings.setValue(ip,self.param.ip[i-1])\n muted = f\"muted{i}\"\n self.settings.setValue(muted,self.param.muted[i-1])", "def save(self, directory):\n for field in self.save_fields:\n np.save(pjoin(directory, field+'.npy'), self.__dict__[field])", "def write_param(self):\n param_file = f\"{self.name}.snapparam\"\n coeff_file = f\"{self.name}.snapcoeff\"\n model = self.model\n describer = self.model.describer\n profile = describer.element_profile\n ne = len(self.elements)\n nbc = len(describer.subscripts)\n if describer.quadratic:\n nbc += int((1 + nbc) * nbc / 2)\n\n coeff_lines = []\n coeff_lines.append(f\"{ne} {nbc + 1}\")\n for element, coeff in zip(self.elements, np.split(model.model.coef_, ne)):\n coeff_lines.append(f\"{element} {profile[element]['r']} {profile[element]['w']}\")\n coeff_lines.extend([str(c) for c in coeff])\n with open(coeff_file, \"w\") as f:\n f.write(\"\\n\".join(coeff_lines))\n\n param_lines = []\n keys = [\"rcutfac\", \"twojmax\"]\n param_lines.extend([f\"{k} {getattr(describer, k)}\" for k in keys])\n param_lines.extend([\"rfac0 0.99363\", \"rmin0 0\"])\n param_lines.append(f\"quadraticflag {int(describer.quadratic)}\")\n param_lines.append(\"bzeroflag 0\")\n with open(param_file, \"w\") as f:\n f.write(\"\\n\".join(param_lines))\n\n pair_style = self.pair_style\n pair_coeff = self.pair_coeff.format(\n elements=\" \".join(self.elements), coeff_file=coeff_file, param_file=param_file\n )\n return [pair_style, pair_coeff]", "def _write_hparams():\n hparam_dict = utils_impl.lookup_flag_values(training_flags)\n\n # Update with optimizer flags corresponding to the chosen optimizers.\n opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)\n opt_flag_dict = optimizer_flag_utils.remove_unused_flags(\n 'client', opt_flag_dict)\n opt_flag_dict = optimizer_flag_utils.remove_unused_flags(\n 'server', opt_flag_dict)\n hparam_dict.update(opt_flag_dict)\n\n # Write the updated hyperparameters to a file.\n training_utils.write_hparams_to_csv(hparam_dict, _ROOT_OUTPUT_DIR.value,\n _EXPERIMENT_NAME.value)", "def write_output_metadata(output_params: Dict) -> None:\n if \"path\" in output_params:\n metadata_path = MPath.from_inp(output_params) / \"metadata.json\"\n logger.debug(\"check for output %s\", metadata_path)\n try:\n existing_params = 
read_output_metadata(metadata_path)\n logger.debug(\"%s exists\", metadata_path)\n logger.debug(\"existing output parameters: %s\", pformat(existing_params))\n current_params = dump_metadata(output_params)\n logger.debug(\"current output parameters: %s\", pformat(current_params))\n compare_metadata_params(existing_params, current_params)\n except FileNotFoundError:\n logger.debug(\"%s does not exist\", metadata_path)\n dump_params = dump_metadata(output_params)\n # dump output metadata\n write_json(metadata_path, dump_params)", "def save(self, filename):\n self.graph.save(filename)\n with open(filename + \".json\", \"w\") as f:\n f.write(json.dumps(self.params))", "def save_opts(self):\n models_dir = os.path.join(self.log_path, \"models\")\n if not os.path.exists(models_dir):\n os.makedirs(models_dir)\n to_save = self.opt.__dict__.copy()\n\n with open(os.path.join(models_dir, 'opt.json'), 'w') as f:\n json.dump(to_save, f, indent=2)", "def save_checkpoint(self, name, include_optimizers=True):\n if not self.params.is_master:\n return\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'n_total_iter': self.n_total_iter,\n 'best_metrics': self.best_metrics,\n 'best_stopping_criterion': self.best_stopping_criterion,\n }\n\n for name in self.MODEL_NAMES:\n logger.warning(\"Saving %s parameters ...\" % name)\n data[name] = getattr(self, name).state_dict()\n if include_optimizers:\n for name in self.optimizers.keys():\n logger.warning(\"Saving %s optimizer ...\" % name)\n data['%s_optimizer' % name] = self.optimizers[name].state_dict()\n\n # data['dico_id2word'] = self.data['dico'].id2word\n # data['dico_word2id'] = self.data['dico'].word2id\n # data['dico_counts'] = self.data['dico'].counts\n data['params'] = {k: v for k, v in self.params.__dict__.items()}\n\n torch.save(data, path)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)" ]
[ "0.72130847", "0.6946201", "0.6819849", "0.67463195", "0.66937596", "0.664994", "0.65934867", "0.6579225", "0.64846045", "0.6477415", "0.6457938", "0.64488935", "0.6443824", "0.640003", "0.6393134", "0.63881093", "0.6384607", "0.6339176", "0.6293206", "0.62779415", "0.6267205", "0.62547785", "0.6246627", "0.6213474", "0.61991656", "0.61829585", "0.6179292", "0.6168369", "0.6139648", "0.6130883", "0.61134934", "0.61044395", "0.6095522", "0.6082146", "0.60808223", "0.60715604", "0.6054459", "0.6048237", "0.603144", "0.6005771", "0.6005558", "0.5992397", "0.5992397", "0.5980592", "0.5979816", "0.59576046", "0.59526", "0.59416336", "0.5925057", "0.59177405", "0.59174865", "0.589793", "0.5894561", "0.5890554", "0.58755994", "0.5874791", "0.58661115", "0.5845691", "0.58452076", "0.58405155", "0.5831849", "0.580636", "0.578147", "0.5781039", "0.57714057", "0.57713", "0.5769992", "0.5766578", "0.5765388", "0.5764935", "0.57568866", "0.5746102", "0.57457495", "0.57429886", "0.57333404", "0.57333404", "0.5730484", "0.57297844", "0.5724821", "0.57175875", "0.57142574", "0.5698626", "0.5691627", "0.56837034", "0.56799805", "0.56796", "0.5675256", "0.5671718", "0.5671702", "0.5661379", "0.5652898", "0.5650441", "0.56428397", "0.5635463", "0.56220806", "0.56114167", "0.56038916", "0.56028306", "0.55917996", "0.5591367" ]
0.5662651
89
Check if results already exist for a given experiment identifier. If the file does not exist, return the filename.
def check_output_file(output_dir, identifier, shuffle_labels, model_options,
                      predictor='classify', fold_no=None, titration_ratio=None):
    signal = 'shuffled' if shuffle_labels else 'signal'
    if not isinstance(model_options.training_data, str):
        training_data = '.'.join(model_options.training_data)
    else:
        training_data = model_options.training_data
    if isinstance(model_options.n_dim, list):
        n_dim = '.'.join(map(str, model_options.n_dim))
    else:
        n_dim = model_options.n_dim
    check_file = construct_filename(output_dir, 'coefficients', '.tsv.gz',
                                    identifier, training_data,
                                    model_options.model, signal, predictor,
                                    s=model_options.seed, n=n_dim,
                                    f=fold_no, t=titration_ratio)
    if check_file.is_file():
        raise ResultsFileExistsError(
            'Results file already exists for identifier: {}\n'.format(
                identifier)
        )
    return check_file
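A hedged usage sketch of the guard pattern this function implements: call it before an expensive run and skip identifiers whose results file is already on disk. `model_options` and `ResultsFileExistsError` are assumed from the snippet's own context; the values are hypothetical:

try:
    output_file = check_output_file('results/', 'TP53', shuffle_labels=False,
                                    model_options=model_options)
except ResultsFileExistsError:
    output_file = None  # results already exist; skip this identifier
if output_file is not None:
    pass  # safe to run the experiment and write to output_file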
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resultExist(probName,algoName,fitName,inst,s,c,n,k,q,w,m,t,e):\n if probName == 'NKQ':\n nameOfF = './result/'+probName+'-'+algoName+'-F'+fitName+'-M'+m+'-I'+str(inst)+'-S'+str(s)+'-W'+str(w)+'-N'+str(n)+'-K'+str(k)+'-C'+str(c)+'-Q'+str(q)+'-T'+str(t)+'-E'+str(e)+'.txt'\n elif probName == 'NK' or probName == 'NonNK':\n nameOfF = './result/'+probName+'-'+algoName+'-F'+fitName+'-C'+str(c)+'-I'+str(inst)+'-S'+str(s)+'-W'+str(w)+'-N'+str(n)+'-K'+str(k)+'-E'+str(e)+'.txt'\n\n if os.path.isfile(nameOfF)==True:\n print nameOfF, 'exists!!!'\n return os.path.isfile(nameOfF)", "def file_exist(self, file_id):\n filename = path.join(\n self._ext_config['dirresults'],\n \"{0}.{1}\".format(file_id, self.type_file)\n )\n if path.exists(filename):\n return True\n else:\n return False", "def results_file(self, filename, check_exists=False):\n return self._file_in_subdir(self.results_dir, filename, check_exists)", "def get_existing_filename_or_die(self, key) -> str:\n filename = self.get_or_default(key, None)\n if filename is None:\n print(\"Error, '\" + key + \"' is required.\")\n sys.exit(1)\n elif not os.path.isfile(filename):\n print(\"'\" + str(filename) + \"' is not a file.\")\n sys.exit(1)\n else:\n return filename", "def get_qafile(date, exp_id):\n filename = '/exposures/nightwatch/{}/{:08d}/qa-{:08d}.fits'.format(date, exp_id, exp_id)\n if os.path.isfile(filename):\n return filename\n return None", "def _check_file_exists_helper(self, report_path, filename):\n\n if not check_data_exists(report_path, [filename]):\n raise AssertionError(\n \"{} does not exist in location {}\".format(\n filename, report_path\n )\n )", "def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()", "def check_if_file_exists(self, sensor_id:str, name:str)->str:\n ret_value = \"\"\n file_name = '%s/%s.*.%s.json' % (self.generate_data_prep, sensor_id, name)\n try: \n ret_value = glob.glob(file_name)\n except Exception as e:\n print(\"OSError - Failed top get file (%s) - %s\" % (file_name, e))\n ret_value = False \n \n if ret_value != False and len(ret_value) > 0: \n ret_value = ret_value[0]\n\n return ret_value", "def compare_resources_resource_exists(data_packages_path, identifier):\n encoded_identifier = helpers.encode_identifier(identifier)\n expected_filename = f'data-json-{encoded_identifier}.json'\n expected_path = os.path.join(data_packages_path, expected_filename)\n logger.debug(f'Expected path {expected_path}')\n\n file_exists = os.path.isfile(expected_path)\n if not file_exists:\n logger.info(f'Dataset: {identifier} not in DATA.JSON.')\n\n return file_exists, expected_path", "def datafileexist(filename):\n filePath = os.path.join(pathtofolder(), \"datas\", filename)\n fileFormat = '.csv'\n return os.path.exists(f'{filePath+fileFormat}')", "def check_experiment(self, id):\n exp = experiment.experiment(new_experiment=False, ts=id)\n\n start_time = time.time()\n condition = True\n\n while exp.metadata is None and condition:\n now = time.time()\n if now-start_time > 3:\n condition = False\n return \"Experiment is not found!\"\n exp = experiment.experiment(new_experiment=False, ts=id)\n time.sleep(0.01)\n\n cam_statement = str()\n for i in range(7):\n fname = os.path.join(\"data/\", str(id), \"/\", str(i) + \"/\")\n if os.path.exists(fname):\n n = len(self.um.find_images(fname))\n else:\n n = 0\n cam_statement += \"Camera {i}: {n} images found! 
\".format(i=i, n=n)\n\n date = self.um.timestamp_to_date(id / 1000)\n f = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"backup\", str(id)+\".zip\")\n is_archived = self.um.check_file_exists(f)\n if is_archived:\n img = \"true.png\"\n else:\n img = \"false.png\"\n\n try:\n label = exp.metadata[\"label\"]\n except:\n label = None\n\n pd_images = exp.metadata[\"pose_detection\"].values()\n user = {\"timestamp\": id,\n \"date\": date,\n \"camera\": exp.metadata[\"number_of_cameras\"],\n \"n_images\": exp.metadata[\"number_of_images\"],\n \"room\": exp.metadata[\"room\"],\n \"label\": label,\n \"image\": img,\n \"exp\": exp.metadata,\n \"pose_detection_processed_images\": pd_images}\n return render_template('experiment.html', user=user)", "def resultsExist(aName):\n return (os.path.exists(aName + \".txt\") or\n os.path.exists(aName + \".html\") or\n os.path.exists(aName + \".txt.gz\") or\n os.path.exists(aName + \".html.gz\"))", "def testFilesExist(self):\n \n for year in range(2007,2013):\n self.assertTrue(os.path.exists(\"./IncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./LogIncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./IncomeBoxplot(log)_\"+ str(year)+\".pdf\"), \"A boxplot didn't save to output.\") \n self.assertTrue(os.path.exists(\"./results.txt\"), \"Results file doesn't exist.\")", "def get_non_existing_file(file_name, max_attempts = 1000):\n if not os.path.exists(file_name):\n return file_name\n attempt = 0\n while True:\n candidate_file_name = \"%s.%d\" % (file_name, attempt)\n if not os.path.exists(candidate_file_name):\n return candidate_file_name\n attempt += 1\n if attempt >= max_attempts:\n msg = \"Cannot find non existing file from pattern %s\"\n raise ValueError(msg % file_name)", "def artiq_results_path(experiment: Optional[str] = None) -> str:\n\n path = os.path.join(shared_area_path(), \"artiqResults\")\n\n if experiment is None:\n try:\n experiment = os.environ[\"OITG_EXPERIMENT\"]\n except KeyError:\n raise Exception(\n \"No experiment supplied, and no OITG_EXPERIMENT environment key\")\n\n return os.path.join(path, experiment)", "def results_dir(filename = None):\n path = 'results'\n if os.path.isdir(path):\n if not os.access(path, os.R_OK | os.W_OK):\n raise EnvironmentError(\"{0} is not readable or writable\".format(os.path.abspath(path)))\n return os.path.join(path, filename) if filename else path\n os.mkdir(path) # raises if it fails\n return os.path.join(path, filename) if filename else path", "def output_file_exists(self):\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' 
+ \\\n self.options['image_format'].lower()\n self.__log(f'Checking if output file: \"{file}\" already exists.')\n if os.path.exists(file) and not os.stat(file).st_size == 0:\n self.__log(f'Output file: \"{file}\" does exist.')\n return True\n self.__log(f'Output file: \"{file}\" does not exist.')\n return False", "def existingFile(filename):\n if not os.path.exists(filename):\n raise argparse.ArgumentTypeError(\"{0} does not exist\".format(filename))\n return filename", "def FileExists(DSLModel, table, filename):\n fullpath=\"%s%s%s%s%s\" % (DSLModel['GENERAL']['target_folder'], os.sep,\n table['name'], os.sep, filename)\n return os.access(fullpath, os.F_OK)", "def check_analysis_results(path: str) -> None:\n filename = os.path.join(path, \"summaryresults.json\")\n if os.path.isfile(filename):\n return\n raise CHCAnalysisResultsNotFoundError(path)", "def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False", "def check_filename(*args):\n subject = subject_var.get()\n category = cat_var.get()\n private = private_var.get()\n\n extension = 'txt' if not private else 'secret'\n filename = f'{category} - {subject}.{extension}'\n\n if Path(filename).exists():\n status_var.set(f'WARNING: {filename} already exists!')\n else:\n status_var.set('')", "def file_exists(filename):\n return os.path.exists(filename)", "def fits_file_exists (filepath):\n return validate_file_path(filepath, FITS_EXTENTS)", "def get_report_file_name(self):\n if os.path.isfile(self.REPORT_FILE_PATH):\n print(\"'{}' is already exist!\".format(self.REPORT_FILE_PATH))\n report_file = self.prompt_report_file_name()\n else:\n report_file = self.REPORT_FILE_PATH\n return report_file", "def check_file_existing(k_out, sphere_radius,full_path):\n cwd = full_path\n file_names = ['/T_ns_', '/Energy_', '/Distance-e-to-e_',\n '/chain_on_iterations_']\n\n kout = f'{k_out:.3f}'\n spr_rad = f'{sphere_radius:.3f}'\n\n current = \"kout_\" + kout + \"_\" + \"spr_rad_\" + spr_rad\n for file_name in file_names:\n #print (cwd + file_name + current + \".pkl\")\n if not os.path.exists(cwd + file_name + current + \".pkl\"):\n return False\n return True", "def is_unique_filename(containing_folder, filename):\n return not os.path.exists(join_path(containing_folder, filename))", "def filecheck(filename):\n if not os.path.isfile(filename):\n print(\"Can't find %s\" % filename)\n exit(1)\n else:\n return filename", "def _get_results_path(self):\n # if we already have the results path set, please return it\n if self._results_path is not None:\n return self._results_path\n\n self._validate_results_path()\n\n path = self.results_path\n\n if path is None:\n for i in range(1, 10001):\n name = f\"AutoML_{i}\"\n if not os.path.exists(name):\n self.create_dir(name)\n self._results_path = name\n return name\n # If it got here, could not create, raise expection\n raise AutoMLException(\"Cannot create directory for AutoML results\")\n elif os.path.exists(self.results_path) and os.path.exists(\n os.path.join(self.results_path, \"params.json\")\n ): # AutoML already loaded, return path\n self._results_path = path\n return path\n # Dir does not exist, create it\n elif not os.path.exists(path):\n self.create_dir(path)\n self._results_path = path\n return path\n # Dir exists and is empty, use it\n elif os.path.exists(path) and not len(os.listdir(path)):\n self._results_path = path\n return path\n elif 
os.path.exists(path) and len(os.listdir(path)):\n raise AutoMLException(\n f\"Cannot set directory for AutoML. Directory '{path}' is not empty.\"\n )\n\n raise AutoMLException(\"Cannot set directory for AutoML results\")", "def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False", "def test_create_unique_files(self):\n fitting_report.create(results=self.results,\n support_pages_dir=self.dir.name,\n options=self.options)\n\n file_names = sorted([r.fitting_report_link\n for r in self.results])\n\n unique_names = sorted(list(set(file_names)))\n\n self.assertListEqual(unique_names, file_names)", "def file_exists(msl_data_path, filename):\n return os.path.isfile(msl_data_path + filename)", "def valid_resfile(listname):\r\n global results_file, directory_res\r\n try:\r\n results_file = open(directory_res+listname+\".output\", \"r\")\r\n return True\r\n except:\r\n return False", "def test_existing_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.reddit_id == 't3_ahal9v').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertTrue(file.endswith(' - 2'), msg='Failed to increment duplicate post!')", "def generate_id():\n for i in range(_MAX_NUM_TESTS):\n test, ref = filename(i)\n if not os.path.exists(test) and not os.path.exists(ref):\n return i\n return None", "def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False", "def _index_file_exists(idx_fn):\n if os.path.exists(idx_fn + \".npy\") and os.path.exists(idx_fn + \".info\"):\n return True\n else:\n return False", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def experimentReport(request, experiment_id):\n experiment = get_object_or_404(Experiment, pk=experiment_id)\n\n r = Reporter(experiment)\n filename = r.create_report()\n logger.info('Successfully created report with name %s.' 
% filename)\n\n fs = FileSystemStorage(location=settings.REPORTS_ROOT, base_url=settings.REPORTS_URL)\n\n return redirect(fs.url(os.path.basename(filename)))", "def example_filename(fn):\n fn = os.path.join(data_dir(), fn)\n if not os.path.exists(fn):\n raise ValueError(\"%s does not exist\" % fn)\n return fn", "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def is_file_exists(self):\n pass", "def check_if_file_exists(file: str) -> Union[str, None]:\n if not os.path.exists(file) and not os.path.exists(f'{file}_qforce'):\n sys.exit(f'ERROR: \"{file}\" does not exist.\\n')\n return file", "def prompt_report_file_name(self):\n while True:\n report_file = input(\"Enter name for your report file: \")\n if os.path.isfile(report_file):\n print(\"'{}' is already exist!\".format(report_file))\n else:\n break\n return report_file", "def _retrosheet_filename(game_id, data_root):\n # game id is TTTYYYYMMDDN.\n team = game_id[:3]\n year = game_id[3:7]\n file_pattern = year + team + \".EV*\"\n file_path = os.path.join(data_root, \"retrosheet\", year, file_pattern)\n file_matches = glob.glob(file_path)\n return file_matches[0] if len(file_matches) else None", "def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None", "def checkExists(fileName):\n if fileName == '' or not pathlib.Path(fileName).exists():\n print('Error: {} is not found !!!'.format(fileName))\n exit()", "def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename", "def result_message(file_path):\n if (os.path.isfile(os.path.join(file_path, \"overallmetrics.csv\"))) and \\\n (os.path.isfile(os.path.join(file_path, \"output.csv\"))) and \\\n (os.path.isfile(os.path.join(file_path, \"classmetrics.csv\"))):\n return 'Results have been generated successfully'\n else:\n\n # This is will delete settings.csv file if the method fails to generate the results successfully\n\n settings_filepath = file_path[:(file_path.find('Results'))]\n csv_files = [file for file in glob.glob(os.path.join(settings_filepath, '*.csv'))]\n for file in csv_files:\n os.remove(file)\n print(\"Settings files deleted\")\n return 'Some error while generating Adversarial results'", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def isFileExist(file_name):\n return os.path.exists(file_name)", "def file_exists(session, ds_browser, ds_path, file_name):\n client_factory = session._get_vim().client.factory\n search_spec = vm_util.search_datastore_spec(client_factory, file_name)\n search_task = session._call_method(session._get_vim(),\n \"SearchDatastore_Task\",\n ds_browser,\n datastorePath=ds_path,\n 
searchSpec=search_spec)\n try:\n task_info = session._wait_for_task(search_task)\n except error_util.FileNotFoundException:\n return False\n\n file_exists = (getattr(task_info.result, 'file', False) and\n task_info.result.file[0].path == file_name)\n return file_exists", "def file_exists(self, file_name):\n already_exists = False\n for file in os.listdir('saves'):\n if file.endswith('.json'):\n if file[:-5] == file_name:\n return True\n return False", "def existent_file(file_path):\n if not os.path.exists(file_path):\n raise argparse.ArgumentTypeError(\"Input file path does not exist\")\n return file_path", "def is_evaluation_created(path):\n evaluation_id = None\n try:\n with open(\"%s%sevaluation\" % (path, os.sep)) as evaluation_file:\n evaluation_id = evaluation_file.readline().strip()\n try:\n evaluation_id = bigml.api.get_evaluation_id(evaluation_id)\n return True, evaluation_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def existing_file(fname):\n if os.path.isfile(fname):\n return fname\n else:\n msg = \"The file '{}' does not exist\".format(fname)\n raise ap.ArgumentTypeError(msg)", "def test_return_sensitivity_hdf_path(datevshot):\n fn = return_sensitivity_hdf_path(datevshot)\n assert isfile(fn)", "def file_exists(filename):\n return os.path.isfile(filename)", "def id_exists(test_name):\n result_json = None\n try:\n with open(robot_dir + \"/output/results/{}.json\".format(test_name.replace(' ', ''))) as result_file:\n result_json = json.load(result_file)\n except:\n print(\"Failed to open the result json\")\n return False\n #look for values NEW_ASSOC, NEW_PROP1, NEW_PROP2\n print(result_json)\n if 6 == 6:\n return True\n return \"Length is not 6\"", "def fileCheck(filename):\n if not os.path.isfile(filename):\n print('File: ' + filename + ' not found. 
Exiting...', file=sys.stderr)\n sys.exit(1)", "def _getResultsFileName(self, toilPath):\n return os.path.join(toilPath, \"results.txt\")", "def findFileName(path, slug):\n\tfor attempt in range(0, 99):\n\t\tfile_name = makeFileName(path, slug, attempt)\n\t\tif not os.path.exists(file_name):\n\t\t\treturn file_name\n\n\tprint \"ERROR: Too many clashes trying to create filename \" + makeFileName(path, slug)\n\texit()", "def test_file_name_successful(self):\n\n url = '/%s/jobs/%i/input_files/?file_name=%s' % (self.api, self.job.id, self.f3_file_name)\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n result = results['results']\n self.assertEqual(len(result), 1)\n\n self.assertEqual(self.f3_file_name, result[0]['file_name'])\n self.assertEqual('2016-01-10T00:00:00Z', result[0]['source_started'])\n self.assertEqual(self.file3.id, result[0]['id'])", "def file_exists(filename: str) -> bool:\n\n return os.path.exists(filename)", "def get_filename(extended_slug):\n user, project, build_id, job_id = split_extended_slug(extended_slug)\n\n if None in (user, project, build_id, job_id): # todo; remove this\n return\n\n filename_glob = os.path.join(\n test_data_dir,\n user, project,\n '{0}.{1}-*.txt'.format(build_id, job_id))\n filenames = glob.glob(filename_glob)\n if filenames:\n return filenames[0]\n else:\n return None", "def FindDataFile(filename):\n filename = os.path.expanduser(filename)\n if os.path.exists(filename):\n return filename\n\n # If it's not a relative path, we can't do anything useful.\n if os.path.isabs(filename):\n return filename\n\n other_places = [os.getcwd(),\n os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'Contents', 'Resources'),\n os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources'),\n os.path.join(os.getcwd(), '..'),\n os.path.join(sys.prefix, 'namebench'),\n '/usr/local/share/namebench'\n '/usr/local/etc/namebench',\n '/usr/local/namebench',\n '/etc/namebench',\n '/usr/share/namebench',\n '/usr/namebench']\n for directory in reversed(sys.path):\n other_places.append(directory)\n other_places.append(os.path.join(directory, 'namebench'))\n\n for place in other_places:\n path = os.path.join(place, filename)\n if os.path.exists(path):\n return path\n\n print 'I could not find \"%s\". 
Tried:' % filename\n for path in other_places:\n print ' %s' % path\n return filename", "def get_file(filename, result):\n return next((f for f in result if f['filename'] == filename), None)", "def _filename(self, corotid):\n from datasource import DataSource\n self.corotid = corotid\n self.corot = DataSource(database='corot', user='sro', host='pina.aip.de')\n \n query = \"\"\"SELECT run_code, hlfccdid, win_id \n FROM corot \n WHERE corotid = %d;\"\"\" % self.corotid\n result = self.corot.query(query)\n \n par = {'run': result[0][0],\n 'half': result[0][1].rstrip('RL'), \n 'win': result[0][2]}\n filename = '/work2/jwe/CoRoT/%(run)s/data/%(run)s_%(half)s_%(win)04d.fits' % par\n logger.info('%d = %s' % (corotid,filename))\n return filename", "def assure_exists(self, name: str):\n result = self.l2.exists(name)\n if result:\n logging.debug(f'{name} l2 hit')\n return self.l2.get_path(name)\n\n self.l3.download(name, self.l2.get_path(name))\n result = self.l2.exists(name)\n if not result:\n raise Exception('file not found anywhere')\n else:\n logging.debug(f'{name} l3 hit')\n return self.l2.get_path(name)", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"write_qiime_train_db.py\", get_files)", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def get_file(filename):\n return os.path.join(TEST_DIR, filename)", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", 
\"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def find_file(file_name):\n if (pathlib.Path(file_name).resolve()):\n file_name = str(file_name)\n logging.info(f' found {file_name}.')\n return file_name\n else:\n logging.error(f' no file {file_name} found for processing.')\n sys.exit()", "def test_get_file_exists_with_git_and_revision(self):\n self._test_get_file_exists(\n tool_name='Git',\n revision='123',\n base_commit_id=None,\n expected_revision='123',\n expected_found=True)", "def file_exists(self,\n\t filename,\n\t shutit_pexpect_child=None,\n\t directory=False,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.file_exists(filename=filename,directory=directory,note=note,loglevel=loglevel)", "def FileExists(file):\n return os.path.exists(file)", "def dailyanalysis(experiment):\n import os\n for fn in os.listdir('/network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment):\n if fn.find('exe.fms') == -1 and fn.find('mppnccombine.ifc') == -1:\n storedaily('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + fn + '/combine/',\n '/network/aopp/hera/mad/bakerh/data/FMS/output/' +\n experiment + '/' + fn + '/history/')\n print('Completed ' + fn)", "def result_file_selector(id, train=True):\n if train:\n return train_result_files.get(id, 'y_train_smpl.csv')\n else :\n return test_result_files.get(id, 'y_train_smpl.csv')", "def is_data_by_filename(fname):\n return \"Run2017\" in fname", "def find_file(filename):\n for i in list(_ctx.include_paths) + [ os.path.dirname(_ctx.filename) ]:\n full_path = os.path.join(i, filename)\n if os.path.exists(full_path):\n return full_path\n return filename # failure gets handled later on", "def search_file(filename, term):\n try:\n filepath = root + filename\n with open(filepath) as file:\n return term in file.read().splitlines()\n except FileNotFoundError:\n with open(filepath,\"w+\") as file:\n return (search_file(filename, term))", "def existing_file(fname):\n if not os.path.isfile(fname):\n raise ValueError(\"Invalid file: \" + str(fname))\n return fname", "def file_missing(filename):\n return not os.path.isfile(filename)", "def get_valid_filename(msg):\r\n\r\n filename = input(msg)\r\n while not os.path.exists(filename):\r\n print(\"That file does not exist.\")\r\n filename = input(msg)\r\n return filename", "def _get_existing_path(self, file_path):\n test_files_location = self._resource_config.test_files_location\n search_order = [\n os.path.join(test_files_location or \"\", file_path),\n os.path.join(test_files_location or \"\", self.reservation_id, file_path),\n file_path,\n ]\n for path in search_order:\n if os.path.exists(path):\n return path\n raise BPRunnerException(\n self.__class__.__name__,\n 'File {} does not exists or \"Test Files Location\" attribute was not specified'.format(file_path),\n )", "def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def ifExist(file_name, key):\n\tif 
exists(file_name) and exists(key):\n\t\treturn True\n\telse:\n\t\treturn False", "def log_file_exists(file_name: str):\n if os.path.isfile(get_complete_file_name(file_name)):\n return True\n return False", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def send_results_file_json(**kwargs):\n try:\n logging.debug(\"Opening json output file for writing\")\n with open(kwargs[\"output_file_json\"], \"w\") as file_json_open:\n logging.info(\n \"Writing to output json file: \" + kwargs[\"output_file_json\"]\n )\n file_json_open.write(kwargs[\"results_dataset_json\"])\n return True\n except IOError:\n logging.exception(\"Error writing results to json output file\")\n return False", "def _maybe_download(self, filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n return filepath", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath", "def get_existing_filename(existing_files: List[str]) -> str:\n\n # Ask user which file only if there are multiple files\n\n if len(existing_files) == 1:\n return existing_files[0]\n\n questions = [\n {\n 'type': 'list',\n 'name': 'target_filename',\n 'message': 'Which file do you want to load ?',\n 'choices': existing_files\n }\n ]\n return prompt(questions, style=custom_style_2)[\"target_filename\"]", "def test_DDSim_runIt_failure_inputFile(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.InputFile = \"pairs.hepmc\"\n ## side effect for Script, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, False, True] ) ):\n res = self.ddsim.runIt()\n self.assertEqual( res['Message'], \"no pairs.hepmc\" )", "def search_existing_file(path):\n return os.path.isfile(path)", "def storage_find_report_file(self, report_id, filename):\n return self._get_queryset(report_id=report_id, filename=filename).get()", "def download_agent_if_missing(filename):\n if file_missing(filename):\n print filename+'is missing, downloading it first'\n download(filename)", "def _get_filename(self, id):\n if re.findall('[^a-zA-Z0-9]', id):\n raise Exception\n\n return os.path.join(self._datadir, str(id))" ]
[ "0.65933585", "0.65233624", "0.63628626", "0.62309206", "0.6212523", "0.5945689", "0.5904068", "0.58523875", "0.58294135", "0.5818082", "0.5772402", "0.5763797", "0.5763763", "0.5752898", "0.5715975", "0.57131594", "0.57079446", "0.57061094", "0.57015955", "0.569571", "0.5647063", "0.56460226", "0.5639535", "0.5611101", "0.5609733", "0.5602704", "0.5599304", "0.55898184", "0.55634516", "0.5548449", "0.55323684", "0.5527153", "0.55039793", "0.55022955", "0.5491255", "0.54857284", "0.5464084", "0.54616904", "0.5459595", "0.5443259", "0.54432017", "0.5436623", "0.5419027", "0.541723", "0.5413634", "0.5413075", "0.5381967", "0.53650135", "0.5361891", "0.5345845", "0.5337481", "0.53137684", "0.53129536", "0.5305124", "0.53004843", "0.52990144", "0.52979887", "0.52874005", "0.52867526", "0.52806884", "0.528032", "0.528015", "0.52777755", "0.52719855", "0.5254156", "0.5252044", "0.5246594", "0.5236649", "0.5228263", "0.52207863", "0.5210947", "0.5210017", "0.5208138", "0.5195124", "0.51864874", "0.51815414", "0.51778024", "0.51601523", "0.51597416", "0.51577544", "0.5150148", "0.51471114", "0.5141689", "0.51368177", "0.51338077", "0.51330495", "0.51305085", "0.51229274", "0.512059", "0.51188767", "0.511626", "0.51117736", "0.5107362", "0.5104693", "0.50896996", "0.5084262", "0.50722533", "0.5065664", "0.5064825", "0.50516707" ]
0.5988968
5
Save results of a single experiment for a single identifier.
def save_results(output_dir, check_file, results, exp_string, identifier, shuffle_labels, model_options, predictor='classify', fold_no=None, titration_ratio=None): signal = 'shuffled' if shuffle_labels else 'signal' if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data if isinstance(model_options.n_dim, list): n_dim = '.'.join(map(str, model_options.n_dim)) else: n_dim = model_options.n_dim if predictor == 'classify': auc_df = pd.concat(results[ '{}_auc'.format(exp_string) ]) output_file = construct_filename(output_dir, 'auc_threshold_metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) auc_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) aupr_df = pd.concat(results[ '{}_aupr'.format(exp_string) ]) output_file = construct_filename(output_dir, 'aupr_threshold_metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) aupr_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) if '{}_coef'.format(exp_string) in results: coef_df = pd.concat(results[ '{}_coef'.format(exp_string) ]) coef_df.to_csv( check_file, sep="\t", index=False, float_format="%.5g" ) metrics_df = pd.concat(results[ '{}_metrics'.format(exp_string) ]) if '{}_preds'.format(exp_string) in results: preds_df = pd.concat(results[ '{}_preds'.format(exp_string) ]) else: preds_df = None if '{}_param_grid'.format(exp_string) in results: params_df = pd.concat(results[ '{}_param_grid'.format(exp_string) ]) else: params_df = None output_file = construct_filename(output_dir, 'metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) metrics_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) if preds_df is not None: output_file = construct_filename(output_dir, 'preds', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) preds_df.to_csv( output_file, sep="\t", float_format="%.5g" ) if params_df is not None: output_file = construct_filename(output_dir, 'param_grid', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no) params_df.to_csv(output_file, sep="\t")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_result(self, file_id, data):\n filename = path.join(\n self._ext_config['dirresults'],\n \"{0}.{1}\".format(file_id, self.type_file)\n )\n with open(filename, 'w') as file:\n file.write(data)\n logging.info(\"File %s has beed saved!\" % filename)", "def store_results(self, results_overall, results_single, featureDesp=None):\n # store experiment run and retrieve id\n # store aggregate results\n # store patient level results\n # happy end\n self.experiment_id = self.store_experiment(mode='r', featureDesp=featureDesp)\n self.log.debug(\"Stored experiment with id {}\".format(self.experiment_id))\n self.store_aggregate_results(results_overall)\n for patientId in self.patientIDs:\n if results_single[patientId] is None: continue\n self.store_patient_results(results_single[patientId], patientId)\n self.log.info(\"Finished experiment {}\".format(self.experiment_id))", "def save(self):\n output = self.prepare_results()\n\n override_name = output[\"config\"][\"sysconfig\"].get(\"output_filename\", None)\n scenario_name = (\n override_name if override_name else output[\"config\"][\"scenario\"][\"name\"]\n )\n filename = f\"{scenario_name}_{output['timestamp']}.json\"\n log.info(\n \"Saving evaluation results to path \"\n f\"{self.scenario_output_dir}/{filename} \"\n \"inside container.\"\n )\n output_path = os.path.join(self.scenario_output_dir, filename)\n with open(output_path, \"w\") as f:\n json_utils.dump(output, f)\n if os.path.getsize(output_path) > 2**27:\n log.warning(\n \"Results json file exceeds 128 MB! \"\n \"Recommend checking what is being recorded!\"\n )", "def save(self):\n pickle_save(self.results, 'results', self.main_dir)", "def _saveExperiment(self, experiment, path):\n Experiment.save(experiment, path);", "def save_result(self):\n self.logger.info(f'Saving results to {self.db_loc}s24_{self.year}.json')\n open(f'{self.db_loc}s24_{self.year}.json', 'w').write(json.dumps(self.db, indent=4, ensure_ascii=False))", "def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))", "def _save_results(self, test_name, task_id):\n # check for result directory and create it otherwise\n if not os.path.exists(self.results_dir):\n LOGGER.debug('%s does not exist, we create it.',\n self.results_dir)\n os.makedirs(self.results_dir)\n\n # put detailed result to log\n cmd = ([\"rally\", \"task\", \"detailed\", \"--uuid\", task_id])\n LOGGER.debug('running command: %s', cmd)\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n # save report as JSON\n report_json_name = f'{test_name}.json'\n report_json_dir = os.path.join(self.results_dir, report_json_name)\n cmd = ([\"rally\", \"task\", \"report\", \"--json\", \"--uuid\", task_id,\n \"--out\", report_json_dir])\n LOGGER.debug('running command: %s', cmd)\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n with open(report_json_dir, encoding='utf-8') as json_file:\n json_results = json_file.read()\n self._append_summary(json_results, test_name)\n\n # parse JSON operation result\n if self.task_succeed(json_results):\n LOGGER.info('Test scenario: \"%s\" OK.', test_name)\n else:\n LOGGER.info('Test scenario: \"%s\" Failed.', test_name)", "def save(self):\n payload = {\n \"test_id\": self.test_id,\n \"test_case_name\": self.test_case_name,\n \"epoch_timestamp\": self.epoch_timestamp,\n \"human_timestamp\": self.human_timestamp,\n \"status\": 
self.status,\n \"boundaries_breached\": self.boundaries_breached,\n \"regression_found\": self.regression_found\n }\n if self.check_if_test_id_exists_in_test_report(self.test_case_name, self.test_id):\n\n # Update existing test results\n return self.update_results_in_test_report(self.test_case_name, self.test_id, payload)\n\n else:\n\n # Insert new test results\n return self.insert_results_into_test_report(self.test_case_name, payload)", "def save_result(self, results: Dict[str, Dict[str, Any]]) -> None:\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")", "def save_episode(results, process_id, episode, seed, dtype='tfrecord'):\n if dtype == 'tfrecord':\n save_episode_tf_record(results_dir, results, process_id, episode)\n else:\n assert dtype == 'numpy'\n save_episode_numpy(results, seed)", "def store_result(result: dict, filepath: str):\n\n raise NotImplementedError", "def SaveResults(self, results_type='iterations', **options):\n\n # Insert '/' in path string if necessary\n if 'path' in options:\n if options['path'][-1] != '/' and len(options['path']) > 0:\n options['path'] += '/'\n path = options['path']\n else:\n path = ''\n\n # Exctract simulation ID if provided\n if 'sim_id' in options:\n sim_id = options['sim_id']\n if sim_id == '':\n print(\"Warning: sim_id not provided\")\n else:\n print(\"Warning: sim_id not provided\")\n sim_id = ''\n\n # define the name prefix and data dictionary\n if results_type == 'iterations':\n prefix = 'results'\n if 'results' in options:\n dictionary = options['results']\n else:\n raise ValueError(\"Please specify data_dictionary\")\n else:\n raise ValueError(results_type + \" is not supported\")\n\n # Add sim_id if providied:\n if isinstance(sim_id, int):\n dictionary['sim_id'] = sim_id\n\n # Generate name and save\n if 'name' in options:\n fullname = path + options['name']\n else:\n if results_type == 'iterations':\n if 'iter_start' in options:\n iter_start = str(options['iter_start'])\n elif 'iter_num' in dictionary:\n iter_start = str(dictionary['iter_num'][0])\n else:\n iter_start = '0'\n iter_start += '-'\n if 'iter_end' in options:\n iter_end = str(options['iter_end'])\n elif 'iter_num' in dictionary:\n iter_end = str(dictionary['iter_num'][-1])\n else:\n iter_end = str(self.iterations_GD_['logliks'].size)\n postfix = '_iterations_'\n fullname = path + prefix + \\\n str(sim_id) + postfix + iter_start + iter_end + '.npz'\n if os.path.isfile(fullname):\n print('Error: file ' + fullname + ' already exists. 
Aborting...')\n return\n else:\n np.savez(fullname, **dictionary)\n print('file ' + fullname + ' saved.')", "def save(self, exp_name=None, exp_id=None, path='ap_output', display=True):\n\n # Create output directory if it doesn't exist\n if path not in listdir():\n makedirs(path)\n\n # Set exp_name\n if exp_name is None:\n if 'log' in self and 'name' in self.log:\n exp_name = self.log['name']\n else:\n exp_name = 'Unnamed'\n\n exp_name = exp_name.replace(\" \", \"_\")\n\n # Set exp_id\n if exp_id is None:\n exp_id = _last_exp_id(exp_name, path) + 1\n\n # Create new directory for output\n path = f'{path}/{exp_name}_{exp_id}'\n makedirs(path)\n\n # Save experiment data\n for key, output in self.items():\n\n if isinstance(output, pd.DataFrame):\n output.to_csv(f'{path}/{key}.csv')\n\n elif isinstance(output, DataDict):\n for k, o in output.items():\n\n if isinstance(o, pd.DataFrame):\n o.to_csv(f'{path}/{key}_{k}.csv')\n elif isinstance(o, dict):\n with open(f'{path}/{key}_{k}.json', 'w') as fp:\n json.dump(o, fp, cls=NpEncoder)\n\n else: # Use JSON for other object types\n try:\n with open(f'{path}/{key}.json', 'w') as fp:\n json.dump(output, fp, cls=NpEncoder)\n except TypeError as e:\n print(f\"Warning: Object '{key}' could not be saved. \"\n f\"(Reason: {e})\")\n os.remove(f'{path}/{key}.json')\n\n # TODO Support grids & graphs\n # elif t == nx.Graph:\n # nx.write_graphml(output, f'{path}/{key}.graphml')\n\n if display:\n print(f\"Data saved to {path}\")", "def save_results(results, results_file: str):\n with open(results_file, \"wb\") as file:\n pickle.dump(results, file)", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def save_results(self, *args):\n try:\n filename = args[0]\n except IndexError:\n filename = self.filename\n results = {}\n results['gp_pred'] = self.gp_predictions\n results['func_val'] = self.target_func_vals\n results['inds_all'] = np.array(self.indices_all)\n results['vals_all'] = np.array(self.vals_all)\n np.save(filename+\".npy\", results)", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def save_eval_results(\n data_dir: Path,\n results: Dict[Any, pd.DataFrame],\n with_static: bool,\n concat_static: bool,\n) -> None:\n model = _get_model_str(with_static=with_static, concat_static=concat_static)\n\n dt = time.gmtime()\n dt_str = f\"{dt.tm_year}_{dt.tm_mon:02}_{dt.tm_mday:02}:{dt.tm_hour:02}{dt.tm_min:02}{dt.tm_sec:02}\"\n name = dt_str + \"_\" + f\"{model}_results.pkl\"\n\n (data_dir / \"models\").mkdir(exist_ok=True, parents=True)\n pickle.dump(results, open(data_dir / \"models\" / name, \"wb\"))", "def save(self,filename):\n f = open(filename,'w')\n f.write('Test results for %s v%s\\n' % (self.description,self.version))\n f.write('Series ran by %s\\n\\n' % self.person_name)\n for result in self.values():\n 
f.write('%-70s : %s\\n' % (result.id,result.outcome))\n if result.outcome != Result.PASS:\n for (kind, annotation) in result.annotations.items():\n f.write('%s:\\n%s\\n' % (kind, as_utf8(annotation)))\n f.write('\\n')\n f.write('\\n\\nPasses: %i\\n' % self.get_pass_count())\n f.write('Fails: %i\\n' % self.get_fail_count())\n f.write('Errors: %i\\n' % self.get_error_count())\n f.write('Untested: %i\\n' % self.get_untested_count())\n f.write('Skipped: %i\\n' % self.get_skipped_count())\n f.close()", "def store_outcome(model_name, dataset, strict, forgiving):\n model_name = model_name.replace('/', '-')\n\n with open(f'outcomes-{model_name}-{dataset}.pkl', 'wb') as file:\n\n pickle.dump((strict, forgiving), file)", "def store_result(self, data_id, anno_container, model, value):\n\n entry = PyannoResult(anno_container=anno_container,\n model=model, value=value)\n self._check_consistency(data_id, anno_container)\n\n # NOTE shelves to not automatically handle changing mutable values,\n # we need to take care of it manually\n if data_id not in self.database:\n temp = []\n else:\n temp = self.database[data_id]\n\n temp.append(entry)\n\n self.database[data_id] = temp", "def store(self, results):\n filename = self.get_archive_file_path(results)\n results.dump(filename)", "def save_optimization_results(self, adaptive_function, result):\n opt_res_grp = self.data_object.create_group('Optimization_result')\n\n if adaptive_function.__module__ == 'cma.evolution_strategy':\n res_dict = {'xopt': result[0],\n 'fopt': result[1],\n 'evalsopt': result[2],\n 'evals': result[3],\n 'iterations': result[4],\n 'xmean': result[5],\n 'stds': result[6],\n 'stop': result[-3]}\n # entries below cannot be stored\n # 'cmaes': result[-2],\n # 'logger': result[-1]}\n elif adaptive_function.__module__ == 'pycqed.measurement.optimization':\n res_dict = {'xopt': result[0],\n 'fopt': result[1]}\n else:\n res_dict = {'opt': result}\n h5d.write_dict_to_hdf5(res_dict, entry_point=opt_res_grp)", "def save_results(self, instagram_results):", "def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))", "def _save_run(index, session, artifact_id, conf, sandbox, run_result):\n run = index.Run()\n run.artifact_id = artifact_id\n run.pps_id = conf['identifiers']['pps_id']\n run.environment_id = run_result.environment_id\n run.challenge_problem_id = conf['identifiers']['challenge_problem_id']\n run.team_id = conf['identifiers']['team_id']\n run.dataset_label = conf['identifiers']['dataset_label']\n\n run.started = long(run_result.start_time)\n run.duration = run_result.runtime\n run.load_average = run_result.load_average\n run.load_max = run_result.load_max\n run.ram_average = long(run_result.ram_average) * 1024\n run.ram_max = long(run_result.ram_max) * 1024\n\n # Save the artifact configuration.\n try:\n artifact_config_path = os.path.abspath(run_result.config_file_path)\n run.artifact_configuration = db.Index.migrate(\n artifact_config_path\n )\n except AttributeError:\n assert run_result.config_file_path is None\n\n \"\"\"\n XXX : we were catching 'NOENT' OS_EXCEPTION\n \"\"\"\n\n # output should always have content\n run.output = db.Index.migrate(utility.path_walk(run_result.output_dir))\n\n # it is not required that logs file/directory is populated\n try:\n run.log = db.Index.migrate(run_result.log_path)\n except db.Empty_Migrate:\n run.log = None\n\n # it is not required that trace file/directory is populated\n 
try:\n run.trace = db.Index.migrate(utility.path_walk(run_result.trace_dir))\n except db.Empty_Migrate:\n run.trace = None\n\n\n try:\n session.add(run)\n session.commit()\n return run.run_id\n except Exception as e:\n # Clear the saved data.\n for blob_id in (run.artifact_configuration, run.output, run.log,\n run.trace):\n # We'd like to use EAFP here, but Python 2 doesn't support\n # exception chaining (see PEP 3134), so EAFP would require\n # making this its own function. LBYL is more elegant,\n # anyway.\n if blob_id:\n db.Index.remove_blob(blob_id)\n raise e", "def store_experiment(self, mode=\"r\", featureDesp=\"all\"):\n self.log.info(\"Storing Experiment\")\n\n with self.con:\n cur = self.con.cursor()\n query = \"INSERT INTO BG_experiment_run (model, parameters, svn_rev, experiment_id, features, `type`) \" \\\n \" VALUES (%(model)s, %(parameters)s, %(svn_rev)s, %(experiment_id)s, %(features)s, %(type)s)\"\n cur.execute(query, {\n 'model': self.algorithm,\n 'parameters': \"-\",\n 'svn_rev': self.svn_rev,\n 'experiment_id': self.batchId,\n 'features': featureDesp,\n 'type': mode\n })\n return cur.lastrowid", "def save_data(self, session, exp_id, content):\n from expfactory.database.models import Participant, Result\n\n subid = session.get(\"subid\")\n token = session.get(\"token\")\n\n self.logger.info(\"Saving data for subid %s\" % subid)\n\n # We only attempt save if there is a subject id, set at start\n if subid is not None:\n p = Participant.query.filter(\n Participant.id == subid\n ).first() # better query here\n\n # Does\n if self.headless and p.token != token:\n self.logger.warning(\n \"%s attempting to use mismatched token [%s] skipping save\"\n % (p.id, token)\n )\n elif self.headless and p.token.endswith((\"finished\", \"revoked\")):\n self.logger.warning(\n \"%s attempting to use expired token [%s] skipping save\" % (p.id, token)\n )\n else:\n\n # Preference is to save data under 'data', otherwise do all of it\n if \"data\" in content:\n content = content[\"data\"]\n\n result = Result(\n data=content, exp_id=exp_id, participant_id=p.id\n ) # check if changes from str/int\n\n # Create and save the result\n self.session.add(result)\n p.results.append(result)\n self.session.commit()\n self.logger.info(\"Save [participant] %s [result] %s\" % (p, result))", "def store(self, job, result):\n pass", "def save_result(self):\n np.save(os.path.join(self.outpath, self.image_name + '_run.npy'), {\n 'device' : u.get_gpu_name(),\n 'elapsed': u.sec2time(self.elapsed),\n 'outpath': self.outpath,\n 'history': self.history,\n 'mask' : self.mask,\n 'image' : self.img,\n 'output' : self.out_best,\n 'noise' : self.input_list,\n })\n \n # save the model\n if self.args.savemodel:\n torch.save(self.net.state_dict(),\n os.path.join(self.outpath, self.image_name + '_model.pth'))", "def save(\n self,\n suppress_errors: bool = True,\n max_workers: int = 3,\n save_figures: bool = True,\n save_children: bool = True,\n ) -> None:\n # TODO - track changes\n if not self._service:\n LOG.warning(\n \"Experiment cannot be saved because no experiment service is available. \"\n \"An experiment service is available, for example, \"\n \"when using an IBM Quantum backend.\"\n )\n if suppress_errors:\n return\n else:\n raise ExperimentDataSaveFailed(\"No service found\")\n if max_workers > self._max_workers_cap:\n LOG.warning(\n \"max_workers cannot be larger than %s. 
Setting max_workers = %s now.\",\n self._max_workers_cap,\n self._max_workers_cap,\n )\n max_workers = self._max_workers_cap\n self._save_experiment_metadata(suppress_errors=suppress_errors)\n if not self._created_in_db:\n LOG.warning(\"Could not save experiment metadata to DB, aborting experiment save\")\n return\n\n analysis_results_to_create = []\n for result in self._analysis_results.values():\n analysis_results_to_create.append(result._db_data)\n try:\n self.service.create_analysis_results(\n data=analysis_results_to_create,\n blocking=True,\n json_encoder=self._json_encoder,\n max_workers=max_workers,\n )\n for result in self._analysis_results.values():\n result._created_in_db = True\n except Exception as ex: # pylint: disable=broad-except\n # Don't automatically fail the experiment just because its data cannot be saved.\n LOG.error(\"Unable to save the experiment data: %s\", traceback.format_exc())\n if not suppress_errors:\n raise ExperimentDataSaveFailed(\n f\"Analysis result save failed\\nError Message:\\n{str(ex)}\"\n ) from ex\n\n for result in self._deleted_analysis_results.copy():\n with service_exception_to_warning():\n self._service.delete_analysis_result(result_id=result)\n self._deleted_analysis_results.remove(result)\n\n if save_figures:\n with self._figures.lock:\n figures_to_create = []\n for name, figure in self._figures.items():\n if figure is None:\n continue\n # currently only the figure and its name are stored in the database\n if isinstance(figure, FigureData):\n figure = figure.figure\n LOG.debug(\"Figure metadata is currently not saved to the database\")\n if isinstance(figure, pyplot.Figure):\n figure = plot_to_svg_bytes(figure)\n figures_to_create.append((figure, name))\n self.service.create_figures(\n experiment_id=self.experiment_id,\n figure_list=figures_to_create,\n blocking=True,\n max_workers=max_workers,\n )\n\n for name in self._deleted_figures.copy():\n with service_exception_to_warning():\n self._service.delete_figure(experiment_id=self.experiment_id, figure_name=name)\n self._deleted_figures.remove(name)\n\n if not self.service.local and self.verbose:\n print(\n \"You can view the experiment online at \"\n f\"https://quantum-computing.ibm.com/experiments/{self.experiment_id}\"\n )\n # handle children, but without additional prints\n if save_children:\n for data in self._child_data.values():\n original_verbose = data.verbose\n data.verbose = False\n data.save(\n suppress_errors=suppress_errors,\n max_workers=max_workers,\n save_figures=save_figures,\n )\n data.verbose = original_verbose", "def _store_results(user_cfg: Dict, run_cfg: Dict, results: pd.DataFrame, epoch: int):\n if \"eval_dir\" in user_cfg:\n store_dir = user_cfg[\"eval_dir\"]\n store_dir.mkdir(exist_ok=True, parents=True)\n else:\n store_dir = user_cfg[\"run_dir\"]\n\n if run_cfg[\"no_static\"]:\n file_name = store_dir / f\"lstm_no_static_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n else:\n if run_cfg[\"concat_static\"]:\n file_name = store_dir / f\"lstm_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n else:\n file_name = store_dir / f\"ealstm_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n\n with (file_name).open(\"wb\") as fp:\n pickle.dump(results, fp)\n\n print(f\"Sucessfully store results at {file_name}\")", "def store_result(duration, loci_number):\n print(' %ds for %d loci' % (duration, loci_number))\n\n if os.path.isfile(out_fname):\n with open(out_fname, 'r') as fd:\n cur = json.load(fd)\n else:\n cur = []\n\n with open(out_fname, 'w') as fd:\n cur.append((loci_number, duration))\n 
json.dump(cur, fd)", "def save(self, result_dir):\n path = os.path.join(result_dir, self._filename)\n\n util.write_json(path, {\n 'results': self._results,\n 'params': self._params,\n 'requirements': self._env.requirements,\n 'commit_hash': self._commit_hash,\n 'date': self._date,\n 'python': self._python\n }, self.api_version)", "def save_results(self, data, prefix, mode=\"train\", compute_loss=False):\n # save predictions\n self.save_predictions(prefix, mode)", "def save(self):\n filename = os.path.join(self.directory, 'experiment.json')\n with open(filename, 'w') as f:\n json.dump(self.report, f, indent=2, sort_keys=True)\n filename = os.path.join(self.directory, 'training_progress.csv')\n with open(filename, 'w') as csvfile:\n csv.writer(csvfile).writerows(self.history)\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n parameters = lasagne.layers.get_all_param_values(self.__network)\n parameters = parameters\n numpy.save(filename, parameters)", "def _save_results(item, user, duration, raw_result):\n LOGGER.debug('item: {}, user: {}, duration: {}, raw_result: {}'.format(\n item, user, duration, raw_result.encode('utf-8')))\n \n _existing_result = RankingResult.objects.filter(item=item, user=user)\n \n if _existing_result:\n _result = _existing_result[0]\n \n else:\n _result = RankingResult(item=item, user=user)\n \n LOGGER.debug(u'\\n\\nResults data for user \"{0}\":\\n\\n{1}\\n'.format(\n user.username or \"Anonymous\",\n u'\\n'.join([str(x) for x in [_result, duration, raw_result]])))\n \n _result.duration = str(duration)\n _result.raw_result = raw_result\n \n _result.save()", "def save_results(factors, rec_errors, scores_odor, scores_rew, name, path):\r\n\ti = 0\r\n\twhile os.path.exists(os.path.join(path, 'factors{}_{:02d}.npy').format(name, i)):\r\n\t\ti += 1\r\n\t\r\n\tnp.save(os.path.join(path, 'factors{}_{:02d}.npy'.format(name, i)), factors)\r\n\tnp.save(os.path.join(path, 'errors{}_{:02d}.npy'.format(name, i)), rec_errors)\r\n\tnp.save(os.path.join(path, 'scores_odor{}_{:02d}.npy'.format(name, i)), scores_odor)\r\n\tnp.save(os.path.join(path, 'scores_rew{}_{:02d}.npy'.format(name, i)), scores_rew)", "def save():", "def save_fit_result(self, fitresult, outfile):\n save_modelresult(fitresult, outfile)", "def save_results(PATH, data, filename):\n with open(PATH + '/' + filename + \".txt\",\"w\") as file:\n file.write(\"Results of heuristic models with mean and standard deviation.\\n\")\n for result in data:\n write_result(file, result)\n file.close()\n print('results saved in:'+ PATH + '/' + filename + \".txt\")", "def save_results(self, export_json_path):\n with open(export_json_path, 'w') as f:\n json.dump(self.results, f)", "def save(self, directory):\n os.makedirs(directory, exist_ok=True)\n\n summarized_res = self.get_summarized_results()\n detailed_res = self.get_detailed_results()\n\n with open(os.path.join(directory, '%s_summarized_results.json' % self.name), 'w') as f:\n json.dump(summarized_res, f, sort_keys=True, indent=2)\n\n with open(os.path.join(directory, '%s_detailed_results.json' % self.name), 'w') as f:\n json.dump(detailed_res, f, sort_keys=True, indent=2)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def writeAfter(self, model=None, histories=None, results={}, 
saveModel=False):\n# Write out everything new we know after running the experiment\n# Will append to the existing file\n with open(self.filename,'a') as f:\n finish = datetime.datetime.now()\n f.write( \"Finish: {}\\n\".format( finish ) )\n f.write( \"Elapsed: {}\\n\".format( finish-self.start ) )\n if model is not None:\n summ_list = []\n model.summary(print_fn=lambda x: summ_list.append(x))\n f.write( \"Model:\\n\" )\n for summ in summ_list:\n f.write( ' {}\\n'.format(summ) )\n f.write( \"Results:\\n\" )\n for key,value in results.items():\n f.write( \" {}: {}\\n\".format( key, value ) )\n if model is not None and saveModel:\n fname = os.path.join( self.dir_name, self.name+\"_model.json\" )\n with open(fname,'w') as f:\n f.write(model.to_json())\n fname = os.path.join( self.dir_name, self.name+\"_weights.h5\" )\n model.save_weights(fname)\n if histories is not None:\n try:\n his_fname = os.path.join(self.dir_name, \"histories.pickle\")\n with open(his_fname, 'wb') as f:\n pickle.dump( histories, f, pickle.HIGHEST_PROTOCOL)\n except Exception as ex:\n print( \"Failed to write history ({}) to {}\\n {}\".format( type(histories), his_fname, ex ) )", "def save_results_internal(self, obj: object):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"wb\") as f:\r\n dill.dump(obj, f)", "def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path", "def save_results(test_name, start_time, end_time, population_size, number_of_generations, pop, stats):\n record = stats.compile(pop)\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n palette_width = int(config[\"palette\"][\"width\"])\n palette_depth = int(config[\"palette\"][\"depth\"])\n palette_height = int(config[\"palette\"][\"height\"])\n palette_max_weight = int(config[\"palette\"][\"weight\"])\n print(record)\n fitness_max = record['max']\n fitness_min = record['min']\n fitness_avg = record['avg']\n Result.create(test_name=test_name, start_time=start_time, end_time=end_time,\n number_of_generations=number_of_generations, population_size=population_size,\n max_fitness=fitness_max, min_fitness=fitness_min, average_fitness=fitness_avg,\n 
palette_max_weight=palette_max_weight,\n palette_width=palette_width, palette_height=palette_height, palette_depth=palette_depth)", "def save_fit_results(self, save_path: str = \"./fit_results.json\"):\n assert (\n self._fit_src_dst_results or self._fit_dst_src_results\n ), \"There are no fit results to be saved, \\\n call fit method first or load the results from the file\"\n assert save_path.endswith(\".json\"), self.JSON_ASSERTION\n\n wrapped_results = {\n \"fit_src_dst_results\": self._fit_src_dst_results,\n \"fit_dst_src_results\": self._fit_dst_src_results,\n }\n\n with open(save_path, \"w\") as fjson:\n json.dump(wrapped_results, fjson)", "def save_results(self, data, prefix, mode=\"train\"):\n # save predictions\n if mode != \"train\":\n self.save_predictions(prefix, mode)\n #if self.config[\"model\"][\"version\"] != \"IE\":\n #self.save_assignments(prefix, mode)\n #self.visualize_assignments(prefix=prefix, mode=mode)\n\n \"\"\" given sample data \"\"\"\n if data is not None:\n # maintain sample data\n self._set_sample_data(data)\n\n # convert data as Variables\n data = [*self.tensor2variable(data), data[1]]\n\n outputs = self.forward(data[:-1])\n logit_list = outputs[0][0]\n\n # compute loss\n loss = self.loss_fn(outputs[0], data[-2], count_loss=False)\n\n # save results of assignment of model\n if self.config[\"model\"][\"version\"] != \"IE\":\n logits = [logit.data.cpu() for logit in logit_list]\n vis_data = [*self.sample_data, self.criterion.assignments]\n if type(vis_data[0][-1]) == type(list()):\n vis_data[0][-1] = vis_data[0][-1][0]\n if self.use_knowledge_distillation:\n vis_data.append([net_utils.get_data(bout)\n for bout in self.base_outputs])\n\n class_names = [self.itoa[str(key)]\n for key in range(len(self.itoa.keys()))]\n vis_utils.save_mcl_visualization(\n self.config, vis_data, logits, class_names, \\\n self.itow, self.itoa, prefix, \\\n self.use_knowledge_distillation\n )", "def save(self, output, data):", "def save_experiment(self, file_name: str, ovr_if_exists=False) -> None:\n if not self.populated:\n raise ExperimentException(\"Empty experiment class cannot be saved. 
Load or analyze experiment first.\")\n if ovr_if_exists:\n dfile = h5py.File(file_name, \"w\")\n else:\n dfile = h5py.File(file_name, \"x\")\n try:\n dfile.create_dataset(\"version\", data=self.version) # for later backwards compatibility\n # save general experiment data\n dfile.create_dataset(\"experiment_name\", data=self.experiment_name)\n dfile.create_dataset(\"original_path\", data=self.original_path)\n dfile.create_dataset(\"scope_name\", data=self.scope_name)\n dfile.create_dataset(\"comment\", data=self.comment)\n dfile.create_dataset(\"n_planes\", data=self.n_planes)\n dfile.create_dataset(\"tail_frame_rate\", data=self.tail_frame_rate)\n # save singular parameter dictionary\n self._save_dictionary(self.info_data, \"info_data\", dfile)\n # save augmentation flag\n if int(self.version) > 1:\n dfile.create_dataset(\"tail_data_augmented\", data=self.tail_data_augmented)\n # save per-plane data\n for i in range(self.n_planes):\n plane_group = dfile.create_group(str(i))\n self._save_dictionary(self.scanner_data[i], \"scanner_data\", plane_group)\n if len(self.tail_data) > 0:\n plane_group.create_dataset(\"tail_data\", data=self.tail_data[i], compression=\"gzip\",\n compression_opts=5)\n if self.bout_data[i] is not None:\n plane_group.create_dataset(\"bout_data\", data=self.bout_data[i], compression=\"gzip\",\n compression_opts=5)\n else:\n # no bouts were found, save dummy array of one line of np.nan\n bd = np.full((1, 8), np.nan)\n plane_group.create_dataset(\"bout_data\", data=bd, compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"tail_frame_time\", data=self.tail_frame_times[i])\n if int(self.version) > 1 and len(self.replaced_tail_frames) > 0:\n plane_group.create_dataset(\"replaced_tail_frames\", data=self.replaced_tail_frames[i],\n compression=\"gzip\", compression_opts=5)\n if len(self.laser_data) > 0:\n plane_group.create_dataset(\"laser_data\", data=self.laser_data[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"projection\", data=self.projections[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"func_stack\", data=self.func_stacks[i], compression=\"gzip\",\n compression_opts=5)\n if len(self.anat_projections) > 0: # this is a dual-channel experiment\n plane_group.create_dataset(\"anat_projection\", data=self.anat_projections[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"C\", data=self.all_c[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"dff\", data=self.all_dff[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"centroids\", data=self.all_centroids[i], compression=\"gzip\",\n compression_opts=5)\n plane_group.create_dataset(\"sizes\", data=self.all_sizes[i], compression=\"gzip\", compression_opts=5)\n plane_group.create_dataset(\"spatial\", data=self.all_spatial[i], compression=\"gzip\", compression_opts=5)\n # due to mixed python types in caiman parameter dictionaries these currently get pickled\n ps = json.dumps(self.mcorr_dicts[i])\n plane_group.create_dataset(\"mcorr_dict\", data=ps)\n ps = json.dumps(self.cnmf_extract_dicts[i])\n plane_group.create_dataset(\"cnmf_extract_dict\", data=ps)\n ps = json.dumps(self.cnmf_val_dicts[i])\n plane_group.create_dataset(\"cnmf_val_dict\", data=ps)\n finally:\n dfile.close()", "def save_fit_results(self, save_path: str = \"./fit_results.json\"):\n assert (\n self._fit_results\n ), \"There are no fit results to be saved, \\\n call fit method first or load the results 
from the file\"\n assert save_path.endswith(\".json\"), self.JSON_ASSERTION\n with open(save_path, \"w\") as fjson:\n json.dump(self._fit_results, fjson)", "def saveResult(resultHere, intI):\n\tintJ = 0\n\tif intI == 0:\n\t\twith jsonlines.open('tempRecords.jsonl', mode='w') as writer:\n\t\t\twriter.write(resultHere)\t\t\n\telif intI == -2:\n\t\twith jsonlines.open('tempRecords.jsonl', mode='w') as writer:\n\t\t\tintJ += 1\n\telse:\n\t\twith jsonlines.open('tempRecords.jsonl', mode='a') as writer:\n\t\t\twriter.write(resultHere)", "def save_results(\n layer: LayerType,\n batch_size: int,\n num_runs: int,\n num_repeats: int,\n gsm_mode: Optional[str],\n results: List[Dict[str, Any]],\n config: Dict[str, Any],\n random_seed: Optional[int] = None,\n forward_only: bool = False,\n root: str = \"./results/raw/\",\n suffix: str = \"\",\n) -> None:\n path = get_path(\n layer=layer,\n batch_size=batch_size,\n num_runs=num_runs,\n num_repeats=num_repeats,\n random_seed=random_seed,\n forward_only=forward_only,\n gsm_mode=gsm_mode,\n root=root,\n suffix=suffix,\n )\n\n with open(path, \"wb\") as handle:\n pickle.dump(\n {\n \"layer\": layer,\n \"batch_size\": batch_size,\n \"num_runs\": num_runs,\n \"num_repeats\": num_repeats,\n \"random_seed\": random_seed,\n \"forward_only\": forward_only,\n \"gsm_mode\": gsm_mode,\n \"results\": results,\n \"config\": config,\n },\n handle,\n protocol=pickle.HIGHEST_PROTOCOL,\n )", "def save_result(data, user_id):\n result = ResultModel(**data, user_id=user_id)\n result.save_to_db()\n user = UserModel.query.get(user_id)\n logging.info(f'User {user.username} scores {result.wpm} wpm.')\n return jsonify({}), 201", "def save_simulation_results(file_name, **kwargs):\n\n # Insert debugging assertions\n assert type(file_name) is str, \"The 'filw_name' must be string.\"\n \n # Save a dictionary of names and arrays into a MATLAB-style .mat file\n sio.savemat(file_name, kwargs)", "def save_results():\r\n global title_dict\r\n conn=sql.connect('output.db')\r\n conn.execute(\"DELETE FROM OUTPUT\")\r\n for k,v in title_dict.iteritems():\r\n conn.execute(\"INSERT INTO OUTPUT VALUES (?,?);\",(k,v))\r\n conn.commit()\r\n conn.close()", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = 
level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)", "def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)", "def save(self, data, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n # Create path directory\n if not os.path.isdir(self.cache_path):\n logging.info(\"Creating cache directory at {}\".format(self.cache_path))\n mkpath(self.cache_path, 0o755)\n\n with open(cache_file_path, 'wb') as fp:\n logging.debug(\"Storing result in cache file at {}\".format(cache_file_path))\n pickle.dump(data, fp)\n\n return True", "def _store_result(self, task_id, result, status, traceback=None):\n session = Session()\n try:\n tasks = session.query(Task).filter(Task.task_id == task_id).all()\n if not tasks:\n task = Task(task_id)\n session.add(task)\n else:\n task = tasks[0]\n task.result = result\n task.status = status\n task.traceback = traceback\n if task.status == states.STARTED:\n task.date_began = datetime.now()\n session.commit()\n finally:\n session.close()\n return result", "def save_result(self):\n self.print_to_console()", "def storeResult(request, run_uuid):\n if request.method == 'POST':\n trialresult = TrialResult()\n trialresult.subject = get_object_or_404(SubjectData, pk=run_uuid)\n trialresult.trialitem = get_object_or_404(TrialItem, pk=int(request.POST.get('trialitem')))\n trialresult.start_time = dateutil.parser.parse(request.POST.get('start_time'))\n trialresult.end_time = dateutil.parser.parse(request.POST.get('end_time'))\n trialresult.key_pressed = request.POST.get('key_pressed')\n #trialresult.webcam_file = request.POST.get('webcam_file')\n trialresult.trial_number = int(request.POST.get('trial_number'))\n trialresult.resolution_w = int(request.POST.get('resolution_w'))\n trialresult.resolution_h = int(request.POST.get('resolution_h'))\n trialresult.save()\n return JsonResponse({'resultId': trialresult.pk})\n else:\n logger.error('Failed to store result.')\n raise Http404('Page not found.')", "def SaveResult(self, result, result_path):\n\n # Create / get results file in the local directory \"\"\"\n db = self.GetResultFile()\n\n if db is not None:\n debug('result : {}'.format(result))\n\n # Check if file exists\n try:\n o = db[str(result_path + '/' + result.GetName())]\n except KeyError:\n print(\"No object in {}/{}\".format(result_path, result.GetName()))\n o = None\n\n if o is not None:\n print(\"Replacing {}/{}\".format(result_path, result.GetName()))\n del o\n\n db[str(result_path + '/' + result.GetName())] = result\n if db[str(result_path + '/' + result.GetName())] is not None:\n print(\"+++result {}/{} adopted\".format(result_path, result.GetName()))\n\n else:\n error(\"Could not adopt result {}\".format(result.GetName()))\n db.close()\n return\n\n else:\n error(\"Error creating result file\")\n db.close()\n return\n\n db.close()", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n 
self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def putresult(task, config, log, dataset, user, label, cbase, cstore):\n logf = log.format(task)\n if not os.path.exists(logf):\n click.echo(click.style(\"the log file at {} doesn't exist, provide a valid location\".format(logf), fg='red'))\n return\n if not os.path.exists(config):\n click.echo(click.style(\"the config file at {} doesn't exist, provide a valid location\".format(config), fg='red'))\n return\n if not os.path.exists(dataset):\n click.echo(click.style(\"the dataset file at {} doesn't exist, provide a valid location\".format(dataset), fg='red'))\n return\n config_obj = read_config_file(config)\n datasets_set = index_by_label(read_config_file(dataset))\n dataset_key = config_obj['dataset']\n dataset_key = get_dataset_from_key(dataset_key, datasets_set)\n config_obj['dataset'] = dataset_key['label']\n ServerManager.get()\n result = ServerManager.api.put_result(task, to_swagger_experiment(task, config_obj, log, username=user, label=label))\n if result.response_type == 'success':\n eid = result.message\n click.echo(click.style('results stored with experiment: {}'.format(result.message), fg='green'))\n if cbase is None:\n return\n result = store_model(checkpoint_base=cbase, config_sha1=hash_config(read_config_file(config)),\n checkpoint_store=cstore, print_fn=click.echo)\n if result is not None:\n click.echo(click.style('model stored at {}'.format(result), fg='green'))\n update_result = ServerManager.api.update_property(task, eid, prop='checkpoint', value=result)\n if update_result.response_type == 'success':\n click.echo(click.style(update_result.message, fg='green'))\n else:\n click.echo(click.style(update_result.message, fg='red'))\n else:\n click.echo(click.style('failed to store model'.format(result), fg='red'))\n else:\n click.echo(click.style(result.message, fg='red'))", "def save(self):\n path = self.get_benchmark_file_path(self._conf.results_dir)\n util.write_json(path, self._all_benchmarks, self.api_version)", "def save_results(save_path, inside_ellipsoid_per_step,\n inside_ellipsoid_all, x_all, p_all, q_all):\n\n results_dict = dict()\n results_dict[\"inside_ellipsoid_per_step\"] = inside_ellipsoid_per_step\n results_dict[\"inside_ellipsoid_all\"] = inside_ellipsoid_all\n results_dict[\"x_all\"] = x_all\n results_dict[\"p_all\"] = p_all\n results_dict[\"q_all\"] = q_all\n save_data_path = \"{}/res_data\".format(save_path)\n np.save(save_data_path, results_dict)\n\n return results_dict", "def save_episode_numpy(results, seed):\n path = os.path.join(home, 
'controller-samples', str(seed))\n os.makedirs(path, exist_ok=True)\n\n for name, data in results.items():\n results[name] = np.array([np.array(a) for a in data])\n print(name, results[name].shape)\n np.save(os.path.join(path, '{}.npy'.format(name)), data)", "def save_results(coordinates, rs_directory):\n results = coordinates\n with open(os.path.join(rs_directory, 'results.json'), \"w\") as file:\n json.dump(results, file)", "def save(self, name, replicates=False):\n\n #saves replicates\n if replicates:\n for each in self:\n replicateName = name + \" \" + each.replicateName + \".xlsx\"\n writer = pd.ExcelWriter(replicateName)\n each.save(writer)\n\n #saves Experiment\n experimentName = name + \".xlsx\"\n experimentWriter = pd.ExcelWriter(experimentName)\n\n with experimentWriter as writer:\n #saves combinedReplicates\n try:\n for i in range(len(self.combinedReplicates)):\n try:\n self.combinedReplicates[i].to_excel(writer, sheet_name=str(self.cellLines[i]))\n except IndexError:\n pass\n try:\n self.combinedReplicatesData[i].to_excel(writer,sheet_name=str(self.cellLines[i]) + \" data\")\n except IndexError:\n pass\n except AttributeError:\n print(\"ERROR: Experiment not combined\")\n pass\n #saves experimentReferenceIntersections\n for i in range(len(self.experimentReferenceIntersections)):\n try:\n self.experimentReferenceIntersections[i].to_excel(writer, sheet_name=str(self.cellLines[i]) + \" to reference\")\n except AttributeError:\n print(\"ERROR: Experiment reference intersections not calculated\")\n break\n except IndexError:\n pass\n #saves experimentFullIntersection\n try:\n self.experimentFullIntersection.to_excel(writer, sheet_name=\"Full Intersection\")\n except AttributeError:\n print(\"ERROR: Experiment full intersection not calculated\")\n pass\n except IndexError:\n pass", "def save_result(self, result):\n\n try:\n self._save_or_reregister_result(result)\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)", "def save(self, output, data):\n pass", "def store(context: click.Context, case_id: str, dry_run: bool):\n LOG.info(f\"Storing analysis for {case_id}\")\n context.invoke(report_deliver, case_id=case_id, dry_run=dry_run)\n context.invoke(store_housekeeper, case_id=case_id)", "def _save_experiment_metadata(self, suppress_errors: bool = True) -> None:\n if not self._service:\n LOG.warning(\n \"Experiment cannot be saved because no experiment service is available. 
\"\n \"An experiment service is available, for example, \"\n \"when using an IBM Quantum backend.\"\n )\n return\n try:\n handle_metadata_separately = self._metadata_too_large()\n if handle_metadata_separately:\n metadata = self._db_data.metadata\n self._db_data.metadata = {}\n\n result = self.service.create_or_update_experiment(\n self._db_data, json_encoder=self._json_encoder, create=not self._created_in_db\n )\n if isinstance(result, dict):\n created_datetime = result.get(\"created_at\", None)\n updated_datetime = result.get(\"updated_at\", None)\n self._db_data.creation_datetime = parse_utc_datetime(created_datetime)\n self._db_data.updated_datetime = parse_utc_datetime(updated_datetime)\n\n self._created_in_db = True\n\n if handle_metadata_separately:\n self.service.file_upload(\n self._db_data.experiment_id, self._metadata_filename, metadata\n )\n self._db_data.metadata = metadata\n\n except Exception as ex: # pylint: disable=broad-except\n # Don't automatically fail the experiment just because its data cannot be saved.\n LOG.error(\"Unable to save the experiment data: %s\", traceback.format_exc())\n if not suppress_errors:\n raise QiskitError(f\"Experiment data save failed\\nError Message:\\n{str(ex)}\") from ex", "def save_test_evidence(self):\n payload = {\n \"test_id\": self.test_id,\n \"test_case_name\": self.test_case_name,\n \"epoch_timestamp\": self.epoch_timestamp,\n \"human_timestamp\": self.human_timestamp,\n \"verification_name\": self.verification_name,\n \"status\": self.status,\n \"value\": self.value,\n \"critical_value\": self.critical_value\n }\n return self.insert_regression_test_evidence(self.test_case_name, payload)", "def save_result(self, value: Any) -> None:\n self.run_logger.set_tags({self.name: value})", "def _save_metadata(self, result_dir: Path):\n id_path = result_dir / SerializationAttributes.ID_FILENAME\n with open(id_path, 'w') as f:\n json.dump({SerializationAttributes.ID_KEY: self.id}, f)\n\n version_path = result_dir / SerializationAttributes.VERSION_FILENAME\n with open(version_path, 'w') as f:\n json.dump({SerializationAttributes.VERSION_KEY: self.version}, f)", "def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)", "def saving_results(history, model_type, folder, task, idx=None, kfold_bool=False):\n if kfold_bool == False:\n if not os.path.exists(''.join(string for string in [absPath, 'data/results/', folder, task, model_type])):\n os.makedirs(''.join(string for string in [absPath, 'data/results/', folder, task, model_type]))\n \n #file_f1 = ''.join(string for string in [absPath, 'data/results/', folder, '/', model_type, '/f1_score.pickle'])\n #with open(file_f1, \"wb\") as output_file:\n # pickle.dump(f1s, output_file)\n \n file_his = ''.join(string for string in [absPath, 'data/results/',folder, task, model_type, '/history.pickle'])\n\n with open(file_his, \"wb\") as output_file:\n pickle.dump(history.history, output_file)\n else:\n if not os.path.exists(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/', str(idx), '/'])):\n os.makedirs(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/', str(idx), '/']))\n file_his = ''.join(string for string in [absPath, 'data/results/',folder, task, model_type, '/', str(idx), '/history.pickle'])\n\n with open(file_his, \"wb\") as output_file:\n pickle.dump(history.history, output_file)", "def save_individual(ind, path):\n with open(path, 'wb') 
as output:\n pickle.dump(ind, output, pickle.DEFAULT_PROTOCOL)\n output.close()", "def __setitem__(self, (essid, key), results):\n if essid not in self.essids:\n raise KeyError(\"ESSID not in store.\")\n filename = os.path.join(self.essids[essid][0], key) + '.pyr'\n with open(filename, 'wb') as f:\n f.write(PYR2_Buffer(essid, results).pack())\n self.essids[essid][1][key] = filename", "def save(self, data, **kwargs):\n if self.persist==['data']: # 1 data shortcut\n self.output().save(data, **kwargs)\n else:\n targets = self.output()\n if not set(data.keys())==set(targets.keys()):\n raise ValueError('Save dictionary needs to consistent with Task.persist')\n for k, v in data.items():\n targets[k].save(v, **kwargs)", "def save_dataset(self):\n if self.res_dataset is None:\n return\n if self.write_path is None:\n raise Exception(\"Error: Attempted to save result dataset without ever specifiying a path to write to\")\n\n if self.format == \"arrow\":\n self.res_dataset.save_to_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset.to_csv(self.write_path, index = False)", "def saveResult(timer, currentAttribute, totalAttribute, currentResult):\n # Get current and final position\n currentPosition = timer.current_timestep\n finalPosition = currentPosition + timer.timesteps_used_horizon\n \n # Only save the first values that are not overwritten later\n requiredResult = currentResult[0:timer.timesteps_used_horizon]\n \n # Save the results\n currentAttribute = requiredResult\n totalAttribute[currentPosition:finalPosition] = requiredResult\n \n return (currentAttribute, totalAttribute)", "def save_data(new_data):\n\n data_file = project_settings['results_path']\n\n formatted_data = prep_data(new_data)\n\n if already_exists(data_file):\n append_new_data(formatted_data, data_file)\n else:\n create_new_datafile(formatted_data, data_file)", "def save():\n pass", "def save(self, output, data):\n return", "def save_results(trainer, base_fname='figure_data_', store_x=True):\n if not store_x:\n # delete the saved final x value so we don't run out of memory\n trainer.history['x'] = defaultdict(list)\n\n fname = base_fname + trainer.model.name + \".npz\"\n np.savez(fname, history=trainer.history, model_name=trainer.model.name,\n num_subfunctions = trainer.num_subfunctions,\n full_objective_period=trainer.full_objective_period)", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/repeatsfinder/repeatsfinder.joblib\",\n )", "def save_result(file_name, text):\n\n filename = os.path.join(fileDir, 'tools/results/{}.txt'.format(file_name))\n\n with open(filename, \"a+\") as file:\n file.write(text)", "def save(self):\n #test output\n pywikibot.output('PICKLING %s records at %s' % (len(self.historyDict),datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n with open(self.datfilename, 'wb') as f:\n pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)", "def _store_result(self, task_id, result, state,\n traceback=None, request=None, **kwargs):\n self._get_connection(write=True)\n\n self._session.execute(self._write_stmt, (\n task_id,\n state,\n buf_t(self.encode(result)),\n self.app.now(),\n buf_t(self.encode(traceback)),\n buf_t(self.encode(self.current_task_children(request)))\n ))", "def save_result(self, fname):\n fields_to_not_save = ['imgs', 'imgs_ft',\n 'widefield', 'widefield_ft',\n 'separated_components_ft',\n 'widefield_deconvolution', 'widefield_deconvolution_ft',\n 'imgs_os',\n 'weights', 'weights_norm',\n 'deconvolved_components',\n 
'components_deconvolved_ft',\n 'components_shifted_ft',\n 'snr', 'snr_shifted', 'weight_norm',\n 'img_sr', 'img_sr_ft', 'log_file',\n 'mask_wf',\n 'pspec_masks']\n # get dictionary object with images removed\n results_dict = {}\n for k, v in vars(self).items():\n if k in fields_to_not_save or k[0] == '_':\n continue\n results_dict[k] = v\n\n # add some useful info\n results_dict['freq units'] = '1/um'\n results_dict['period units'] = 'um'\n\n if fname is not None:\n with open(fname, 'wb') as f:\n pickle.dump(results_dict, f)\n\n return results_dict", "def save_results(self, name=\"\"):\n raise Exception(\"pure virtual function\")", "def save_result(working_space: str, result: dict) -> None:\n result_path = os.path.join(working_space, 'output')\n if not os.path.exists(result_path):\n os.makedirs(result_path)\n result_path = os.path.join(result_path, 'result.json')\n logging.info(\"Storing result at location: '%s'\", result_path)\n logging.debug(\"Result: %s\", str(result))\n\n with open(result_path, 'w') as out_file:\n json.dump(result, out_file, indent=2)" ]
[ "0.69104457", "0.6841078", "0.66724694", "0.65627193", "0.64784026", "0.64572316", "0.6454891", "0.64234656", "0.6373616", "0.63578653", "0.6343226", "0.6339512", "0.63307554", "0.6286586", "0.6275107", "0.62282044", "0.61683846", "0.616206", "0.61609125", "0.61108345", "0.6095498", "0.60870284", "0.6077734", "0.60724056", "0.6071063", "0.60649467", "0.6061553", "0.60145164", "0.6005002", "0.5985215", "0.5982571", "0.5979589", "0.59760314", "0.5973456", "0.594419", "0.5905897", "0.5875632", "0.58502233", "0.58353245", "0.5832373", "0.58216196", "0.58215624", "0.5795253", "0.57923573", "0.5790272", "0.578368", "0.5781137", "0.5758901", "0.5754363", "0.57526684", "0.57448137", "0.57391226", "0.57353765", "0.57292145", "0.57175004", "0.57019985", "0.5694818", "0.5694061", "0.5692837", "0.5687129", "0.5676427", "0.5667947", "0.56623775", "0.5660714", "0.56604606", "0.56593806", "0.5658265", "0.5656075", "0.565597", "0.56523174", "0.56435734", "0.56391203", "0.5630278", "0.5604104", "0.5603388", "0.5595025", "0.5586024", "0.55819225", "0.5563084", "0.55625105", "0.5558382", "0.555035", "0.5549898", "0.5544781", "0.5540677", "0.55269957", "0.5521058", "0.5507991", "0.55069286", "0.55012196", "0.5500968", "0.5498762", "0.5496271", "0.5491622", "0.54913765", "0.5490163", "0.54691374", "0.5445114", "0.5444552", "0.54311234" ]
0.5672618
61
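The save-results snippets listed in the negatives above mostly share one pattern: build an output path, make sure the directory exists, then serialize a results dict. A minimal sketch of that shared pattern, assuming the results are JSON-serializable; the function name, directory, and filename are illustrative placeholders, not taken from any row:

import json
import os

def save_results(results, out_dir, fname="results.json"):
    # Shared pattern from the negatives above: ensure the output
    # directory exists, dump the results dict as JSON, return the path.
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, fname)
    with open(path, "w") as f:
        json.dump(results, f, indent=2)
    return path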
Generate and format log output.
def generate_log_df(log_columns, log_values):
    return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])
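A minimal usage sketch for the document snippet above, assuming the conventional pandas import; the column names and values are illustrative placeholders, not taken from the dataset:

import pandas as pd

def generate_log_df(log_columns, log_values):
    # One-row DataFrame: each log column is paired with its value.
    return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])

log_df = generate_log_df(["epoch", "train_loss"], [3, 0.217])  # hypothetical names/values
print(log_df)
#    epoch  train_loss
# 0      3       0.217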
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def generateLog(outq1, outq2, outq3):\n # generating formatted string for output of question 1\n output_q1 = \"\\n\\t\\tArticles Ranked by Popularity\\n\"\n output_q1 += '-'*60 + \"\\n\"\n output_q1 += '{0:40} | {1:20}'.format('Article', 'Number Of Views') + '\\n'\n output_q1 += '-'*60 + \"\\n\"\n for ele in outq1:\n output_q1 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q1 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 2\n output_q2 = \"\\n\\t\\tAuthors Ranked by Popularity\\n\"\n output_q2 += '-'*60 + \"\\n\"\n output_q2 += '{0:40} | {1:20}'.format('Authors', 'Number Of Views') + '\\n'\n output_q2 += '-'*60 + \"\\n\"\n for ele in outq2:\n output_q2 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q2 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 3\n output_q3 = \"\\n\\t\\tDays with more than 1% error returns\\n\"\n output_q3 += '-'*60 + \"\\n\"\n output_q3 += '{0:15} | {1:20} | {2:20}'.format(\n 'Date',\n 'Number Of Views',\n 'Number of Errors') + '\\n'\n output_q3 += '-'*60 + \"\\n\"\n for ele in outq3:\n output_q3 += '{0:15} | {1:20} | {2:15}'.format(\n str(ele[0]),\n ele[1], ele[2]) + \"\\n\"\n output_q3 += '-'*60 + \"\\n\"\n\n with open('report.txt', 'w') as f:\n f.write(output_q1)\n f.write(output_q2)\n f.write(output_q3)\n f.close()\n\n print(output_q1)\n print(output_q2)\n print(output_q3)", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). 
New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def output_log():\r\n log_str = (\"Contents:\\n\"\r\n f\"Input file: {LOC}\\n\"\r\n f\"Length checked: {LENGTH}\\n\"\r\n f\"Rows where {COL_NAME} is non_standard: {FLAGGED_FILE}\\n\"\r\n f\"NB, if {FLAGGED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\\n\"\r\n f\"Rows where {COL_NAME} is of standard length: {CLEANED_FILE}\\n\"\r\n f\"NB, if {CLEANED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\"\r\n f\"Rows where {COL_NAME} is NULL: {NULL_FILE}\\n\"\r\n f\"NB, if {NULL_FILE} does not exist, no values with\"\r\n )\r\n with open(LOG_FILE, 'w+') as f:\r\n f.write(log_str)", "def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f = open(filenameF, \"a+\") # Log saved in directory named logs located in same directory as this file\r\n \r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages)):\r\n # f.write('%s\\t\\t\\t' % self._voltages[i][0])\r\n # else:\r\n for i in range(len(self._voltages)):\r\n f.write('%s\\t\\t' % self._voltages[i][0])\r\n f.write('\\n')\r\n\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages[0])):\r\n # line = \"\"\r\n # for j in range(len(self._voltages)):\r\n # line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][2*i]) + \\\r\n # str(self._intensity[j][2*i + 1]) + '\\t'\r\n # f.write(line)\r\n # f.write('\\n')\r\n # else: \r\n for i in range(len(self._voltages[0])):\r\n line = \"\"\r\n for j in range(len(self._voltages)):\r\n line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][i]) + '\\t' \r\n f.write(line)\r\n f.write('\\n')", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def create_logs(self):\n print(\"creating logs...\")\n with open(self.log_file,'w') as log:\n writer = csv.writer(log)\n writer.writerow(['population',\n 'avg_age',\n 'avg_surv',\n 'avg_repro',\n # 'avg_neighbors_1',\n # 'avg_neighbors_2',\n # 'avg_neighbors_3',\n # 'avg_neighbors_4',\n # 'avg_neighbors_5',\n # 'avg_neighbors_6',\n # 'avg_neighbors_7',\n # 'avg_neighbors_8',\n 'number_of_clusters',\n 'clusters_10e1',\n 'clusters_10e2',\n 'clusters_10e3',\n 'clusters_10e4',\n 'clusters_10e5'])\n print(\"Logs created @ {}\".format(self.log_file))", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n 
self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def appendLog(self):\n if self.logBuffer == None :\n self.logBuffer = \"Some header\\nhere\\n\\n\"\n self.logBuffer += \"\\tx\\ty\\ttheta : ul\\tur\\tt-neur\\n\";\n \n self.logBuffer += '%2.1f: %2.6f\\t %2.6f\\t %2.6f : ' % \\\n\t ( self.t, self.env.state[0], self.env.state[2], self.env.state[4] )\n self.logBuffer += '%1.3f\\t %1.3f \\t%1.2f \\t' % \\\n ( self.env.action[0], self.env.action[1], self.env.action[2] )\n self.logBuffer += 'Dst/Theta/Speed: \\t%f\\t%f\\t%f \\tF: %.2f \\n' % \\\n ( self.env.getDistance(), self.env.getOrientation(), self.env.getDistance(), self.getReward() )", "def fLOG (*l, **p) :\n path_add = p.get (\"LogPathAdd\", [] )\n\n lock = p.get(\"Lock\", None)\n if lock is not None : sys.hal_log_values[\"Lock\"] = lock\n \n if \"LogFile\" in p and \"LogPath\" in p : init (p [\"LogPath\"], p [\"LogFile\"])\n elif \"LogFile\" in p : init (filename = p [\"LogFile\"], path_add = path_add)\n elif \"LogPath\" in p : init (path = p [\"LogPath\"], path_add = path_add)\n \n def myprint(s): print(s)\n \n if \"OutputPrint\" in p : \n Print (p [\"OutputPrint\"])\n \n if \"LogFile\" in p :\n logfile = GetLogFile(True)\n \n dt = datetime.datetime (2009,1,1).now ()\n if len (l) > 0 :\n def _str_process (s) :\n if isinstance (s, str) : return s\n elif isinstance(s, bytes) : return s.decode(\"utf8\")\n else : \n try:\n return str (s)\n except Exception as e :\n raise Exception(\"unable to convert s into string: type(s)=\" + str(type(s))) from e\n \n message = str (dt).split (\".\")[0] + \" \" + \" \".join ( [_str_process(s) for s in l ] ) + sys.hal_log_values [\"__log_file_sep\"]\n \n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n try :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n except UnicodeEncodeError :\n try :\n rr = repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")\n for r in rr :\n myprint (r.encode(\"utf8\"))\n except UnicodeEncodeError :\n myprint (\"look error in log file\")\n GetLogFile ().write (message)\n st = \" \"\n else :\n st = str (dt).split (\".\")[0] + \" \"\n \n for k,v in p.items () :\n if k == \"OutputPrint\" and v : continue\n message = st + \"%s = %s%s\" % (str (k), str (v), sys.hal_log_values [\"__log_file_sep\"])\n if \"INNER JOIN\" in message :\n break\n GetLogFile ().write (message)\n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n GetLogFile ().flush ()", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def out_file_core():\n date = str(datetime.datetime.now().strftime(\"%Y%d%m_%H%M%S\"))\n return f\"log-{date}-{str(uuid.uuid4())}\"", "def logOutput(self, line):\r\n 
self.writeToLog('output', line)", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,\r\n formatdb_cmd, blast_results, options, all_ids,\r\n hit_ids, removed_hit_ids,\r\n included_ids, DEBUG):\r\n\r\n log_lines = []\r\n log_lines.append(\"Sequence exclusion analysis run on %s\" % strftime(\"%c\"))\r\n log_lines.append(\r\n \"Formatting subject database took %2.f seconds\" %\r\n (db_format_time))\r\n log_lines.append(\r\n \"BLAST search took %2.f minute(s)\" %\r\n ((blast_time) / 60.0))\r\n log_lines.append(\r\n \"Total analysis completed in %2.f minute(s)\" %\r\n ((time() - start_time) / 60.0))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Options |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.extend(option_lines)\r\n log_lines.append(\"Subject database formatted with command: %s\"\r\n % formatdb_cmd)\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Results |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"BLAST results above e-value threshold:\")\r\n log_lines.append(\r\n \"\\t\".join([\"Query id\", \"Subject id\", \"percent identity\", \"alignment length\",\r\n \"mismatches\", \"gap openings\", \"q. start\", \"q. end\", \"s. start\", \"s. end\", \"e-value\", \"bit score\"]))\r\n\r\n for line in blast_results:\r\n if line.startswith(\"#\"):\r\n continue\r\n else:\r\n log_lines.append(line)\r\n\r\n log_lines.append(\r\n \"Hits matching e-value and percent alignment filter: %s\" %\r\n ','.join(sorted(hit_ids)))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Summary |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"Input query sequences: %i\" % len(all_ids))\r\n log_lines.append(\r\n \"Query hits from BLAST: %i\" %\r\n (len(hit_ids) + len(removed_hit_ids)))\r\n log_lines.append(\r\n \"Query hits from BLAST lacking minimal percent alignment: %i\" %\r\n len(removed_hit_ids))\r\n log_lines.append(\"Final hits: %i\" % len(hit_ids))\r\n log_lines.append(\"Output screened sequences: %i\" % len(included_ids))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Output |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\r\n \"Writing excluded sequences (hits matching filters) to: %s\" %\r\n join(options.outputdir, \"matching.fna\"))\r\n log_lines.append(\r\n \"Writing screened sequences (excluding hits matching filters) to: %s\" %\r\n join(options.outputdir, \"non-matching.fna\"))\r\n log_lines.append(\r\n \"Writing raw BLAST results to: %s\" %\r\n join(options.outputdir, 'raw_blast_results.txt'))\r\n\r\n # format for printing\r\n revised_log_lines = []\r\n for line in log_lines:\r\n line = line + \"\\n\"\r\n revised_log_lines.append(line)\r\n\r\n if DEBUG:\r\n for line in log_lines:\r\n print line\r\n\r\n return revised_log_lines", "def template(self, record):\n\n def _log_format_onecolor(record):\n \"\"\"\n Normal console output format\n \"\"\"\n\n return LEVEL_COLORS.get(record.levelname)\n\n def _log_format_notset(record, stylized=True):\n \"\"\"\n Default log format.\n \"\"\"\n\n reset = Style.RESET_ALL\n\n levelname = {\n 'style_before': LEVEL_COLORS.get(record.levelname) + Style.BRIGHT,\n 'format': '(%(levelname)s)',\n 'style_after': reset,\n 'prefix': '',\n 'suffix': '',\n 
}\n\n name = {\n 'style_before': Fore.WHITE + Style.DIM + Style.BRIGHT,\n 'format': '%(name)s',\n 'style_after': Fore.RESET + Style.RESET_ALL,\n 'prefix': ' ',\n 'suffix': ' ',\n }\n\n # format prefix + style_before + message + style_after + suffix\n result = reset\n for i in [levelname, name]:\n result += f\"{i['prefix']}{i['style_before']}{i['format']}{i['style_after']}{i['suffix']}\"\n result += reset\n\n return result\n\n # Template Switcher\n templates = {\n 'NOTSET': _log_format_notset,\n 'INFO': _log_format_onecolor,\n 'DELIMITER': _log_format_onecolor,\n 'TOPIC': _log_format_onecolor,\n 'WARNING': _log_format_onecolor,\n }\n\n return templates.get(record.levelname, _log_format_notset)(record)", "def build_log(self):\n if not self._build_log_text:\n self._build_log_text = self._cat('/tmp/log')\n return self._build_log_text", "def getLogs():", "def getLogs():", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def writelog(self,*args):\n import sys\n print(' '.join([str(a) for a in args]),file=sys.stderr)", "def print_to_log(self, output):\n print(output)", "def logging_template():\n template = (\n '[loggers]\\n'\n 'keys=root\\n'\n '\\n'\n '[handlers]\\n'\n 'keys=consoleHandler\\n'\n '\\n'\n '[formatters]\\n'\n 'keys=simpleFormatter\\n'\n '\\n'\n '[logger_root]\\n'\n 'level=DEBUG\\n'\n 'handlers=consoleHandler\\n'\n '\\n'\n '[handler_consoleHandler]\\n'\n 'class=StreamHandler\\n'\n 'level=DEBUG\\n'\n 'formatter=simpleFormatter\\n'\n 'args=(sys.stdout,)\\n'\n '\\n'\n '[formatter_simpleFormatter]\\n'\n 'format=%(asctime)s - %(name)s - %(levelname)s - %(message)s\\n'\n 'datefmt=\\n')\n return template", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. 
We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def generate_log_filename():\n return \"LOG_\"+strftime(\"(%Y-%m-%d)_%H-%M-%S\", gmtime())+\".txt\"", "def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()", "def print_log(*content):\n now = datetime.datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")\n print(\"MODEL INFO: \" + str(now)+ \" \", end='')\n print(*content)", "def log(self):\n LOG = {'tables': [{'state': self.state, 'name': 'Hearing', 'inserted': self.H_INS, 'updated': 0, 'deleted': 0},\n {'state': self.state, 'name': 'CommitteeHearing', 'inserted': self.CH_INS, 'updated': 0, 'deleted': 0},\n {'state': self.state, 'name': 'HearingAgenda', 'inserted': 
self.HA_INS, 'updated': self.HA_UPD,\n 'deleted': 0}]}\n self.logger.info(LOG)\n sys.stdout.write(json.dumps(LOG))", "def instantiate_logs(self):\n\n # Log file\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n self.log_dir = os.path.join(\"experiment_logs\", timestamp)\n\n # Create Log directory if it does not exist\n try:\n os.makedirs(self.log_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n self.info_file = os.path.join(self.log_dir, \"run_info.txt\")\n self.log_file = os.path.join(self.log_dir, \"data.csv\")\n\n with open(self.info_file, \"w+\") as f:\n f.write(\"Period = {}\\nMaxVel = {}\".format(self.period, self.max_vel))\n\n self.log_file_desc = open(self.log_file, \"w+\")\n self.log_file_desc.write(\"t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw\")", "def getLog(self):\n pass", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def createLogHeader(self,):\n \n #\n # Imports\n #\n import sys\n import getpass\n import commands\n from socket import gethostname\n \n #\n # get information\n #\n username = getpass.getuser()\n computer = gethostname()\n \n #\n # create the header\n #\n output = ''\n output += 'Running program: '+self.commandLine+'.\\n'\n output += 'time: '+self.startTimeStr+'\\n'\n output += 'Master process id='+str(MASTER)+'\\n'\n output += 'Started by user = '+username+' on host = '+computer+'\\n'\n if self.onUppmax: output += 'Program is run on uppmax, any temporary files will be placed in '+commands.getoutput('echo $SNIC_TMP')+' .\\n'\n \n return output", "def format(self, record: LogRecord) -> str:\n record.asctime = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n message = record.getMessage()\n if record.exc_info:\n eno = record.exc_info\n stacktrace = \"\".join(traceback.format_exception(None, eno[1], eno[2]))\n message += f\" excp: {stacktrace}\"\n if record.stack_info:\n stack = self.formatStack(record.stack_info)\n message += f\" trace: {stack}\"\n\n log_output = {\n \"tool\": type(self.checker).__name__,\n \"type\": \"infrastructure\",\n \"severity\": record.levelname,\n \"severityLevel\": max(0, record.levelno // 10 - 1),\n \"timestamp\": record.asctime,\n \"module\": record.module,\n \"function\": record.funcName,\n \"flag\": self.checker.flag,\n \"flagIndex\": self.checker.flag_idx,\n \"runId\": self.checker.run_id,\n \"roundId\": self.checker.round,\n \"relatedRoundId\": self.checker.flag_round,\n \"message\": message,\n \"teamName\": self.checker.team,\n \"teamId\": self.checker.team_id,\n \"serviceName\": self.checker.service_name,\n \"method\": self.checker.method,\n }\n\n return LOGGING_PREFIX + json.dumps(log_output)", "def setup_log():\n\n #logging.basicConfig(filename='log.txt',filemode='a',format='%(asctime)s %(threadName)s %(filename)s %(funcName) %(lineno) %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n\n #get the root logger\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n #set up logging to console for INFO and worse\n sh = colorlog.StreamHandler()\n sh.setLevel(logging.INFO)\n #sh_formatter = colorlog.Formatter(fmt='%(log_color)s%(levelname):%(asctime)s\\n%(message)s', datefmt='%H:%M:%S')\n sh_formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s - %(name)-25s - %(threadName)-15s - %(asctime)s - %(cyan)s \\n 
%(message)s\\n\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%'\n)\n sh.setFormatter(sh_formatter)\n\n #set up logging to file for ALL messages\n #fh = logging.FileHandler('log.txt')\n # fh = logging.handlers.TimedRotatingFileHandler('log.txt', when='midnight', interval=1, backupCount=7)\n # fh.setLevel(logging.DEBUG)\n # fh_formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d - %(threadName)s - %(filename)s.%(funcName)s.%(lineno)s - %(levelname)s\\n%(message)s\\n\\n', datefmt='%Y/%m/%d %H:%M:%S')\n # fh.setFormatter(fh_formatter)\n\n #put the handlers to use\n logger.addHandler(sh)\n # logger.addHandler(fh)", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def write_log(self, msg, level = \"DEBUG\"):\r\n if len(self.parent)> 13:\r\n spacer = \"\\t\"\r\n elif len(self.parent) < 8:\r\n spacer = \"\\t\\t\\t\"\r\n else:\r\n spacer = \"\\t\\t\"\r\n \r\n log = level + \"\\t\" + self.parent +spacer +str(msg)\r\n print(log)", "def to_logchunk(self):\n\t\tdemo_name = os.path.splitext(self.demo_name)[0]\n\t\tto_write = [(\"Killstreak\", value, tick, date) for value, tick, date in self.killstreaks]\n\t\tto_write.extend((\"Bookmark\", value, tick, date) for value, tick, date in self.bookmarks)\n\n\t\tto_write.sort(key = lambda t: t[2])\n\n\t\treturn \"\\n\".join(\n\t\t\tf'[{date}] {type_} {value} (\"{demo_name}\" at {tick})'\n\t\t\tfor type_, value, tick, date in to_write\n\t\t)", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def print_log (self, n = None):\r\n\t\tif n is None:\r\n\t\t\tn = len(self.log)\r\n\t\t\r\n\t\tfor i in range(-n,0):\r\n\t\t\tprint('@ {0: 8.1f} ms, {1} : {2}'.format(1000*self.log[i]['proctime'], self.log[i]['type'], self.log[i]['desc']) )", "def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)", "def format(self, record: 
logging.LogRecord = None) -> str:\n # s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s", "def pretty_end_log(title):\n output = '>' * 10 + ' ' + title + ' ' + '<' * 10 + '\\n\\n'\n return output", "def write_output(self):", "def outputLogFormatter(log):\n if log.get(\"blockNumber\"):\n log[\"blockNumber\"] = to_decimal(log[\"blockNumber\"])\n if log.get(\"transactionIndex\"):\n log[\"transactionIndex\"] = to_decimal(log[\"transactionIndex\"])\n if log.get(\"logIndex\"):\n log[\"logIndex\"] = to_decimal(log[\"logIndex\"])\n\n return log", "def emit(self, record):\n try:\n msg = self.format(record)\n log_level = record.levelno\n self.write_log(msg, log_level)\n except Exception:\n self.handleError(record)", "def format_output(output, case_number, status):\n output.append(\"Case #%s: %s\" % (case_number, status))", "def create_report_logging():\n print(\"Creating report\")\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(filename=os.path.join(log_path, \"client_stream_report.log\"),\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.INFO)\n logging.info(\"Performance of client-streaming: Average_size: %s bytes, average ingest time: %s seconds, ingestion_rate: %s byte/sec\" \"number _of_messages: %s\"\n %(performance[\"avg_size\"], performance[\"avg_ingest_time\"], performance[\"rate\"], performance[\"number_of_messages\"]))\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)", "def export_log(self):\r\n if self.log[\"datetime\"] is not None and not self.log[\"datetime\"] == \"\":\r\n logs_dir = ''\r\n user = 'default'\r\n program_data = 'data\\program_data.json5'\r\n with open(program_data) as f:\r\n config = json.load(f)\r\n logs_dir = config.get(\"logs_records_path\", \"\")\r\n user = config.get(\"user\", \"default\")\r\n file_name = user+\" \"+self.log[\"datetime\"].replace(\"/\", \"\")\r\n file_name = file_name.replace(\" \", \"_\")\r\n file_name = file_name.replace(\":\", \"\")\r\n cwd = os.getcwd()\r\n if not logs_dir == \"\" and os.path.exists(logs_dir):\r\n if not user in os.listdir(logs_dir):\r\n os.makedirs(os.path.join(logs_dir, user))\r\n logs_dir = os.path.join(logs_dir, user)\r\n file_name = os.path.join(logs_dir, file_name)\r\n self.save_records(file_name)\r\n elif \"logs\" in os.listdir(cwd):\r\n folder = os.path.join(cwd, \"logs\")\r\n file_name = os.path.join(folder, file_name)\r\n self.save_records(file_name)\r\n self.reset_values()", "def build_log_entry(\n hostname: str, user: str, date: dt.datetime, wdir: Path, cmd: str\n) -> str:\n return (\n f'[{date.strftime(\"%Y-%m-%d %H:%M:%S\")}] ({user}@{hostname}) '\n f\"{wdir}\\n\\t{cmd}\\n\"\n )", "def create_output(self, messages):", "def commandLog(self,):\n \n #\n # get 
optional arguments from commandline\n #\n self.getComandLineOptions()\n\n #\n # Add run to runs table and open connection to logfile\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, False, MASTER)\n self.openLogfileConnection()\n SEAseqPipeLine.logfile.write(self.createLogHeader())\n \n # default all types of commands run\n runTypes = self.availableCommands.keys()\n \n SEAseqPipeLine.logfile.write('Writing commandLog to standard out.\\n')\n print 'Getting runs performed with the following commands '+', '.join(runTypes[:-1])+' or '+runTypes[-1]+'.'\n print '# StartTime: \\tFinished:\\tCommand:'\n for startTime, command, commandLine, finishedSuccessfully, masterPid in self.database.getRuns(runTypes):\n print str(startTime)+' \\t'+str(bool(finishedSuccessfully))+' \\t'+str(commandLine)\n \n #\n # update runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n SEAseqPipeLine.logfile.write('Finished exiting.\\n')", "def log(text):\n print \"%s: %s\" % (str(datetime.datetime.now()), text)", "def write_report(self):\r\n self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')\r\n server_log.info('')\r\n server_log.info('=========================================================')\r\n server_log.info('All test clients completed!')\r\n server_log.info(' Start time: {}'.format(self.start_time))\r\n server_log.info(' End time: {}'.format(self.end_time))\r\n server_log.info('')\r\n server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))\r\n for client in self.client_list.values():\r\n server_log.info('---------------------------------------------------------')\r\n server_log.info(' Client {}'.format(client.client_id))\r\n server_log.info(' Test status: {}'.format(client.status))\r\n server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran)) \r\n server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))\r\n server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))\r\n server_log.info(' Files written: {}'.format(client.files_written))\r\n server_log.info(' File size: {}'.format(client.file_size))\r\n server_log.info(' Chunk size: {}'.format(client.chunk_size))\r\n server_log.info('=========================================================')\r\n server_log.info('')", "def Log(self, times):\n\n print '--'\n print times.PrettyPrintLog()\n\n return", "def logToFile(output, file): \r\n print( output, file=file )", "def write_log(self):\n if self.hash_log_curr:\n temp_dict = {}\n count = 0\n for key, value in self.hash_log_curr.iteritems():\n temp_dict[value[4] + str(count)] = key\n count += 1\n temp_sort = temp_dict.keys()\n temp_sort.sort()\n temp_sort.reverse()\n\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'w')\n # log header\n log.write(self.log_header)\n # write hash_log_content to log\n for key in temp_sort:\n value = self.hash_log_curr[temp_dict[key]]\n log.write(value[0]+'|'+value[1]+'|'+value[2]+'|'+value[3]+'|'+value[4]+'|'+value[5] + '\\n')\n log.close()\n self.print_to_log('New log writen to file: ' + self.log_path + r'\\hash_log.txt' )\n except IOError:\n self.print_to_log('Cannot open log file to write')\n raise\n except:\n self.print_to_log('Unknown Error')\n raise", "def build_custom_log(\n dp_shell_history: Path,\n fp_results: Path,\n *,\n daterange: List[str],\n username: str = None,\n wdir: Path = None,\n hostname: str = None,\n regexp: str = None,\n unique: bool = False,\n) -> None:\n dt_start, dt_end = 
get_daterange(daterange)\n\n log.trace(\"dt_start: {}\", dt_start) # type: ignore\n log.trace(\"dt_end: {}\", dt_end) # type: ignore\n\n hostname = os.uname().nodename if hostname is None else hostname\n regexp = \".*\" if regexp is None else regexp\n\n with fp_results.open(\"w\") as f:\n f.write(f\"# vim: filetype={SCRIPTNAME}\\n\\n\")\n\n dt_tmp = dt_start\n entry_count = 0\n while date_ym_value(dt_tmp) <= date_ym_value(dt_end):\n fp_log = Path(\n f\"{dp_shell_history}/{hostname}/{dt_tmp.year}/\"\n f\"{str(dt_tmp.month).zfill(2)}.log\"\n )\n\n try:\n if hostname.lower() == \"all\":\n fp_log = merge_hosts(\n dp_shell_history, dt_tmp.year, dt_tmp.month\n )\n\n skip_date_check = (\n dt_tmp.month != dt_start.month or dt_tmp.year != dt_start.year\n ) and (dt_tmp.month != dt_end.month or dt_tmp.year != dt_end.year)\n\n log_lines = process_logfile(\n fp_log,\n dt_start=dt_start,\n dt_end=dt_end,\n regexp=regexp,\n username=username,\n wdir=wdir,\n unique=unique,\n skip_date_check=skip_date_check,\n )\n\n with fp_results.open(\"a+\") as f:\n f.writelines(log_lines)\n\n entry_count += len(log_lines)\n except LogsNotFound:\n log.debug(f\"No Log Files for {dt_tmp.month}-{dt_tmp.year} Exist.\")\n finally:\n dt_tmp = dt_tmp + relativedelta(months=1)\n\n with fp_results.open(\"a+\") as f:\n f.write(\n f\"# Number of shell commands matched by {SCRIPTNAME} query: \"\n f\"{entry_count}\"\n )", "def make_log_context(log_events, width=None):\n error_lines = set(e.line_no for e in log_events)\n log_events = sorted(log_events, key=lambda e: e.line_no)\n\n num_width = len(str(max(error_lines or [0]))) + 4\n line_fmt = \"%%-%dd%%s\" % num_width\n indent = \" \" * (5 + num_width)\n\n if width is None:\n _, width = tty.terminal_size()\n if width <= 0:\n width = sys.maxsize\n wrap_width = width - num_width - 6\n\n out = StringIO()\n next_line = 1\n for event in log_events:\n start = event.start\n\n if isinstance(event, BuildError):\n color = \"R\"\n elif isinstance(event, BuildWarning):\n color = \"Y\"\n else:\n color = \"W\"\n\n if next_line != 1 and start > next_line:\n out.write(\"\\n ...\\n\\n\")\n\n if start < next_line:\n start = next_line\n\n for i in range(start, event.end):\n # wrap to width\n lines = _wrap(event[i], wrap_width)\n lines[1:] = [indent + ln for ln in lines[1:]]\n wrapped_line = line_fmt % (i, \"\\n\".join(lines))\n\n if i in error_lines:\n out.write(colorize(\" @%s{>> %s}\\n\" % (color, cescape(wrapped_line))))\n else:\n out.write(\" %s\\n\" % wrapped_line)\n\n next_line = event.end\n\n return out.getvalue()", "def create_log(self):\n self.model.graph.get_stats()\n out = self.model.graph.summary\n out[\"training_error\"] = zip(self.train_it, self.train_err)\n out[\"validation_error\"] = zip(self.validation_it, self.validation_err)\n with open(self.log, \"w\") as f:\n f.write(json.dumps(out, default=defaultencode))", "def create_log(self, num_machines):\n\n # generates a folder for logs if one does not exist\n os.makedirs('logs', exist_ok=True)\n\n # record extra info at the top of the log file\n extra_info = [f'num machines: {num_machines}', f'ticks per second: {self.ticks_per_second}', f'lifetime: {self.lifetime}']\n dummy_info_dict = {k:info for k, info in zip(LogEntry.ENTRY_ORDER, extra_info)}\n\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n writer.writerow(dummy_info_dict)\n writer.writeheader()", "def _stab_log_data(self, timestamp, data, logconf):\n print('[%d][%s]: %s' % (timestamp, logconf.name, data))", 
"def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')", "def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))", "def summarise(thislog):\n\n # Logfile name\n print(\"Summary for \" + thislog.filename() + \"\\n\")\n # Was it from CCP4i?\n if thislog.isccp4i():\n print(\"This is a CCP4i logfile\\n\")\n # Number of programs or pseudo-programs\n print(str(thislog.nfragments()) + \" logfile fragments\\n\")\n print(\"Fragments:\")\n for i in range(0, thislog.nfragments()):\n fragment = thislog.fragment(i)\n if fragment.isprogram():\n if fragment.has_attribute(\"name\"):\n print(\"\\tProgram: \" + str(fragment.name))\n else:\n print(\"\\tProgram: <no name>\")\n else:\n if fragment.isccp4i_info():\n print(\"\\tCCP4i info\")\n elif fragment.isfragment():\n print(\"\\tFragment\")\n if fragment.ntables():\n print(\"\\t\\t\" + str(fragment.ntables()) + \" tables\")\n if fragment.nkeytexts():\n print(\"\\t\\t\" + str(fragment.nkeytexts()) + \" keytexts\")\n\n print(\"\")\n # Summarise program logfile fragments\n if thislog.nprograms() > 0:\n print(str(thislog.nprograms()) + \" program logfiles\\n\")\n print(\"Programs:\")\n for i in range(0, thislog.nprograms()):\n prog = thislog.program(i)\n # Is it a CCP4 program?\n if prog.isccp4():\n # Print name, version (and CCP4 version)\n print(\n \"\\t\"\n + prog.name\n + \"\\tv\"\n + prog.version\n + \"\\t(CCP4 \"\n + prog.ccp4version\n + \")\"\n )\n else:\n # Print name and version\n if prog.has_attribute(\"name\") and prog.has_attribute(\"version\"):\n print(\"\\t\" + prog.name + \"\\t\" + prog.version)\n else:\n print(\"\\t<No name and/or version>\")\n if prog.termination():\n print(\"\\tTerminated with: \" + prog.termination_message)\n else:\n print(\"\\tNo termination message found\")\n # Keytexts\n if prog.nkeytexts():\n print(\"\\n\\t\\tKeytext messages:\")\n for j in range(0, prog.nkeytexts()):\n print(\n \"\\t\\t\"\n + str(prog.keytext(j).name())\n + ': \"'\n + str(prog.keytext(j).message())\n + '\"'\n )\n # Tables\n if prog.ntables():\n print(\"\\n\\t\\tTables:\")\n for table in prog.tables():\n print('\\t\\tTable: \"' + table.title() + '\"')\n print(\"\")\n else:\n print(\"No program logfiles found\")\n print(\"\")\n # Total set of CCP4i information messages in the file\n print(\"CCP4i messages in file:\")\n if thislog.nccp4i_info():\n for i in range(0, thislog.nccp4i_info()):\n print('\\tCCP4i info: \"' + thislog.ccp4i_info(i).message + '\"')\n else:\n print(\"\\tNo messages found\")\n print(\"\")\n # Total set of tables in the file\n print(\"Tables in file:\")\n if thislog.ntables():\n for table in thislog.tables():\n print('\\tTable: \"' + table.title() + '\" (' + str(table.nrows()) + \" rows)\")\n else:\n print(\"\\tNo tables found\")\n print(\"\")\n # Total set of keytexts in the file\n print(\"Keytext messages in file:\")\n if thislog.nkeytexts():\n for i in range(0, thislog.nkeytexts()):\n print(\n \"\\t\"\n + str(thislog.keytext(i).name())\n + ': \"'\n + thislog.keytext(i).message()\n + '\"'\n )\n else:\n print(\"\\tNo keytext messages found\")\n print(\"\")", "def emit(self, record):\n try:\n msg = self.format(record)\n log_level = record.levelno\n self.write_log_buffer(msg, log_level)\n except Exception:\n self.handleError(record)", "def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % 
(time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)", "def log(content):\n\n now = datetime.datetime.now().strftime(\"%c\")\n now_time = time.time()\n # msg_last = '{} - {: >5.1f} seconds - {}'.format(now, now_time - TIME_LAST, content)\n\n if Logger._time_last is not None:\n msg_last = Logger.human_seconds(now_time - Logger._time_last)\n else:\n msg_last = ' ' * 13\n\n msgs = [now, msg_last, content]\n\n msg = \" │ \".join(msgs)\n\n msg_lines = [\"─\" * len(content) for content in msgs]\n\n msg_top = \"─┬─\".join(msg_lines)\n msg_lower = \"─┴─\".join(msg_lines)\n\n print(\" ┌─{}─┐\".format(msg_top))\n print(\" │ {} │\".format(msg))\n print(\" └─{}─┘\".format(msg_lower))\n\n Logger._time_last = time.time()", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def log(info):\n print(f\"[{info}]\")", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def create_logfile(output_dir, chains):\n with open(os.path.join(output_dir, LOGFILE), 'w+') as f:\n chain_columns = ', '.join(chains)\n f.write('alignment, converged, loglik_effsize, loglik_rel_diff, max_diff, ' + chain_columns)", "def command(ctx):\n ctx.setup_logger(format='')", "def write(self, msg, flag_print=True):\n file = open(self.log_path, \"a\")\n insert_time=datetime.now().strftime('%H:%M:%S.%f')[:-3]\n current_time = \"[\"+insert_time+\"]\"\n log_msg = current_time + \" \" + msg + \"$\" +\"\\n\" \n file.write(log_msg)\n # if flag_print is True:\n print(log_msg)", "def log(pro, logname, savepng=True):\n\n # Load processed data variables\n rawfiles = pro['rawfiles']\n transect = pro['transect']\n t120 = pro['t120' ]\n r120 = pro['r120' ]\n Sv120 = pro['Sv120' ]\n Sv120sw = pro['Sv120sw' ]\n t120r = pro['t120r' ]\n t120intrvls = pro['t120intervals']\n nm120r = pro['nm120r' ]\n lon120r = pro['lon120r' ]\n lat120r = pro['lat120r' ]\n sbline120r = pro['sbliner' ][0,:] \n NASC120swr = pro['NASC120swr'][0,:]\n pc120swr = pro['pc120swr'][0,:]\n \n # Build summary results\n results = {'Time' : np.array(t120r , dtype=str) ,\n 'Longitude': np.round(lon120r , 5) ,\n 'Latitude' : np.round(lat120r , 5) ,\n 'Transect' : np.ones(len(t120r ), dtype=int)*transect,\n 'Miles' : nm120r ,\n 'Seabed' : np.round(sbline120r , 1) ,\n 'NASC' : np.round(NASC120swr , 2) ,\n '% samples': np.round(pc120swr , 1) }\n results = pd.DataFrame(results, columns= ['Time' , 'Longitude',\n 'Latitude' , 'Transect' ,\n 'Miles' , 'Seabed' ,\n 'NASC' , '% samples'])\n \n # Create new log subdirectory\n path = os.path.join(os.path.dirname(__file__), '..', 'log', logname, '')\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Write results in CSV log file\n with open(path+logname+'.csv', 'a') as f:\n results.to_csv(path+logname+'.csv', index=False, mode='a',\n header=f.tell()==0) \n \n # save png image\n if savepng:\n \n # set figure\n plt.close()\n plt.figure(figsize=(8, 8))\n plt.subplots_adjust(left=0.066, right=1.055, bottom=0.065, top=0.985,\n wspace=0, hspace=0.05)\n plt.rcParams.update({'font.size': 9, 'lines.linewidth': 1})\n \n # plot raw echogram\n plt.subplot(211).invert_yaxis()\n im=plt.pcolormesh(t120, r120, Sv120,\n vmin=-80, vmax=-50, cmap=cmaps().ek500)\n plt.colorbar(im).set_label('Sv raw (dB re 1m$^{-1}$)')\n plt.gca().set_ylim(270,0)\n plt.gca().set_ylabel('Depth (m)')\n 
plt.gca().set_xlim(t120intrvls[0], t120intrvls[-1])\n plt.gca().set_xticks(t120intrvls[[0,-1]])\n plt.tick_params(labelright=False, labelbottom=False)\n \n # plot processed echogram\n ax= plt.subplot(212)\n ax = [ax, ax.twinx()]\n im=ax[0].pcolormesh(t120, r120, Sv120sw,\n vmin=-80,vmax=-50, cmap=cmaps().ek500)\n plt.colorbar(im).set_label('Sv pro (dB re 1m$^{-1}$)')\n ax[0].invert_yaxis()\n ax[0].set_ylim(270,0)\n ax[0].set_ylabel('Depth (m)')\n \n # overlay distance/NASC info\n for t, nm, NASC in zip(t120r, nm120r, NASC120swr):\n ax[1].plot([t, t], [0, 1], color=[0,.8,0], linewidth=2)\n ax[1].text(t, .95, ' ' + str(transect) + ': ' + str(round(nm,2)),\n fontweight='bold', color=[0,.8,0])\n ax[1].text(t, .02, ' ' + str(round(NASC,2)),\n fontweight='bold', color=[1,0,0]) \n ax[1].set_ylim(0, 1)\n ax[1].set_xlim(t120intrvls[0], t120intrvls[-1])\n ax[1].set_xticks(t120intrvls[[0,-1]])\n ax[1].tick_params(labelright=False)\n ax[1].xaxis.set_major_formatter(mdates.DateFormatter('%d%b-%H:%M:%S'))\n \n # save figure\n pf = rawfiles[0].split('-')[0]\n fn = pd.to_datetime(str(t120[0])).strftime(pf + '-D%Y%m%d-T%H%M%S')\n plt.savefig(path+fn+'.png' ,figsize=(8, 8), dpi=100)\n plt.close()", "def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result", "def outputlogMessage(message):\n global logfile\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n f.writelines(outstr+'\\n')\n f.close()", "def build_logger(self):\n pass", "def output_logs(flag_depth, use_json, all_flag):\n use_json = \"--json\" if use_json else \"\"\n all_flag = \"--all\" if all_flag else \"\"\n flags = \" \".join([\"--archive\", \"--repo\", \"--backup\", \"--bucket\"][: flag_depth + 1])\n self.log.info(\"---\")\n self.log.info(f\"Testing Flags: {flags} {use_json} {all_flag}\")\n self.log.info(\"---\")", "def log(self, log_directly = True):\n stats = self.get_stats()\n logging_dict = dict(advantage_mean = _seq_mean(stats[\"advantage_mean\"]),\n critic_grad_norm = _seq_mean(stats[\"critic_grad_norm\"]),\n critic_loss =_seq_mean(stats[\"critic_loss\"]),\n policy_grad_norm = _seq_mean(stats[\"policy_grad_norm\"]),\n policy_loss = _seq_mean(stats[\"policy_loss\"]),\n target_critic_mean = _seq_mean(stats[\"target_critic_mean\"]),\n T_critic=self.T_critic,\n T_policy=self.T_policy\n )\n logging_str = \"T_policy={:g}, T_critic={:g}, \".format(logging_dict[\"T_policy\"], logging_dict[\"T_critic\"])\n logging_str += 
_make_logging_str(_copy_remove_keys(logging_dict, [\"T_policy\", \"T_critic\"]))\n\n if log_directly:\n self.logging_struct.py_logger.info(\"{} LEARNER INFO: {}\".format(self.args.learner.upper(), logging_str))\n\n return logging_str, logging_dict", "def print_aldb_to_log(aldb):\n _LOGGER.info(\"ALDB load status is %s\", aldb.status.name)\n if aldb.status not in [ALDBStatus.LOADED, ALDBStatus.PARTIAL]:\n _LOGGER.warning(\"Device All-Link database not loaded\")\n _LOGGER.warning(\"Use service insteon.load_aldb first\")\n return\n\n _LOGGER.info(\"RecID In Use Mode HWM Group Address Data 1 Data 2 Data 3\")\n _LOGGER.info(\"----- ------ ---- --- ----- -------- ------ ------ ------\")\n for mem_addr in aldb:\n rec = aldb[mem_addr]\n # For now we write this to the log\n # Roadmap is to create a configuration panel\n in_use = \"Y\" if rec.control_flags.is_in_use else \"N\"\n mode = \"C\" if rec.control_flags.is_controller else \"R\"\n hwm = \"Y\" if rec.control_flags.is_high_water_mark else \"N\"\n log_msg = (\n f\" {rec.mem_addr:04x} {in_use:s} {mode:s} {hwm:s} \"\n f\"{rec.group:3d} {rec.address.human:s} {rec.data1:3d} \"\n f\"{rec.data2:3d} {rec.data3:3d}\"\n )\n _LOGGER.info(log_msg)", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def log(self):\n lines = tailer.tail(open('logs/status.log'), 10)\n\n statement = \"\"\n\n for line in lines:\n statement += (line + \"<br />\")\n return statement", "def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message", "def Create_log():\r\n \"\"\"And Maintain log file to the current date in MMM_DD_YY format\"\"\"\r\n \r\n name = multiprocessing.current_process().name\r\n config = config_create()\r\n Stream = config.get('Log', 'Log1')\r\n Tweet = config.get('Log', 'Log2')\r\n OverallLog = config.get('Log', 'Log3')\r\n \r\n 
uscore = '_'\r\n txtn = '.txt'\r\n StreamL = uscore +Stream+ txtn\r\n TweetL = uscore +Tweet+ txtn\r\n OverallLogL = OverallLog+txtn\r\n \r\n \r\n \r\n name = multiprocessing.current_process().name\r\n StreamFileName = time.strftime(\"%b_%d_%y\")+StreamL\r\n TweetFileName = time.strftime(\"%b_%d_%y\")+TweetL\r\n config.set('Latest_Log', 'currentstreamlog',StreamFileName)\r\n config.set('Latest_Log', 'currenttweetlog',TweetFileName)\r\n config.set('Latest_Log', 'overalllog',OverallLogL)\r\n \r\n with open('botconfig.ini', 'w') as x:\r\n config.write(x)\r\n if os.path.isfile(StreamFileName) is False:\r\n open(StreamFileName, 'w')\r\n \r\n if os.path.isfile(OverallLogL) is False:\r\n open(OverallLogL, 'w')\r\n \r\n if os.path.isfile(TweetFileName) is False:\r\n twfile = open(TweetFileName, 'w')\r\n ## Edit this or comment to change first line entered upon\r\n ## File creation\r\n twfile.write('0 ComicTweetBot')\r\n #time.sleep(1)\r\n #Create_log()\r", "def generate_log(filecontent, fileformat, request, call_source, reason, extra={}):\n # I don't know the fileformat\n data = {\"filecontent\": filecontent, \"fileformat\": fileformat}\n\n logdict = {\n \"data\": data,\n \"reason\": reason,\n \"request\": str(request.headers),\n \"call_source\": call_source,\n \"source\": request.headers.get(\"X-Forwarded-For\", request.remote_addr),\n \"time\": datetime.datetime.now().isoformat(),\n }\n logdict.update(extra)\n return json.dumps(logdict)", "def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")", "def __init__(self):\n s = \"{0}\\n{1:^150}\\n{0}\\n\".format(\"=\"*150, \"N E B I L A N D\")\n self.log(s)\n self.table_log(\"Iteration\", \"Datetime\",\n \"Event\", \"Entity Affected\", \"Extra Info\")\n self.log(\"-\"*150)", "def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()", "def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def log_message(self, format, *args):", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def log(self, msg):\n print(msg)", "def logline(msg):\n print msg", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n 
self.outFile.write(line_string + '\\n')", "def gerar_log():\n arquivo = Path(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt')\n if os.path.exists(arquivo):\n with open(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt', 'a') as acessos:\n acessos.write(f'{datetime.datetime.now()} - {request.headers[\"Host\"]} {request.headers[\"User-Agent\"]}\\n')\n else:\n with open(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt', 'w') as acessos:\n acessos.write(f'{datetime.datetime.now()} - {request.headers[\"Host\"]} {request.headers[\"User-Agent\"]}\\n')", "def _get_log_file(self, _action):\n prefix = \"work/{mapper}.{{library_name}}/log/{mapper}.{{library_name}}\".format(\n mapper=self.__class__.name\n )\n key_ext = (\n (\"log\", \".log\"),\n (\"conda_info\", \".conda_info.txt\"),\n (\"conda_list\", \".conda_list.txt\"),\n )\n for key, ext in key_ext:\n yield key, prefix + ext\n yield key + \"_md5\", prefix + ext + \".md5\"", "def log_builder(self, log_level, hrtimestamp, datestamp, timestamp, log_msg, tags):\n log_body = {}\n log_body[\"filename\"] = self.filename\n log_body[\"log_level\"] = log_level\n log_body[\"hrtimestamp\"] = hrtimestamp\n log_body[\"datestamp\"] = datestamp\n log_body[\"timestamp\"] = timestamp\n log_body[\"log_msg\"] = log_msg\n log_body[\"tags\"] = tags\n return log_body", "def report(LOGDIR, epoch, e_dict, saver, sess, fh_log):\n # print loss\n print (\"Epoch: %i; Loss: %f; KLd: %f; CE %f\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))\n fh_log.write(\"%i\\t%0.5e\\t%0.5e\\t%0.5e\\n\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))" ]
[ "0.7084474", "0.67634064", "0.6613596", "0.64954156", "0.64463615", "0.64409524", "0.6432035", "0.63565314", "0.6338335", "0.6240202", "0.6228313", "0.6208995", "0.616877", "0.6147516", "0.6111839", "0.6076145", "0.6073458", "0.6070906", "0.6070906", "0.60508394", "0.60479283", "0.60284317", "0.5993349", "0.5991125", "0.59841096", "0.598401", "0.59821653", "0.59803027", "0.59774864", "0.5977271", "0.59689766", "0.59647757", "0.592659", "0.5921036", "0.59016466", "0.58988506", "0.58760136", "0.5870126", "0.5867569", "0.5864342", "0.58602405", "0.58600456", "0.58583045", "0.5857282", "0.5851041", "0.5850539", "0.58453894", "0.5840268", "0.5836045", "0.5824955", "0.58226717", "0.5815751", "0.5815292", "0.58142865", "0.58100015", "0.5807777", "0.580271", "0.5787493", "0.57815605", "0.5778664", "0.5766239", "0.5760634", "0.57519877", "0.57470596", "0.57385916", "0.57301855", "0.57273906", "0.5726075", "0.5725586", "0.57234895", "0.57224286", "0.57220644", "0.5718346", "0.570452", "0.5701556", "0.5700145", "0.56871057", "0.56857246", "0.5683994", "0.56815785", "0.56813675", "0.5670904", "0.5666735", "0.5663796", "0.5661636", "0.56578356", "0.5656313", "0.5650818", "0.5645919", "0.56441313", "0.5643221", "0.5640073", "0.5639026", "0.56377286", "0.56322294", "0.56298035", "0.56262857", "0.5622595", "0.5620574", "0.56204224", "0.5606811" ]
0.0
-1
Append log output to log file.
def write_log_file(log_df, log_file):
    if log_file.is_file():
        # if log file already exists append to it, without the column headers
        log_df.to_csv(log_file, mode='a', sep='\t', index=False, header=False)
    else:
        # if log file doesn't exist create it, with column headers
        log_df.to_csv(log_file, sep='\t', index=False)
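A minimal usage sketch, assuming log_df is a pandas DataFrame and log_file a pathlib.Path (consistent with the .to_csv() and .is_file() calls above); the column names and the file name here are hypothetical:

    import pandas as pd
    from pathlib import Path

    # hypothetical log frame and target file, chosen only for this example
    log_df = pd.DataFrame({'step': [1, 2], 'loss': [0.90, 0.42]})
    write_log_file(log_df, Path('run.log'))  # first call creates the file with headers
    write_log_file(log_df, Path('run.log'))  # second call appends rows without headers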
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_to_logfile(self):\n with open(self.path, \"a+\") as f:\n for item in self.logs:\n f.write(item)\n self.logs.clear()", "def write_log(self, log_output):\r\n with open(self.log_link, \"a\") as log_file:\r\n log_file.writelines(log_output + \"\\n\")", "def append_to_log(file_name: str, text: str):\n if not log_file_exists(file_name):\n create_log_file(file_name)\n log = open(get_complete_file_name(file_name), 'a')\n log.write(text)\n log.write(\"\\n\")\n log.close()", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def write_to_file(self, *args, **kwargs) -> None:\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def append_logfile(filename, file_str):\n file_str = \"[\" + get_datetime_str() + \"]\" + file_str\n write_file(filename, file_str, append=1)", "def write_to_log(self, log_file, log_data):\n with open(self.gamelogs_path + log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(log_data)\n f.close()", "def write_log(self, log_filename, data):\n open(log_filename, 'a').write(str(data))", "def write_log(output_dir, texts, new_file=False):\n if new_file:\n f = open(os.path.join(output_dir, \"std.log\"), \"w\")\n else:\n f = open(os.path.join(output_dir, \"std.log\"), \"a\")\n f.write(str(texts) + \"\\n\")\n f.close()", "def write_log(text):\n write_file(read_file(log_file), log + '\\n' + text)", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def get_and_append_log_events(self):\n\n log_events = self.get_log_events()\n\n # Write log events to file.\n if len(log_events) > 0:\n self.write_log_events(log_events)", "def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message", "def append_line_to_log(line = '\\n'):\n with open(logPath, 'a') as f:\n f.write(line + '\\n')", "def _write_log(self, log_data):\n self.log_file.write(ensure_bytes(log_data + \"\\n\"))\n self.log_file.flush()", "def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()", "def end_logging(self):\n self.append_to_logfile()", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def printToLogfile (self, text):\n if self.logFile is not None:\n self.logFile.write(text)\n self.logFile.flush()", "def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")", "def write(self, message, 
print_to_stdout=True):\n with open(self.log_path, 'a') as log_file:\n log_file.write(message + '\\n')\n if print_to_stdout:\n print(message)", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def append_log_message(self, text):\n self._new_logs.append(text)", "def __write_logs_to_file(self, file_path):\n\n \"\"\"\n The following options are used to format the date/time of logs\n %Y Year with century as a decimal number.\n %m Month as a decimal number [01,12].\n %d Day of the month as a decimal number [01,31].\n\n %H Hour (24-hour clock) as a decimal number [00,23].\n %M Minute as a decimal number [00,59].\n \"\"\"\n\n log_path = \"Results/Script_Logs/merge_files_log.txt\"\n Log(\"Merged files to: {0}\".format(file_path),\n log_path=log_path,\n erase_file=False)", "def logToFile(output, file): \r\n print( output, file=file )", "def add_row_to_logfile(output_dir, *args):\n with open(os.path.join(output_dir, LOGFILE), 'a') as f:\n args_as_strings = map(str, args)\n f.write('\\n' + ', '.join(args_as_strings))", "def logOutput(self, line):\r\n self.writeToLog('output', line)", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def write_log(self, logfile='./src/movement_log.txt'):\n # TODO: parameterize logfile name\n print('Writing logs...')\n f = open(logfile, \"w\")\n for command in self.log_arr:\n f.write(command + \"\\n\")\n print('Writing finished')", "def append_logfile(message=None, logfile=log, path=cwd):\n if message is None:\n return\n # Wrap the text if it is greater than 80 - 25 = 55 characters.\n # Indent 25 spaces to on left to allow for width of time stamp\n wrapper = textwrap.TextWrapper()\n wrapper.initial_indent = \" \" * 25\n wrapper.subsequent_indent = \" \" * 25\n wrapper.width = 80\n message = wrapper.fill(message).lstrip()\n\n if debug: print(path + logfile)\n f = open(path + logfile, \"a\")\n # Truncate the 6 digit microseconds to be 3 digits of milli-seconds\n stamp = (\"{0:%Y-%m-%d %H:%M:%S}.{1}:\".format(datetime.datetime.now(),\n datetime.datetime.now().strftime(\"%f\")[:-3]))\n if debug: print(stamp + \" \" + message)\n f.write(stamp + \" \" + message + \"\\n\")", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def log(self, txt):\n if self.logfile:\n self.logfile.write(txt)", "def saveLogFile(self, fname = 
\"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def write_log(self, msg: str):\n self.cta_engine.write_log(msg, self)", "def WriteLog(self, content, file_name=None):\n file_path = ''\n if file_name is None:\n file_path = tempfile.NamedTemporaryFile(dir=self.events_dir,\n delete=False).name\n else:\n file_path = os.path.join(self.events_dir, file_name)\n with open(file_path, 'a') as f:\n f.write(content)", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def outputlogMessage(message):\n global logfile\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n f.writelines(outstr+'\\n')\n f.close()", "def log_message(self, text):\n if self.message_log_file != -1:\n #open file in append mode and write line to file\n with open(self.message_log_file, 'a') as log_file:\n log_file.write(text+'\\n')\n return", "def log_activity(self, log_entry):\n # open log file in \"append mode\"\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n # add a row to the log: the attributes of log_entry, in fieldnames order\n writer.writerow(log_entry.__dict__)", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def appendToStdout(self, txt):\n added = self.__logViewer.appendToStdout(txt)\n if added:\n self.__ui.showLogViewer()", "def _logToFile(logsLst, resultJSON=None, logFile=\"logFile.txt\"):\n if not LOGGING_TO_FILE: return\n with open(logFile, \"a+\") as file:\n message = \"\\n\".join(logsLst)\n file.write(\"------------------Logging--------------------\\n\")\n file.write(str(datetime.datetime.now()) + \"\\n\")\n # file.write(str(datetime.datetime.utcnow()) + \"\\n\")\n file.write(\"---------------------------------------------\\n\")\n file.write(message + \"\\n\")\n if resultJSON is not None:\n file.write(\"resulting JSON after comparison:\\n\")\n file.write(resultJSON)\n file.write(\"\\n\")", "def log(msg, logfile):\n print(msg)\n logfile.write(msg + \"\\n\")", "def log_write(log_f, text, action='a'):\n\n f = open(log_f, action)\n f.write(text)\n f.close()", "def log(self, data):\n if isinstance(data, basestring):\n self._write(data + '\\n', 'a')", "def add_to_error_log(message):\n f = open(Filenames.ERROR_LOG, \"a\")\n f.write((\"------------- %s --------------\\n\" % time.ctime()) + message)\n f.close()", "def append_log_info(string_info, log_full_filename_x, log_file_path, verbose=False):\n if log_full_filename_x is None:\n # If log file not exist create it\n # if verbose mode is enabled create the verbose log file also\n datetime_suffix = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n if verbose:\n log_full_filename = log_file_path + \"/\" + \"storage_transfer_verbose_\" + datetime_suffix + 
\".log\"\n else:\n log_full_filename = log_file_path + \"/\" + \"storage_transfer_\" + datetime_suffix + \".log\"\n else:\n log_full_filename = log_full_filename_x\n f = open(log_full_filename, \"a\")\n f.write(string_info + \"\\n\")\n f.close()\n return log_full_filename\n # try:\n # with open(local_log_file_name, \"a\") as f:\n # f.write(string_info + \"\\n\")\n # except:\n # print(\"Error append info log file \" + log_full_file_name)", "def write_to_all(self, *args, **kwargs) -> None:\n print(*args, **kwargs)\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def log_info(info):\n log = open(log_path, 'a+')\n log.write(info + '\\n')\n log.close()", "def to_log(*text):\n print(*text)\n with open('log.txt', 'a') as log:\n print(*text, file=log)", "def _write(self, data, mode):\n check_path(self.config_path)\n\n with open(self.log_file, mode) as log:\n if mode == 'a' and self.add_time:\n msg = self.TIME_TEMPLATE.format(time=strftime('%c'), error_msg=data)\n else:\n msg = data\n\n log.write(msg.encode(self._encoding, 'ignore'))", "def add_logfile_to_report(self):\n logfile = str(self.log_file)\n attach.file(logfile, \"Starter log file\", AttachmentType.TEXT)", "def writeLog(msg, addEndline=True):\n\n with open(LOG_FILE, \"a\") as f:\n f.write(\"\\n\")\n f.write(msg)\n \n if addEndline == True:\n f.write(\"\\n---------------------------------------------\\n\")", "def add_log_handlers(self, output_file_path):\n\n self.logger.setLevel(logging.DEBUG)\n\n if output_file_path is not None:\n directory.ensure_exists(output_file_path)\n self.logger.addHandler(logging.FileHandler(output_file_path))\n\n self.logger.addHandler(logging.StreamHandler())", "def writeLog(self, log_path):\r\n f = open(log_path, 'w')\r\n f.write(str(self))\r\n f.close()", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def write_log(*args):\n\n with open(\"server.log\", 'a') as log_file:\n log_file.write(datetime.now().isoformat() + \"\\t\")\n log_file.write(\"\\n\".join(args))\n log_file.write(\"\\n\")", "def write(self, msg, flag_print=True):\n file = open(self.log_path, \"a\")\n insert_time=datetime.now().strftime('%H:%M:%S.%f')[:-3]\n current_time = \"[\"+insert_time+\"]\"\n log_msg = current_time + \" \" + msg + \"$\" +\"\\n\" \n file.write(log_msg)\n # if flag_print is True:\n print(log_msg)", "def write_log(self):\n if self.hash_log_curr:\n temp_dict = {}\n count = 0\n for key, value in self.hash_log_curr.iteritems():\n temp_dict[value[4] + str(count)] = key\n count += 1\n temp_sort = temp_dict.keys()\n temp_sort.sort()\n temp_sort.reverse()\n\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'w')\n # log header\n log.write(self.log_header)\n # write 
hash_log_content to log\n for key in temp_sort:\n value = self.hash_log_curr[temp_dict[key]]\n log.write(value[0]+'|'+value[1]+'|'+value[2]+'|'+value[3]+'|'+value[4]+'|'+value[5] + '\\n')\n log.close()\n self.print_to_log('New log writen to file: ' + self.log_path + r'\\hash_log.txt' )\n except IOError:\n self.print_to_log('Cannot open log file to write')\n raise\n except:\n self.print_to_log('Unknown Error')\n raise", "def add_file_handler(self, output):\n fh = logging.FileHandler(output + 'cpnest.log')\n fh.setFormatter(logging.Formatter(self.fmt, datefmt=self.date_fmt))\n self.addHandler(fh)", "def log(logfile, st):\n with open(logfile, 'a') as f:\n f.write(st + '\\n')\n print(st)", "def log(self, *args):\n self.log_stdout(*args)\n print(*args, file=self.general_log_file.file)\n self.general_log_file.flush()", "def setAppendLog(self,value):\n self.PDFreactorConfiguration.in1[\"appendLog\"] = value", "def write_logfile(filename, content, directory):\n logfile = os.path.join(directory, f\"{filename}.log\")\n f = open(logfile, 'a+')\n f.write(content)\n f.write(\"\\n\")\n f.close()\n return logfile", "def log_to_file(log_path, logroot=True):\n\n # LOGGING FORMAT\n fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s'\n date_fmt = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt=date_fmt)\n\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n log.addHandler(file_handler)\n\n if logroot:\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG)", "def _writeOutput(self, msg, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n f.write (msg)\n f.close()", "def log_message(self, message):\n with open(LOGFILE, \"a\") as f:\n currentDt = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n message = \"\\n\" + currentDt + '---' + message\n f.write(message)", "def write_log(message: str, base_url, path=\"logs/\"):\n print(message)\n url_filename = url_to_filename(base_url)\n filename = f\"{path}LOG-{url_filename}.txt\"\n\n if os.path.exists(filename):\n append_write = \"a\"\n else:\n append_write = \"w\"\n\n f = open(filename, append_write)\n f.write(message)\n f.close()", "def log_output_file(self, file_key: str):\n if not self.exp_metadata.resources.output:\n self.exp_metadata.resources.output = []\n\n if file_key not in self.exp_metadata.resources.output:\n self.exp_metadata.resources.output.append(file_key)\n\n self._sync_log_event()", "def writeToLog(logName, message, writeOrAppend):\r\n\r\n\twith open(logName, writeOrAppend) as out:\r\n\t\tout.write(message)", "def log_to_file(text: str, target: Union[str, Path]) -> None:\n with open(target, \"a\" if Path(target).exists() else \"w\", encoding=\"utf-8\") as file:\n file.write(text)", "def _forward_log(self):\n\n if self.log is None:\n return\n\n fd = None\n try:\n fd = os.open(\"%s.out\" % self.vm_log_path, os.O_RDONLY)\n data = \"\"\n while True:\n new_data = os.read(fd, 4096)\n if new_data == \"\":\n self._log_to_file(data)\n return\n\n data += new_data\n lines = data.split(\"\\n\")\n for line in lines[:-1]:\n self._log_to_file(line)\n data = lines[-1]\n\n finally:\n if fd is not None:\n os.close(fd)", "def update_log(er=\"File created\",log_path=log_path, s3_path_log = s3_path_log, upload=False):\n print(er)\n with open(log_path, 'a') as file:\n file.write(str(datetime.now()) + ',' + str(er) + '\\n')\n # if upload is True:\n # s3.meta.client.upload_file(log_path, bucket_name, s3_path_log)", "def writelog(self,*args):\n import 
sys\n print(' '.join([str(a) for a in args]),file=sys.stderr)", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def test_006_log_append(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = consts.TEST_FILENAME + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n # pylint: disable = protected-access\n __log_test = __test._ChessStorage__log_append(__dir_game_logfile, __test_data)\n # pylint: enable = protected-access\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])", "def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def write_data(self, data):\n print \"Writing data...\"\n # Write data into log\n self.log.write_file(data)\n\n # Close log so information can be sent\n self.log.close_log()", "def recordLogsToFile(logpath):\n ret = True\n global LOGLIST\n if not os.path.exists(logpath):\n os.makedirs(logpath)\n\n f = open(logpath+'/TesterUpdatelogs.log','wb')\n LOGLIST = [line+'\\n' for line in LOGLIST]\n try:\n f.truncate()\n f.writelines(LOGLIST)\n except Exception:\n print 'Write logs to path %s failed!' %logpath\n print Exception\n ret = False\n finally:\n f.close()\n return ret", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def to_log(self, namefile=None):\n if namefile is None:\n namefile = self.name.replace(' ', '_')+'.log'\n f = open(namefile, 'w')\n f.write(self.__str__())\n f.close()", "def write_log(logfile, log_dict):\n with open(logfile, 'a') as f:\n c = csv.writer(f)\n if log_dict['epoch'] == 0: # write header for first epoch (dubbed as 0th epoch)\n c.writerow(log_dict.keys())\n\n c.writerow(log_dict.values())", "def log(listfile,line): \r\n\tif listfile != 0:\r\n\t\t#print line\r\n\t\tif line.startswith(\"ERROR\") or line.startswith(\"WARNING\"):\r\n\t\t\tprint line\r\n\r\n\t\tif not line.endswith('\\n'):\r\n\t\t\tline += '\\n'\r\n\t\t#listfile.write(line)\r\n\t\tlogfile.append(line)\r\n\t\tif len(logfile) >= 1: #:\r\n\t\t\tfor i in range(len(logfile)):\r\n\t\t\t\tlistfile.write(logfile[i])\r\n\t\t\tlogfile[:] = []\r\n\telse:\r\n\t\tprint line", "def enable_log_file():\n\n file_handler = logging.FileHandler(\"run-{}.log\".format(get_time_str()))\n file_handler.setFormatter(FORMATTER)\n\n for logger in LOGGER_TABLE.values():\n logger.addHandler(file_handler)", "def _log_message(self, message):\n\t\tif message not in self._logged_messages:\n\t\t\twith open(self._logfile, \"a\") as f:\n\t\t\t\tf.write(message + \"\\n\")\n\t\tself._logged_messages.append(message)", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" %s\" % message)", "def log(message):\n path = 
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + str(message))", "def append(self, log: str, action: str):\n self.logs.append(log)\n self.actions.append(action)\n self.debug.update(pip_version=get_pip_version(name=self.name))", "def log_append (self, type='err', id='', ch=0, value=0, desc='', raw=''):\r\n\t\t# Append to log fifo\r\n\t\tself.log.append({'timestamp':time.asctime(), 'proctime':round(time.time()-self.init_time,3), 'type':type, 'id':id, 'ch':ch, 'value':value, 'desc':desc, 'raw':raw})\r\n\t\t# Send to handler function (if defined)\r\n\t\tif self.log_handler is not None:\r\n\t\t\tself.log_handler(self.log[-1])\r\n\t\t# Send to stdout (if requested)\r\n\t\tif self.log_to_stdout:\r\n\t\t\tself.print_log (n = 1)", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + message)" ]
[ "0.8269278", "0.77975404", "0.7335851", "0.73168206", "0.72050554", "0.71168935", "0.6989999", "0.696974", "0.6952153", "0.69020504", "0.68942434", "0.6814251", "0.68042284", "0.67517656", "0.6726408", "0.66658777", "0.6634301", "0.6611948", "0.65880245", "0.65626615", "0.6516075", "0.6443687", "0.64436024", "0.6423611", "0.6418324", "0.6418141", "0.6415243", "0.63921297", "0.63828146", "0.63390654", "0.63237363", "0.63067335", "0.62926596", "0.62926406", "0.62743837", "0.6254549", "0.62515485", "0.6244929", "0.6214842", "0.6206512", "0.61985826", "0.6183132", "0.61817896", "0.6161515", "0.6154404", "0.61440724", "0.6116526", "0.61081964", "0.60968274", "0.6081579", "0.6048926", "0.60360855", "0.6034651", "0.60222876", "0.6018984", "0.6018984", "0.5984418", "0.5981467", "0.5974847", "0.59713644", "0.59702045", "0.5970045", "0.5961306", "0.596106", "0.59553283", "0.5936781", "0.5919077", "0.5907658", "0.58953464", "0.5873266", "0.5864512", "0.5846748", "0.5844943", "0.58370024", "0.5836335", "0.58259714", "0.5822598", "0.5815524", "0.58024174", "0.57829785", "0.5780536", "0.57738096", "0.57724535", "0.5765148", "0.576113", "0.576005", "0.5759121", "0.57493377", "0.5746247", "0.574047", "0.5733387", "0.5732061", "0.5726472", "0.57088864", "0.57003766", "0.5699736", "0.5697695", "0.5694133", "0.56885946", "0.5688125" ]
0.60229826
53
Create a new credit card instance. The initial balance is zero.

customer  the name of the customer (e.g., 'John Bowman')
bank      the name of the bank
acnt      the account identifier (e.g., '5391 0375 9387 5309')
limit     credit limit (measured in dollars)
def __init__(self, customer, bank, acnt, limit):
    self._customer = customer
    self._bank = bank
    self._account = acnt
    self._limit = limit
    self._balance = 0
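A minimal usage sketch, assuming the initializer above belongs to a class named CreditCard; the class name, the bank string, and the limit value here are hypothetical illustrations:

    class CreditCard:
        # hypothetical wrapper class around the initializer shown above
        def __init__(self, customer, bank, acnt, limit):
            self._customer = customer
            self._bank = bank
            self._account = acnt
            self._limit = limit
            self._balance = 0  # every new card starts with a zero balance

    cc = CreditCard('John Bowman', 'First National', '5391 0375 9387 5309', 1000)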
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(customer, **data):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.CARD_RESOURCE, customer_id=customer), data)\n return resources.Card(**response)", "def __init__(self, customer, bank, account, limit, bank_bal = 0):\n\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = bank_bal # To store customer spendings.", "def __init__(self, customer, bank, acnt, limit):\n self._customer = customer\n self._bank = bank\n self._account = acnt\n self._limit = limit\n self._balance = 0", "def __init__(self, customer, bank, account, limit):\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = 0", "def create_account(self):\n account_identifier = \"\".join([str(num) for num in random.sample(range(10), 9)])\n first_fifteen_digit = self.BIN + account_identifier\n checksum = self.create_checksum(first_fifteen_digit)\n card_number = first_fifteen_digit + str(checksum)\n pin = \"\".join([str(num) for num in random.sample(range(10), 4)])\n balance = 0\n print(\"\\nYour card has been created\")\n print(f\"Your card number:\\n{card_number}\\nYour card PIN:\\n{pin}\")\n # fetching max id from database\n database_cursor.execute(\"SELECT id FROM card;\")\n ids = [x[0] for x in database_cursor.fetchall()]\n if ids:\n max_id = max(ids) + 1\n else:\n max_id = 1\n # insert new account into database\n database_cursor.execute(f\"INSERT INTO card VALUES ({max_id}, {card_number}, {pin}, {balance});\")\n database_connection.commit()", "def create_account(customer):\n random.seed()\n \n a = random.randint(000000000, 999999999) # now 9 digits for checksum algorithm to work\n card_number = str(400000) + str(a)\n user_password = random.randint(0000, 9999)\n user_password = str(user_password).zfill(4) # ensures password is 4 digits long even with zeroes\n \n # find and add checksum\n checksum = luhn_checksum(card_number)\n card_number_final = str(card_number) + str(checksum)\n \n txt = \"Your card has been created \\nYour card number: \\n{cardnum}\"\\\n \"\\nYour card PIN: \\n{cardpin}\".format(cardnum = card_number_final, cardpin = user_password)\n print(txt)\n \n customer = [card_number_final, user_password] \n return customer", "def get_bankcard_obj(self):\n kwargs = {\n 'card_number': self.cleaned_data['number'],\n 'expiry_date': self.cleaned_data['expiry_month'].strftime(\"%m/%y\"),\n 'ccv': self.cleaned_data['ccv_number'],\n }\n if self.cleaned_data['start_month']:\n kwargs['start_date'] = self.cleaned_data['start_month'].strftime(\"%m/%y\")\n return Bankcard(**kwargs)", "def __init__(self, bank_name, account_num, balance):\n self._bank_name = bank_name\n self._account_num = account_num\n self._balance = balance", "def create_customer(self, user, card_token, plan):\n customer = stripe.Customer.create(\n card=card_token,\n plan=plan,\n email=user.email,\n )\n user.stripe_customer_id = customer.id\n user.save()\n return customer", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n logger.info(\n f\"Successfully added customer {customer_id} with 
{credit_limit}\"\n )\n customer.save()\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to added customer {customer_id}. {unknown_error}\"\n )\n print(unknown_error)", "def add_customer(customer_id,\n name,\n lastname,\n home_address,\n phone_number,\n email,\n status,\n credit_limit):\n # pylint: disable = W0703\n try:\n with DB.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email=email,\n status=status.lower(),\n credit_limit=credit_limit)\n new_customer.save()\n logging.info('Customer(s) successfully added')\n\n except Exception as error:\n LOGGER.info(f'Error creating = {name}')\n LOGGER.info(error)", "def add_customer(customer_id, name, lastname, homeaddress, phone_number, email, status, credit_limit):\n try:\n with customer_db.transaction():\n new_customer_mi = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n homeaddress=homeaddress,\n phone_number=phone_number,\n email=email,\n status=status,\n credit_limit=credit_limit\n )\n logger.debug(\"Added customer %s to %s\", new_customer_mi, customer_db.database)\n return new_customer_mi\n except Exception as e:\n logger.error(\"Error creating customer_id %s: %s\", customer_id, e)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "def example_bank_account():\n \n return BankAccount(\"Test User\", 1000.0)", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n # database.transaction; all work given to database gets done or none of it\n with cm.DATABASE.transaction():\n try:\n # .create inserts the data into the database\n new_customer = cm.Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n # .save() will write the data to the database\n new_customer.save()\n LOGGER.info(\"Added customer [%s]\", customer_id)\n except pw.IntegrityError:\n LOGGER.error(\"Customer [%s] not added to database!\", customer_id)\n raise pw.IntegrityError", "def add_customer(customer_id, first, last, addr, phone, email, status, limit):\n try:\n LOGGER.info('Creating customer record')\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=first,\n last_name=last,\n 
home_address=addr,\n phone_number=phone,\n email_address=email,\n status=status,\n credit_limit=limit\n )\n new_customer.save()\n LOGGER.info('Added customer: %s', new_customer.customer_id)\n except IntegrityError as err:\n LOGGER.warning('Error creating = ID: %s', customer_id)\n LOGGER.warning(err)\n\n return Customer", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n print('Adding new customer, Customer ID {}...'.format(customer_id))\n try:\n Customer.get_by_id(customer_id)\n print('Customer ID {} is already in use'.format(customer_id))\n except Exception as ex:\n if \"instance matching query does not exist\" in str(ex):\n try:\n new_customer = Customer.create(customer_ID=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info('Added new customer, Customer ID %s', customer_id)\n except IntegrityError:\n print('Incorrect format, customer {} not saved'\n .format(customer_id))", "def billCustomer(self, **params):\n self.__requireParams(params, ['id', 'amount'])\n return self.__req('bill_customer', params)", "def add_card(stripe_customer_id, data, user_type='customer'):\n if user_type == 'charity':\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n else:\n stripe.api_key = Config.STRIPE_SECRET_KEY\n if len(stripe.api_key) > 50:\n try:\n card_token = stripe.Token.create(\n card={\n \"number\": data['card_number'],\n 'name': data.get('card_holder', ''),\n \"exp_month\": data['card_expiration'].split('/')[0],\n 'exp_year': data['card_expiration'].split('/')[1],\n 'cvc': data['card_cvc'] if data.get('card_cvc') else None\n },\n )\n card = stripe.Customer.create_source(\n stripe_customer_id,\n source=card_token\n )\n except Exception as e:\n print(e)\n if hasattr(e, 'param') and e.param in card_params_bind:\n return False, {card_params_bind[e.param]: [e.args[0]]}\n return False, {'card': ['Invalid card details.']}\n else:\n try:\n card = stripe.Customer.create_source(\n stripe_customer_id,\n source={\n 'object': 'card',\n 'number': data['card_number'],\n 'name': data.get('card_holder', ''),\n 'exp_month': data['card_expiration'].split('/')[0],\n 'exp_year': data['card_expiration'].split('/')[1],\n 'cvc': data['card_cvc'] if data.get('card_cvc') else None\n }\n )\n # return True, card\n except Exception as e:\n if hasattr(e, 'param') and e.param in card_params_bind:\n return False, {card_params_bind[e.param]: [e.args[0]]}\n return False, {'card': ['Invalid card details.']}\n\n # test payment\n print('Card create')\n try:\n charge = stripe.Charge.create(\n amount=100, # $1\n currency='AUD',\n capture=False,\n customer=stripe_customer_id,\n source=card.id,\n description='Init payment.'\n )\n return True, card\n except Exception as e:\n if hasattr(e, 'param') and e.param in card_params_bind:\n return False, {card_params_bind[e.param]: [e.args[0]]}\n return False, {'card': ['Invalid card details.']}", "def __init__(self, account_number: str, bank_name: str,\n starting_balance: float):\n super().__init__(account_number)\n self.bank_name = bank_name\n self.starting_balance = starting_balance\n self.current_balance = starting_balance", "def new_bank_account(payment_doc, bankData):\n from erpnextfints.utils.bank_account_controller import \\\n BankAccountController\n return BankAccountController().new_bank_account(payment_doc, bankData)", "def 
_create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"alex@jr.com\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def create_customer_card(self,\n customer_id,\n body):\n\n # Prepare query URL\n _url_path = '/v2/customers/{customer_id}/cards'\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\n 'customer_id': customer_id\n })\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n _request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))\n OAuth2.apply(self.config, _request)\n _response = self.execute_request(_request)\n\n decoded = APIHelper.json_deserialize(_response.text)\n if type(decoded) is dict:\n _errors = decoded.get('errors')\n else:\n _errors = None\n _result = ApiResponse(_response, body=decoded, errors=_errors)\n return _result", "def __init__(self, account_id, balance):\n self.account_id = account_id\n self.balance = balance", "def credit(self, amount, debit_account, description, debit_memo=\"\", credit_memo=\"\", datetime=None):\r\n assert amount >= 0\r\n return self.post(-amount, debit_account, description, self_memo=credit_memo, other_memo=debit_memo, datetime=datetime)", "def charge_customer(customer, amount):\n stripe.api_key = Config.STRIPE_SECRET_KEY\n\n if not customer.cards:\n return False # This situation is impossible, but anyway\n try:\n charge = stripe.Charge.create(\n amount=int(amount * 100),\n currency='AUD',\n customer=customer.stripe_customer_id,\n source=customer.cards[-1].stripe_card_id,\n description='Payment for donations.'\n )\n except Exception as e:\n print(e.args[0])\n return False\n\n if charge.status == 'succeeded':\n return True\n return False", "def __init__(self, cardname, amount):\n self.cardname = str(cardname)\n self.amount = int(amount)", "def charge_credit_card(amount,save_to_cim=False):\n\n # Create a merchantAuthenticationType object with authentication details\n # retrieved from the constants file\n merchantAuth = apicontractsv1.merchantAuthenticationType()\n merchantAuth.name = CONSTANTS.apiLoginId\n merchantAuth.transactionKey = CONSTANTS.transactionKey\n\n\n # Create the payment data for a credit card\n creditCard = apicontractsv1.creditCardType()\n card_types = ['visa','discover','mastercard','jcb']\n creditCard.cardNumber = fake.credit_card_number(card_type=random.choice(card_types))\n creditCard.expirationDate = fake.credit_card_expire()\n creditCard.cardCode = fake.credit_card_security_code()\n\n # Add the payment data to a paymentType object\n payment = apicontractsv1.paymentType()\n payment.creditCard = creditCard\n\n # Create order information\n order = apicontractsv1.orderType()\n order.invoiceNumber = str(random.randint(1000,3000))\n order.description = fake.bs()\n\n # Set the customer's Bill To address\n customerAddress = apicontractsv1.customerAddressType()\n customerAddress.firstName = fake.first_name()\n customerAddress.lastName = fake.last_name()\n customerAddress.company = fake.bs()\n customerAddress.address = fake.street_address()\n customerAddress.city = fake.city()\n customerAddress.state = fake.address().split()[-1].split()[0]\n customerAddress.zip = 
fake.postalcode_in_state()\n customerAddress.country = fake.country()\n customerAddress.phoneNumber = fake.phone_number()\n\n\n # Set the customer's identifying information\n customerData = apicontractsv1.customerDataType()\n customerData.type = \"individual\"\n customerData.id = fake.upc_e()\n customerData.email = fake.email()\n\n # Add values for transaction settings\n duplicateWindowSetting = apicontractsv1.settingType()\n duplicateWindowSetting.settingName = \"duplicateWindow\"\n duplicateWindowSetting.settingValue = \"600\"\n settings = apicontractsv1.ArrayOfSetting()\n settings.setting.append(duplicateWindowSetting)\n\n # setup individual line items\n random_num = random.randint(2000,5000)\n line_item_1 = apicontractsv1.lineItemType()\n line_item_1.itemId = str(random.randint(1,9))\n line_item_1.name = \"first\"\n line_item_1.description = fake.catch_phrase()\n line_item_1.quantity = str(random.randint(1,9))\n line_item_1.unitPrice = \"12.95\"\n line_item_2 = apicontractsv1.lineItemType()\n line_item_2.itemId = str(random.randint(1,9))\n line_item_2.name = \"second\"\n line_item_2.description = fake.catch_phrase()\n line_item_2.quantity = str(random.randint(1,9))\n line_item_2.unitPrice = \"7.95\"\n line_item_3 = apicontractsv1.lineItemType()\n line_item_3.itemId = str(random.randint(1,9))\n line_item_3.name = \"third\"\n line_item_3.description = fake.catch_phrase()\n line_item_3.quantity = str(random.randint(1,9))\n line_item_3.unitPrice = \"100.00\"\n\n\n # build the array of line items\n line_items = apicontractsv1.ArrayOfLineItem()\n line_items.lineItem.append(line_item_1)\n line_items.lineItem.append(line_item_2)\n line_items.lineItem.append(line_item_3)\n\n # Create a transactionRequestType object and add the previous objects to it.\n transactionrequest = apicontractsv1.transactionRequestType()\n transactionrequest.transactionType = \"authCaptureTransaction\"\n transactionrequest.amount = amount\n transactionrequest.payment = payment\n transactionrequest.order = order\n transactionrequest.billTo = customerAddress\n transactionrequest.customer = customerData\n transactionrequest.transactionSettings = settings\n transactionrequest.lineItems = line_items\n\n # Assemble the complete transaction request\n createtransactionrequest = apicontractsv1.createTransactionRequest()\n createtransactionrequest.merchantAuthentication = merchantAuth\n createtransactionrequest.refId = \"1234-3432\"\n createtransactionrequest.transactionRequest = transactionrequest\n # Create the controller\n createtransactioncontroller = createTransactionController(\n createtransactionrequest)\n createtransactioncontroller.execute()\n\n response = createtransactioncontroller.getresponse()\n\n if response is not None:\n # Check to see if the API request was successfully received and acted upon\n if response.messages.resultCode == \"Ok\":\n # Since the API request was successful, look for a transaction response\n # and parse it to display the results of authorizing the card\n if hasattr(response.transactionResponse, 'messages') is True:\n print(\n 'Successfully created transaction with Transaction ID: %s'\n % response.transactionResponse.transId)\n if save_to_cim:\n # create CIM profile\n cim_create.append(response.transactionResponse.transId)\n create_customer_profile_from_transaction(str(cim_create[0]))\n print('Transaction Response Code: %s' %\n response.transactionResponse.responseCode)\n print('Message Code: %s' %\n response.transactionResponse.messages.message[0].code)\n print('Description: %s' % 
response.transactionResponse.\n messages.message[0].description)\n else:\n print('Failed Transaction.')\n if hasattr(response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(response.transactionResponse.\n errors.error[0].errorCode))\n print(\n 'Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n # Or, print errors if the API request wasn't successful\n else:\n print('Failed Transaction.')\n if hasattr(response, 'transactionResponse') is True and hasattr(\n response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(\n response.transactionResponse.errors.error[0].errorCode))\n print('Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n else:\n print('Error Code: %s' %\n response.messages.message[0]['code'].text)\n print('Error message: %s' %\n response.messages.message[0]['text'].text)\n else:\n print('Null Response.')\n\n return response", "def add_card(wallet, template_wallet):\n name = input(\"What is the name on the card? \")\n issuer = input(\"Which bank is the issuer? \")\n selected = False\n new_card = None\n yes_no = \"\"\n for card in template_wallet.get_cards():\n if card.get_issuer() != issuer:\n continue\n while yes_no != \"Y\" or yes_no != \"N\":\n yes_no = input(\"Is it the \" + card.get_card_name()\n + \"(Input Y or N)? \")\n if yes_no == \"Y\":\n selected = True\n new_card = card\n break\n elif yes_no == \"N\":\n break\n else:\n print(\"Error! Please enter in Y or N!\")\n if selected:\n break\n if selected is False:\n return\n result = None\n while yes_no != \"Y\" or yes_no != \"N\":\n yes_no = input(\"Is the card new (Input Y or N)? \")\n sub = new_card.get_sign_up_bonus()\n network = new_card.get_network()\n issuer = new_card.get_issuer()\n card_name = new_card.get_card_name()\n cats = new_card.print_categories()\n p_or_c = new_card.check_points_or_cash()\n cpp = new_card.get_cents_per_point()\n sub_info = str(sub.get_reward()) + \",\" + str(sub.get_minimum_spend()) \\\n + \",\" + str(sub.get_months())\n if yes_no == \"Y\":\n balance = 0\n age = 0\n result = credit_card.CreditCard(name, network, issuer, card_name,\n sub_info, cats, balance, age,\n p_or_c, cpp)\n break\n elif yes_no == \"N\":\n while True:\n try:\n balance = float(input(\"Please enter the balance in USD: \"))\n age = int(input(\"Please enter the age in months of the \"\n \"card: \"))\n break\n except ValueError:\n print(\"Please enter valid numbers!\")\n\n result = credit_card.CreditCard(name, network, issuer, card_name,\n sub_info, cats, balance, age,\n p_or_c, cpp)\n break\n else:\n print(\"Error! 
Please enter in Y or N!\")\n\n if selected:\n wallet.add_card(result)\n return selected", "def bank_account():\n return BankAccount()", "def add_customer(\n customer_id,\n name,\n last_name,\n home_address,\n phone_number,\n email_address,\n status,\n credit_limit,\n):\n LOGGER.info(\"Adding new customer, %s %s to database\", name, last_name)\n try:\n Customers.create(\n customer_id=customer_id,\n name=name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n LOGGER.info(\"Added new customer %s %s to database\", name, last_name)\n except IntegrityError as e_val:\n LOGGER.warning(\"Customer %s already exists\", customer_id)\n LOGGER.warning(e_val)", "def test_credit_card():\n cc = chap2.CreditCard('John Doe', '1st Bank', '5391 0375 9387 5309', 1000)\n assert cc.get_balance() == 0\n cc.charge(300)\n assert cc.get_balance() == 300\n cc.charge(15.50)\n assert cc.get_balance() == 315.50\n cc.make_payment(250)\n assert cc.get_balance() == (315.50 - 250)\n cc.make_payment(45.50)\n assert cc.get_balance() == 20\n with pytest.raises(TypeError):\n cc.charge('$5.00')\n with pytest.raises(TypeError):\n cc.make_payment('$10.00')\n assert not cc.charge(5000)", "def ht_get_stripe_customer(account, cc_token=None, cc_card=None, cust=None):\n\n\tif (account.stripe_cust is not None):\n\t\tprint 'ht_get_stripe_customer_id(): found customer', account.stripe_cust\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\t\tstripe_cust = stripe.Customer.retrieve(account.stripe_cust)\n\t\tprint 'ht_get_stripe_customer_id(): update customer,' + str(stripe_cust.get('email')) + ', w/ info(' + str(cc_token) + ', ' + str(cc_card) + ')'\n\t\tstripe_cust.cards.create(card=cc_token)\n\t\treturn account.stripe_cust\n\n\tprint 'ht_get_stripe_customer_id: customer does not exist, create'\n\ttry:\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\n\t\tht_metadata = {}\n\t\tht_metadata['ht_account'] = account.userid\n\n\t\tprint 'ht_get_stripe_customer_id: customer info cc_token: ' + str(cc_token) + ' cc_card: ' + str(cc_card)\n\t\tstripe_customer = stripe.Customer.create(card=cc_token, description=str(account.userid), metadata=ht_metadata, email=account.email)\n\t\tstripe_cust\t= stripe_customer['id']\n\t\tstripe_card\t= stripe_customer['default_card']\n\t\tprint 'ht_get_stripe_customer_id: New Customer (%s, %s)' % (stripe_cust, stripe_card)\n\t\tpp(stripe_cust)\n\n\t\tprint 'ht_get_stripe_customer_id: Update Account'\n\t\taccount.stripe_cust = stripe_cust\n\t\tdb_session.add(account)\n\t\tdb_session.commit()\n\texcept Exception as e:\n\t\t# problems with customer create\n\t\tprint type(e), e\n\t\tdb_session.rollback()\n\n\tprint 'ht_get_stripe_customer_id:', stripe_cust\n\treturn stripe_cust", "def transition_add_stripe(self):\n card_info = self.card_info\n\n stripe.api_key = card_info.gateway.stripe_api_key\n\n profile_data = {\n 'source': {\n 'object': 'card',\n 'number': card_info.number,\n 'exp_month': card_info.expiry_month,\n 'exp_year': card_info.expiry_year,\n 'cvc': card_info.csc,\n 'name': (\n card_info.owner or self.address.name or self.party.name\n ),\n },\n }\n profile_data['source'].update(\n card_info.address.get_address_for_stripe())\n\n customer_id = card_info.party._get_stripe_customer_id(\n card_info.gateway\n )\n\n try:\n if customer_id:\n customer = stripe.Customer.retrieve(customer_id)\n card = customer.sources.create(**profile_data)\n else:\n profile_data.update({\n 'description': 
card_info.party.name,\n 'email': card_info.party.email,\n })\n customer = stripe.Customer.create(**profile_data)\n card = customer.sources.data[0]\n except (\n stripe.error.CardError, stripe.error.InvalidRequestError,\n stripe.error.AuthenticationError, stripe.error.APIConnectionError,\n stripe.error.StripeError\n ), exc:\n raise UserError(exc.json_body['error']['message'])\n\n return self.create_profile(\n card.id,\n stripe_customer_id=customer.id\n )", "def __init__(self, bank_account_no: str, bank_name: str,\n bank_balance: float, budget_manager: BudgetManager):\n self.bank_account_no = bank_account_no\n self.bank_name = bank_name\n self.bank_balance = bank_balance\n self.transactions = []\n self.budget_manager = budget_manager\n self._locked = False", "def add_customer(customer_id, name, lastname, home_address, phone_number, email_address, status,\n credit_limit):\n init_database()\n try:\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n active_status=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n logging.info('New customer, ID %s, added successfully.', customer_id)\n return True\n except peewee.IntegrityError as exc:\n logging.error('Error creating new customer with ID %s: %s.', customer_id, exc)\n return False\n finally:\n database.close()", "def create_card(conn, card):\n sql = ''' INSERT INTO card(id,number,pin,balance)\n VALUES(?,?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, card)\n conn.commit()\n return cur.lastrowid", "def token_for_customer(self, token, user):\n kwargs = dict(card=token,\n description='Poold user: %s' % user.id,\n email=user.email)\n try:\n stripe_user = _Customer.create(api_key=self.api_key, **kwargs)\n msg = 'New Stripe Customer Created'\n logger.transaction(msg, **kwargs)\n except stripe.StripeError, e:\n self._handle_error(e, user, kwargs)\n except Exception, e: # Catch any other error and log, then re-raise\n msg = 'An unknown error occurred while creating a new Stripe Customer.'\n data = dict(error_type=type(e).__class__,\n error_message=e.message)\n logger.error(msg, data=data, **kwargs)\n raise\n\n return stripe_user.id", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def __init__(self, owner, initial_balance=0.0):\n Account.count += 1\n self.owner = owner\n self.account_number = '%sXY-%s-%08d' % (Account.division,\n Account.branch, Account.count)\n self.balance = initial_balance", "def get_new_customer() -> Customer:\r\n print(\"\\n-- PERSONAL INFORMATION --\")\r\n print(\"To start an order you must provide the following details.\\n\")\r\n\r\n print(\"- NAME -\")\r\n first_name = get_valid_input(\"Please type your FIRST NAME: \", validate_name)\r\n last_name = get_valid_input(\"Please type your LAST NAME: \", validate_name)\r\n\r\n print(\"\\n- CONTACT -\")\r\n email = get_valid_input(\"Please type your EMAIL address: \", validate_email)\r\n phone = get_valid_input(\"Please type your PHONE NUMBER: \", validate_phone).replace(\"-\",\"\").replace(\"(\", \"\").replace(\")\", \"\")\r\n\r\n print(\"\\n- ADDRESS -\")\r\n print(\"Please type your ADDRESS using the following form.\")\r\n print(\"HOUSE # Street Name, City, 
State/Province, ZIP/Postal Code\")\r\n print(\"EXAMPLE: 700 Pennsylvania Avenue NW, Washington, DC, 20408\")\r\n\r\n address = get_valid_input(\"ADDRESS: \", validate_address)\r\n\r\n customer = Customer(last_name, first_name, email, phone, address)\r\n return customer", "def bank():\n\n bank = Bank.objects.create(name='Random Bank')\n return bank", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def __init__(self, initial_amount = 0):\n self.balance = initial_amount", "def __init__(self, first_name, second_name, gender, account_type):\n self.first_name = first_name\n self.second_name = second_name\n self.gender = gender\n self.account_type = account_type\n self.account_number = '531'+ ''.join(random.choices(string.digits, k=6)) #todo: Generate new number if it exissts in database\n self.account_password = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))\n self.account_balance = 0.0", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def add_customer(*, customer_id, name=None, lastname=None, home_address=None,\n phone_number=None, email_address=None, status=None,\n credit_limit=None):\n with DATABASE.transaction():\n try:\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=name,\n last_name=lastname,\n address=home_address,\n phone=phone_number,\n email=email_address,\n is_active=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n LOGGER.info('Database add successful: (%s, %s)', lastname, name)\n return new_customer\n except pw.IntegrityError:\n LOGGER.warning('Database add error: (%s, %s)', lastname, name)", "def get_credit_card():\r\n print(\"- PAYMENT INFORMATION -\")\r\n print(\"Please enter your credit card information. This information will NOT be saved.\\n\")\r\n card_number = input(\"Please type your CREDIT CARD NUMBER: \").strip()\r\n card_expiry= input(\"Please type the EXPIRY DATE (MM/YY): \").strip().replace(\"/\",\"\")\r\n cvv = input(\"Please type the 3 digit SECURITY CODE: \").strip()\r\n zip_code = input(\"Please type your ZIP/POSTAL CODE: \").strip()\r\n\r\n try:\r\n card = CreditCard(card_number, card_expiry, cvv, zip_code)\r\n except Exception as e:\r\n print(\"Card details INVALID, please try again. \\n\", e)\r\n return get_credit_card()\r\n\r\n return card", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n scale = string(card, 2, 'scale')\n x1_npoints = integer_or_double(card, 3, 'x1/npoints')\n if isinstance(x1_npoints, integer_types):\n npoints = x1_npoints\n assert 0 < npoints < 7, 'CBARAO npoints=%r must be 1-6' % npoints\n x1 = double(card, 4, 'x1')\n delta_x = double(card, 5, 'delta_x')\n x = np.linspace(x1, x1 + delta_x * (npoints-1), num=npoints)\n assert len(x) == npoints, x\n else:\n x = [\n x1_npoints,\n double_or_blank(card, 4, 'x2'),\n double_or_blank(card, 5, 'x3'),\n double_or_blank(card, 6, 'x4'),\n double_or_blank(card, 7, 'x5'),\n double_or_blank(card, 8, 'x6'),\n ]\n x = [xi for xi in x if xi is not None]\n assert len(card) <= 9, f'len(CBARAO card) = {len(card):d}\\ncard={card}'\n return CBARAO(eid, scale, x, comment=comment)", "def create_bank_account(cls, user_type: UserType) -> BankAccount:\n bank_account_no = input('Enter bank account number: ')\n bank_name = input('Enter bank name: ')\n bank_balance = -1\n while bank_balance < 0:\n bank_balance = float(input('Enter bank balance: '))\n if bank_balance < 0:\n print('Bank balance must be greater than or equal to 0! 
Please'\n ' enter again!')\n budget_manager = BudgetCreator.create_budget_manager()\n return cls._user_type_mapper[user_type](\n bank_account_no,\n bank_name,\n bank_balance,\n budget_manager,\n )", "def add_cc_details(self, address: BillingAddress, credit_card: CreditCard):\n if self.add_billing_address.is_displayed:\n self.add_billing_address.click()\n if self.edit_billing_address.is_displayed:\n self.edit_billing_address.click()\n self.customer_form.wait_displayed()\n self.customer_form.add(address)\n self.cc_form.add(credit_card)\n self.customer_form.save.click()\n if credit_card.sca:\n self.otp_tmp.wait_displayed()\n self.otp_form.complete_auth()\n self.browser.wait_for_element(\"//*[normalize-space(.)='Credit card number']\", timeout=20)", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def __init__(self, balance=0):\n self.balance = balance", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n scale = double(card, 3, 'scale')\n N = [double_or_blank(card, 4, 'N1', 0.0),\n double_or_blank(card, 5, 'N2', 0.0),\n double_or_blank(card, 6, 'N3', 0.0)]\n\n nodes = fields(integer_or_string, card, 'node', i=9, j=len(card))\n return ACCEL1(sid, scale, N, nodes, cid=cid, comment=comment)", "def __init__(self):\n self.__account_number = self.__generate_acct_number()\n self.__account_pin = self.__generate_pin()\n self.card_number = BankAccount.BIN + self.__account_number \\\n + self.__calculate_checksum(BankAccount.BIN + self.__account_number)\n self.__balance = 0\n self.__display_login_info()", "def simple_transaction(\n description: str,\n debit_account: Account,\n credit_account: Account,\n amount: Decimal | int,\n author: User,\n valid_on: date | None = None,\n confirmed: bool = True,\n) -> Transaction:\n if valid_on is None:\n valid_on = session.utcnow().date()\n new_transaction = Transaction(\n description=description, author=author, valid_on=valid_on, confirmed=confirmed\n )\n new_debit_split = Split(\n amount=-amount, account=debit_account, transaction=new_transaction\n )\n new_credit_split = Split(\n amount=amount, account=credit_account, transaction=new_transaction\n )\n session.session.add_all([new_transaction, new_debit_split, new_credit_split])\n return new_transaction", "def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})", "def create_account(self, account_tree):\n\n # Couple this object to the account object in order\n # to access the request_xml methods and other account info\n account_data = dict()\n account_data['client'] = self\n\n for param in account_tree.iter('CardData'):\n name = param.get('name',\"NA\")\n if name != \"NA\":\n account_data[name] = param.text\n\n for summary_element in account_tree.iter('AccountSummaryData'):\n key = 'value' if 'value' in 
summary_element.attrib else 'formattedValue'\n name = summary_element.get('name',\"NA\")\n if name != \"NA\":\n account_data[name] = summary_element.attrib[key]\n\n # Extract the loyalty programmes from the XML\n # for element in account_tree.findall('LoyaltyData/RewardsData/param'):\n # name = element.attrib['label']\n # value = element.attrib['formattedValue'].replace(',', '')\n # loyalty_programme = LoyaltyProgramme(name, value)\n # self.loyalty_programmes.append(loyalty_programme)\n\n\n return CardAccount(account_data)", "def customer():\n customer = stripe.Customer.create(\n description=\"User created by pytest test_payments.py\",\n email=generate_random_email(),\n address={\"country\": \"DK\"},\n )\n yield customer\n customer.delete()", "def __init__(self,\r\n id=None,\r\n number=None,\r\n name=None,\r\n balance=None,\r\n mtype=None,\r\n status=None,\r\n customer_id=None,\r\n institution_id=None,\r\n balance_date=None,\r\n created_date=None,\r\n currency=None,\r\n institution_login_id=None,\r\n display_position=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.id = id\r\n self.number = number\r\n self.name = name\r\n self.balance = balance\r\n self.mtype = mtype\r\n self.status = status\r\n self.customer_id = customer_id\r\n self.institution_id = institution_id\r\n self.balance_date = balance_date\r\n self.created_date = created_date\r\n self.currency = currency\r\n self.institution_login_id = institution_login_id\r\n self.display_position = display_position\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def post(self):\n ctx = _request_ctx_stack.top\n current_user = ctx.user\n request_body = request.get_json()\n name = request_body.get('name')\n account_type = request_body.get('type')\n initial_balance = request_body.get('ini_bal')\n if name:\n try:\n acc_factory = AccountFactory()\n if account_type == 'credit':\n limit = request_body.get('limit')\n if limit is None:\n return response('failed', 'Please specify a credit limit for a credit account', 400)\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance,\n limit=limit\n )\n else:\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance\n )\n new_account.save()\n except IntegrityError:\n return response('failed', 'Duplicate account name', 400)\n else:\n return response_created_account(new_account, 200)\n return response('failed', 'Missing account name attribute', 400)", "def __init__(self, start: datetime.date, balance: float) -> None:\n Contract.__init__(self, start)\n self.balance = balance * (-1)", "def __add_credit_cc(self):\n log.debug(\"Displaying __add_credit_cc\")\n # Create a keyboard to be sent later\n presets = self.cfg.ccard[\"payment_presets\"]\n keyboard = [[telegram.KeyboardButton(str(self.Price(preset)))] for preset in presets]\n keyboard.append([telegram.KeyboardButton(self.loc.get(\"menu_all_cancel\"))])\n # Boolean variable to check if the user has cancelled the action\n cancelled = False\n # Loop used to continue asking if there's an error during the input\n while not cancelled:\n # Send the message and the keyboard\n self.bot.send_message(self.chat.id, self.loc.get(\"payment_cc_amount\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait until a valid amount is sent\n selection = 
self.__wait_for_regex(r\"([0-9]+(?:[.,][0-9]+)?|\" + self.loc.get(\"menu_all_cancel\") + r\")\",\n cancellable=True)\n # If the user cancelled the action\n if isinstance(selection, CancelSignal):\n # Exit the loop\n cancelled = True\n continue\n # Convert the amount to an integer\n value = self.Price(selection)\n # Ensure the amount is within the range\n if value > self.Price(self.cfg.ccard[\"max_amount\"]):\n self.bot.send_message(self.chat.id,\n self.loc.get(\"error_payment_amount_over_max\",\n max_amount=self.Price(self.cfg.ccard[\"max_amount\"])))\n continue\n elif value < self.Price(self.cfg.ccard[\"min_amount\"]):\n self.bot.send_message(self.chat.id,\n self.loc.get(\"error_payment_amount_under_min\",\n min_amount=self.Price(self.cfg.ccard[\"min_amount\"])))\n continue\n break\n # If the user cancelled the action...\n else:\n # Exit the function\n return\n # Issue the payment invoice\n self.__make_payment(amount=value)", "def __init__(self, acName, openingBalance, initialOverdraft):\n \n super().__init__(acName, openingBalance)\n \n self.overdraftLimit = initialOverdraft\n # defines overdraft boolean as true, regardless of initial overdraft, therefore overdraft is an option for a PremiumAccount upon account creation\n self.overdraft = True\n \n print(self)\n self.issueNewCard()\n print(\"\\n{self.name}'s account is now active.\".format(self=self))", "def generate_bank_account(user_data: dict) -> BankAccount:\n bank_account = BankAccount(user_data[\"bank_account_number\"],\n user_data[\"bank_name\"],\n user_data[\"bank_balance\"])\n\n return bank_account", "def test_create_payment_profile(self):\n self.cim.create_payment_profile(\n customer_profile_id=u'300',\n customer_type=u'individual',\n card_number=u'42222222222',\n expiration_date=u'2009-10'\n )", "def CreateCustomer(Person):\n\t\t\tif Person.AddrCitytownNrID:\n\t\t\t\tcitytown = model.AddressCityTown.get(Person.AddrCitytownNrID)\n\t\t\t\tAddressLabel = '%s\\n%s\\n%s, %s\\n%s\\n%s' % (Person.AddrStr, citytown.Name, citytown.Block, citytown.District, citytown.State, citytown.ZipCode) \n\t\t\telse:\n\t\t\t\tAddressLabel = Person.AddrStr\n\t\t\tPersonName = ('%s %s,%s,%s' % (Person.Title, Person.NameFirst, Person.NameMiddle, Person.NameLast)).replace(',,',',').replace(',', ' ').strip()\n\t\t\tcustomer = model.InvCustomer(Name=PersonName ,CityID=Person.AddrCitytownNrID , AddressLabel=AddressLabel, CreditAmount=0.0, \\\n\t\t\t\tInventoryLocation=self.GetDefaultCustomerLocationID(), ExternalID=Person.id)\n\t\t\treturn customer", "def create_account_request(request):\n if request.method == \"POST\":\n form = NewAccountForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Creation successful.\")\n return redirect(\"home\")\n messages.error(request, \"Unsuccessful creation. 
Invalid information.\")\n form = NewAccountForm\n customer_list = Customer.objects.all()\n context = {'customer_list': customer_list, 'account_form': form}\n return render(request, \"accounts/account_creation.html\", context)", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n N = [double_or_blank(card, 3, 'N1', 0.0),\n double_or_blank(card, 4, 'N2', 0.0),\n double_or_blank(card, 5, 'N3', 0.0)]\n direction = string(card, 6, 'dir')\n\n i = 9\n locs = []\n vals = []\n j = 0\n nfields = len(card)\n while i < nfields:\n #raise NotImplementedError('ACCEL-line 2')\n loc = double(card, i, 'loc%i' % j)\n val = double(card, i, 'loc%i' % j)\n #print('i=%s j=%s len=%s loc=%s val=%s' % (i, j, len(card), loc, val))\n locs.append(loc)\n vals.append(val)\n j += 1\n i += 2\n return ACCEL(sid, N, direction, locs, vals, cid=cid, comment=comment)", "def card_factory(rank,suit):\n pass", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\n f\"Successfully updated customer {customer_id} credit limit\"\n )\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to update customer {customer_id}\"\n \" credit limit. {unknown_error}\"\n )\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def test_update_customer_credit(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n update_customer_credit(user_1['customer_id'], 5000.00)\r\n query = Customer.get(Customer.customer_id == user_1['customer_id'])\r\n self.assertEqual(5000.00, query.customer_limit)\r\n\r\n # Test for non-existant customer\r\n with self.assertRaises(ValueError):\r\n update_customer_credit('456879', 5000.00)\r\n\r\n # Test for non-float value inputted\r\n with self.assertRaises(TypeError):\r\n update_customer_credit(user_1['customer_id'], '$20')\r\n drop_db()", "def retrieve(customer, card_id):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, __ = http_client.get(routes.url(routes.CARD_RESOURCE, resource_id=card_id, customer_id=customer))\n return resources.Card(**response)", "def __init__(self, name: str, money: int):\r\n self.cards_stack = CardStack()\r\n self.name = name\r\n self.balance = MoneyStack(money)\r\n self.active = True\r\n self.round_money = MoneyStack(0)\r\n self.already_raised = False", "def insert_card(brand='Core', card_name='EMVVisaCredit', debit_fee=False, cashback_amount=None, zip_code=None, cvn=None, custom=None, split_tender=False):\n # TODO : match loyalty prompt text to see if it is there once, Merlin gets the sco html finalized.\n if not _is_element_present(CARD_PROCESSING[\"PINPad Image\"]):\n if not click_function_key('Pay'):\n return False\n if _is_element_present(PROMPT_BOX[\"Heading\"]):\n if not click_prompt_key('No'):\n return False\n\n payload = pinpad.insert_card(\n brand=brand,\n card_name=card_name,\n debit_fee=debit_fee,\n cashback_amount=cashback_amount,\n zip_code=zip_code,\n cvn=cvn,\n custom=custom\n )\n # Wait until transaction is fully finished.\n start_time = time.time()\n while time.time() - start_time <= pinpad_timeout: # TODO : how long do we want to wait?\n try:\n if payload['success'] and not 
_is_element_present(CARD_PROCESSING[\"PINPad Image\"]):\n return True\n # TODO : Has not been tested. I know this will fail as the 'Yes' and 'No'\n # button text are \"weird\". Instead of \"Yes\" it is \"\\n\\n\\nYes\"..\n # When I run into this scenario, I will fix this.\n elif \"SPLIT PAY\" in _get_text(PROMPT_BOX[\"Message\"]).upper():\n if split_tender:\n if not click_prompt_key('Yes'):\n logger.warning(\"Unable to click 'Yes' for Split Pay\")\n return False\n else:\n logger.info(\"Clicked Yes for Split Pay\")\n return True\n else:\n if not click_prompt_key('No'):\n logger.warning(\"Unable to click 'No' for Split Pay\")\n return False\n else:\n logger.info(\"Clicked No for Split Pay\")\n return True\n except:\n continue\n else:\n logger.warning(f\"Unable to pay with {card_name}\")\n return False", "def deck_create_card(deck_id):\n log_request(request)\n username = request.json['username']\n sId = request.json['session_id']\n sideA = request.json['sideA']\n sideB = request.json['sideB']\n\n # verify session\n if not user.verify(username, sId):\n return jsonify({'error' : 101})\n \n dId = deck.get_id(deck_id)\n \n # check that the deck exists\n if not deck.exists(dId):\n return jsonify({'error' : 300})\n\n ret = card.new(dId, sideA, sideB)\n \n return jsonify({'error' : 0})", "def main():\n# print('===== Start =====')\n# cc = CreditCard('9999999999999999') # 1 - Invalid - card type\n# print('1 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('4440') # 2 - Invalid - too short\n# print('2 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('5515460934365316') # 3 - Mastercard - Valid\n# print('3 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('6011053711075799') # 4 - Discover - Valid\n# print('4 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('379179199857686') # 5 - Amex - Valid\n# print('5 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('4929896355493470') # 6 - Visa - valid\n# print('6 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('4329876355493470') # 7 - Visa - Invalid - mod 10\n# print('7 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# cc = CreditCard('339179199857685') # 8 - Amex - Invalid - starting numbers\n# print('8 - Credit Card Number: ', cc.card_number, 'Card Type: ', cc.card_type, 'Valid: ', cc.valid)\n\n# print('===== Done =====')\n pass", "def test_add_customer(self):\n set_up_db()\n add_customer(*self.test_customer)\n test_customer = Customer.get_by_id(1)\n self.assertEqual(\"Bruce\", test_customer.name)\n self.assertEqual(\"Wayne\", test_customer.last_name)\n self.assertEqual(\"1007 Mountain Drive, Gotham\", test_customer.home_address)\n self.assertEqual(\"228-626-7699\", test_customer.phone_number)\n self.assertEqual(\"b_wayne@gotham.net\", test_customer.email)\n self.assertEqual(True, test_customer.status)\n self.assertEqual(200000.00, test_customer.credit_limit)", "def credit_card_payment(self, card, order, user):\n with transaction.atomic():\n payment_txn = Transaction.objects.create(gateway=self.gateway,\n order=order,\n description='Transaction for order #%s' % order.id,\n status=Transaction.STATUS_PROCESSING,\n currency=order.currency.code,\n 
amount=order.charge_amount,\n updated_by=unicode(user),\n created_by=unicode(user))\n try:\n charge = stripe.Charge.create(\n amount=int(order.charge_amount * 100), # 100 cents to charge $1.00\n currency=order.currency.code.lower(),\n description='Payment for order #%s' % (order.id),\n card={\n 'number': card['number'],\n 'name': card['name'],\n 'exp_month': card['expire_month'],\n 'exp_year': card['expire_year'],\n 'cvc': card['cvv2']\n })\n\n with transaction.atomic():\n # Saving only few necessary fields for refunding\n payment_txn.status = Transaction.STATUS_APPROVED\n payment_txn.add_param('id', unicode(charge.id), user)\n payment_txn.add_param('created', unicode(charge.created), user)\n payment_txn.add_param('amount', unicode(charge.amount), user)\n payment_txn.add_param('card_id', unicode(charge.card.id), user)\n payment_txn.add_param('card_last4', unicode(charge.card.last4), user)\n payment_txn.add_param('card_country', unicode(charge.card.country), user)\n payment_txn.add_param('card_brand', unicode(charge.card.brand), user)\n payment_txn.save()\n\n order.payment_status = Order.PAYMENT_PAID\n order.updated_by = unicode(user)\n order.save()\n\n except stripe.error.CardError as e:\n # The card has been declined\n body = e.json_body\n error = body['error']\n logger.warning('Credit Card has been declined (transaction_id: %s)' % payment_txn.id, extra=error)\n\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.error_message = error['message']\n payment_txn.save()\n\n raise DoorstepError(error['message'])\n except Exception as e:\n logger.error('Failed to process Credit Card (transaction_id: %s)' % payment_txn.id)\n logger.exception(e)\n\n raise DoorstepError('We failed to process your Credit Card at the moment, please try again later!')", "def create_customer(email=None, name=None, user_type='customer'):\n if user_type == 'charity':\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n else:\n stripe.api_key = Config.STRIPE_SECRET_KEY\n if email and name:\n customer = stripe.Customer.create(email=email, name=name)\n else:\n customer = stripe.Customer.create()\n return customer.id", "def create_new_banks():\n\n\tcity = request.form.get('bankCity', '')\n\tname = request.form.get('bankName', '')\n\taddress = request.form.get('bankAddress', '')\n\tinfo = dict(city=city, name=name, address=address)\n\t# print(info)\n\tbank = Bank(city, name, address)\n\tres = bank.save()\n\t# print('res=%d' % res)\n\treturn send_result(info, res, status=\"True\")", "def generate_customer(self, start_of_month):\n customer_rates = np.random.multivariate_normal(\n mean=self.log_means, cov=self.behave_cov\n )\n customer_rates = self.exp_fun(customer_rates)\n customer_rates = np.maximum(customer_rates - 0.667, 0.333)\n new_customer = Customer(\n customer_rates, channel_name=self.version, start_of_month=start_of_month\n )\n # print(customer_rates)\n return new_customer", "def charge(customer):\n payment_method = generate_payment_method(customer[\"email\"])\n payment_intent = stripe.PaymentIntent.create(\n amount=80000,\n currency=\"dkk\",\n automatic_payment_methods={\"enabled\": True},\n customer=customer,\n description=\"Payment intent created by pytest test_payments.py\",\n payment_method=payment_method,\n )\n result = payment_intent.confirm(\n return_url=\"https://test.dknog.dk/returnurl\"\n )\n assert \"charges\" in result and len(result[\"charges\"][\"data\"]) == 1\n return result[\"charges\"][\"data\"][0]", "def define_card(card):\n try:\n value = define_card_value(card[0])\n color = 
define_card_color(card[1])\n return Card(value, color)\n except AttributeError:\n pass", "def __init__(self, id=None, customer_id=None, provider_code=None, provider_name=None, accounts=None, holder_info=None, client_name=None): # noqa: E501 # noqa: E501\n self._id = None\n self._customer_id = None\n self._provider_code = None\n self._provider_name = None\n self._accounts = None\n self._holder_info = None\n self._client_name = None\n self.discriminator = None\n self.id = id\n self.customer_id = customer_id\n self.provider_code = provider_code\n self.provider_name = provider_name\n self.accounts = accounts\n self.holder_info = holder_info\n self.client_name = client_name", "def generate_customer(self):\n customer_rates = np.random.multivariate_normal(\n mean=self.behave_means, cov=self.behave_cov\n )\n customer_rates = customer_rates.clip(\n min=self.min_rate\n ) # clip : no negative rates!\n new_customer = Customer(customer_rates)\n # print(customer_rates)\n return new_customer", "def __init__(self,owner,balance):\r\n self.owner = owner\r\n self.balance = balance\r\n #print(f\"Account owner: {self.owner} \\nAccount Balance: {self.balance}\")\r\n print(\"{}'s balance is {}\".format(self.owner,self.balance))", "def __init__(self):\n self.account_balance = 0\n self.amount = 0", "def createCard(self,id,name):\n card = Card(id,name)\n self.cards[id] = card\n print('Created Card:'+id)", "def makebank(self, amount):\n if self.__returned and not self.__made:\n self.__amount = amount\n self.__made = True", "def create_budget(client, customer_id):\n # Retrieves the campaign budget service.\n campaign_budget_service = client.get_service(\"CampaignBudgetService\")\n # Retrieves a new campaign budget operation object.\n campaign_budget_operation = client.get_type(\"CampaignBudgetOperation\")\n # Creates a campaign budget.\n campaign_budget = campaign_budget_operation.create\n campaign_budget.name = f\"Interplanetary Cruise #{uuid4()}\"\n campaign_budget.amount_micros = 50000000\n campaign_budget.delivery_method = (\n client.enums.BudgetDeliveryMethodEnum.STANDARD\n )\n # An App campaign cannot use a shared campaign budget.\n # explicitly_shared must be set to false.\n campaign_budget.explicitly_shared = False\n\n # Submits the campaign budget operation to add the campaign budget.\n response = campaign_budget_service.mutate_campaign_budgets(\n customer_id=customer_id, operations=[campaign_budget_operation]\n )\n resource_name = response.results[0].resource_name\n print(f'Created campaign budget with resource_name: \"{resource_name}\"')\n return resource_name", "def __init__(self, acName, openingBalance):\n\n print(\"\\nOpening new account...\")\n self.balance = float(openingBalance)\n self.name = str(acName)\n BasicAccount.acNum += 1 # increment acNum by 1 for account number serialisation\n self.acNum = BasicAccount.acNum\n \n # if a BasicAccount is being created, try block is executed\n try:\n print(self)\n self.issueNewCard()\n print(\"\\n{self.name}'s account is now active.\".format(self=self))\n # AttributeError occurs when PremiumAccount is being created because cannot print(self) from super().__init__ method before all parameters of PremiumAccount have been assigned to self\n except AttributeError:\n return", "def test_client_bank_account_create(self):\n pass", "def update_customer_credit(customer_id, credit_limit):\n customer = search_customer(customer_id)\n if customer is None:\n raise ValueError(f'Could not find customer for update with id '\n f'{customer_id}.')\n customer.credit_limit = 
credit_limit\n customer.save()", "def reg_stripe_custom_account(data):\n stripe.api_key = Config.STRIPE_SECRET_KEY\n _file = upload_file('matthew.jpg')\n try:\n account = stripe.Account.create(\n country='AU',\n type='custom',\n requested_capabilities=['card_payments', 'transfers'],\n email=data['email'],\n business_type='individual',\n default_currency='AUD',\n individual={\n 'first_name': 'Matthew Peter',\n 'last_name': 'Hogan',\n 'email': data['email'],\n 'phone': '+61480028895',\n 'address': {\n 'city': 'Sydney',\n 'state': 'New South Wales',\n 'line1': '123 Fake st.',\n 'line2': '',\n 'postal_code': '2000'\n },\n 'dob': {\n 'day': '26',\n 'month': '07',\n 'year': '1956'\n },\n 'verification': {\n 'document': {\n 'front': _file\n },\n 'additional_document': {\n 'front': _file\n }\n }\n },\n business_profile={\n 'mcc': '8099',\n 'url': 'mcc-org-web-dev.appelloproject.xyz'\n },\n tos_acceptance={\n 'date': int(datetime.now().timestamp()),\n 'ip': '3.222.227.234'\n },\n external_account={\n 'object': 'bank_account',\n 'country': 'AU',\n 'currency': 'AUD',\n 'routing_number': data['bank_bsb'].replace('-', '').replace(' ', ''),\n 'account_number': data['bank_account'],\n 'default_for_currency': True\n },\n settings={\n 'payouts': {\n 'schedule': {\n 'delay_days': 2,\n 'interval': 'weekly',\n 'weekly_anchor': 'tuesday'\n },\n }\n }\n )\n # import time\n # time.sleep(120)\n # success, result = add_external_card(account.id, data)\n return True, account\n # stripe.Account.delete(account.id)\n # return False, result\n except Exception as e:\n if hasattr(e, 'param') and e.param in bank_params_bind:\n return False, {bank_params_bind[e.param]: [e.args[0]]}\n return False, {'bank': ['Invalid bank details.']}", "def test_newCustomer(self):\n\t\tdashboardPage = DashboardPage(self.driver)\n\t\tdashboardPage.goToOnboard()\n\n\n\t\tdashboardPage.createCustomer(USER_NAME, S3FOLDER)\n\t\tdashboardPage.goToCustomerList()\n\t\tdashboardPage.sortRecentCustomer()\n\n\t\tinitialId = dashboardPage.getId()\n\t\teditPage = dashboardPage.goToEditPage() \n\t\tcheckId, checkName, checkS3Folder, maxSize, panoMaxSize, checkBox = editPage.getParameters()\n\n\n\t\tself.assertEqual(initialId, checkId)\n\t\tself.assertEqual(checkName, USER_NAME)\n\t\tself.assertEqual(checkS3Folder, S3FOLDER)\n\t\tself.assertEqual(maxSize, MAX_SIZE)\n\t\tself.assertEqual(panoMaxSize, PANO_MAX_SIZE)\n\t\tself.assertEqual(CHECK_BOX, checkBox)", "def card_balance(self, card_balance):\n\n self._card_balance = card_balance", "def get_customer(self) -> djstripe.models.Customer:\n if self.customer_id:\n return self.customer\n\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n customer = stripe.Customer.create(name=name, email=email)\n self.customer = djstripe.models.Customer.sync_from_stripe_data(customer)\n except Exception:\n logger.exception(\"Error creating customer on Stripe\")\n else:\n self.customer = djstripe.models.Customer.objects.create(\n id=shortuuid.uuid(), name=name, email=email\n )\n\n self.save()\n return self.customer" ]
[ "0.74948144", "0.73887783", "0.72569084", "0.7231468", "0.68172675", "0.6624415", "0.65744996", "0.6494921", "0.6418747", "0.6410303", "0.63047266", "0.6296109", "0.6277576", "0.62600785", "0.62094873", "0.6208367", "0.61989254", "0.61363536", "0.61312497", "0.61093074", "0.60620254", "0.60525846", "0.60508394", "0.60404766", "0.60349244", "0.60296065", "0.601951", "0.59977704", "0.5947822", "0.5923609", "0.58862424", "0.58733004", "0.583525", "0.5830374", "0.58215374", "0.57945615", "0.57931983", "0.5779887", "0.5775941", "0.57522106", "0.5723288", "0.57178193", "0.57055724", "0.5703757", "0.56774795", "0.5677379", "0.56681114", "0.56281465", "0.56230474", "0.5604055", "0.5602421", "0.5597292", "0.55891854", "0.5587409", "0.5579453", "0.55765575", "0.5535964", "0.55297285", "0.550021", "0.5497476", "0.547593", "0.5459832", "0.54587555", "0.5454069", "0.5446957", "0.5442872", "0.54397845", "0.5427976", "0.541488", "0.54027903", "0.540099", "0.5389017", "0.5387995", "0.53744507", "0.5371574", "0.53685266", "0.53659475", "0.5360448", "0.5357828", "0.535587", "0.5354699", "0.5340048", "0.53333354", "0.5327996", "0.53251386", "0.5318258", "0.53138095", "0.53105384", "0.53040296", "0.53008884", "0.53003144", "0.52891415", "0.5285648", "0.52804255", "0.52621377", "0.5261568", "0.52604526", "0.52474785", "0.5247313", "0.52376276" ]
0.7120257
4
Return name of the customer
def get_customer(self):
    return self._customer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def customer_name(self):\n return self._customer_name", "def customer(self):\n return self.__customer", "def getCustomer(self):\n return self.base.get(\"customer\", [])", "def __str__(self):\n\n return self.customer.first_name + \" \" + self.payment_name", "def getCustomer(self):\n return self._Customer", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def __repr__(self):\n\n return '<Customer customer_id={} first_name={}>'.format(\n self.customer_id, self.first_name)", "def __repr__(self):\n\n return f\"<Customer: {self.first_name}, {self.last_name}\"", "def customer_id(self) -> str:\n return self._customer_id", "def store_customer(self, name):\n pass", "def customer(self):\n return Customer(self._dict.get('customer'))", "def customer_name(self, customer_name):\n self._customer_name = customer_name", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def test_get_customer_by_name(self):\n test_customer = self._create_customers(\"Alex\")\n test_customer.create()\n test_customer = self._create_customers(\"Sally\")\n test_customer.create()\n test_customer = self._create_customers(\"John\")\n test_customer.create()\n resp = self.app.get(\"/customers?name={}\".format(\"John\"))\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"name\"], test_customer.name)", "def get_name(self):\n\n return \"Sawyer McLane\"", "def get_current_customer(self):\n def _random_string():\n random_string = u''.join(random.choice(\n string.ascii_uppercase + string.ascii_uppercase)\n for _ in range(20))\n return random_string\n\n id = _random_string()\n owner_id = _random_string()\n\n current_customer = {\n u'can_edit_matches': u'0',\n u'can_read_public_ip_list': u'0',\n u'can_upload_vcl': u'1',\n u'updated_at': u'2014-11-03T23:37:44+00:00',\n u'has_config_panel': u'1',\n u'has_improved_ssl_config': False,\n u'id': id,\n u'has_historical_stats': u'1',\n u'has_openstack_logging': u'0',\n u'can_configure_wordpress': u'0',\n u'has_improved_logging': u'1',\n u'readonly': '',\n u'ip_whitelist': u'0.0.0.0/0',\n u'owner_id': owner_id,\n u'phone_number': u'770-123-1749',\n u'postal_address': None,\n u'billing_ref': None,\n u'can_reset_passwords': True,\n u'has_improved_security': u'1',\n u'stripe_account': None,\n u'name': u'Poppy - Test',\n u'created_at': u'2014-11-03T23:37:43+00:00',\n u'can_stream_syslog': u'1',\n u'pricing_plan': u'developer',\n u'billing_contact_id': None,\n u'has_streaming': u'1'}\n return current_customer", "def account_name(self):\n return self.civic_no_city()", "def get_name():\r\n name = input(\"What is the customer's name?: \")\r\n\r\n return name", "def search_for_customer(self, name):\n customers_list = self.get_customers()\n return next((customer for customer in customers_list if customer.get('name') == name), {'name': None, 'parent':None, 'active': None, 'link': None })", "def get_name(self):\n return self.card_name", "def customer_email(customer):\n return customer.get(\"email\")", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def _create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"alex@jr.com\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def get_name() -> str:", "def get_customer(self):\n try:\n cursor = self.db.cursor()\n 
cursor.execute(\"SELECT * FROM costumers WHERE dni=?\", (self.dni,))\n return cursor.fetchall()\n except:\n print(\"Error\")", "def display_name(self) -> str:\n return self.requester.username", "def customer(self, customer_id=None):\r\n return customers.Customer(self, customer_id)", "def print_customers(self):\n self.current_time = self.get_time()\n return f'Supermarket(\"{self.customers}\", \"{self.current_time}\")'", "def table_info(self):\n for customer in self.customers:\n print(customer.get_name())", "def __str__(self) -> str:\n return self.customer.name + ' arrives at ' + str(self.timestamp)", "def customer_id(self):\n return self._customer_id", "def getCustomerAccount(self):\n return self._CustomerAccount", "def getCustomerAccount(self):\n return self._CustomerAccount", "def return_customer(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_customer, login, password, customer_name, phone, email, perm\n FROM Customers\n WHERE id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchone()", "def get_name():\n\n return character['Name']", "def retrieveCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('retrieve_customer', params)", "def search_for_customer(f_name: str, l_name: str):\n return cr.search_for_customer(f_name=f_name, l_name=l_name)", "def getCustomer(self):\n if self.__orderinfo is None:\n return False\n else:\n self.__customer.getCustomer(self.__orderinfo['customerID'])", "def get_name() -> str:\n pass", "def next_customer(self) -> Optional[str]:\n if len(self.priority_customer) > 0:\n return self.priority_customer.pop(0)\n elif len(self.normal_customer) > 0:\n return self.normal_customer.pop(0)\n else:\n return None", "def name(self) -> str:\n return self.user.name", "def get_full_name(self):\n return self.name + \" \" + self.email", "def customer_email(self):\n return self._customer_email", "def get_name(self): \r\n return self.name", "def get_client_name(self, obj):\n\t\treturn obj.client.name", "def get_name(self) -> str:\r\n return self.name", "def get_name():", "def get_full_name(self):\n\t\treturn self.email", "def get_new_customer() -> Customer:\r\n print(\"\\n-- PERSONAL INFORMATION --\")\r\n print(\"To start an order you must provide the following details.\\n\")\r\n\r\n print(\"- NAME -\")\r\n first_name = get_valid_input(\"Please type your FIRST NAME: \", validate_name)\r\n last_name = get_valid_input(\"Please type your LAST NAME: \", validate_name)\r\n\r\n print(\"\\n- CONTACT -\")\r\n email = get_valid_input(\"Please type your EMAIL address: \", validate_email)\r\n phone = get_valid_input(\"Please type your PHONE NUMBER: \", validate_phone).replace(\"-\",\"\").replace(\"(\", \"\").replace(\")\", \"\")\r\n\r\n print(\"\\n- ADDRESS -\")\r\n print(\"Please type your ADDRESS using the following form.\")\r\n print(\"HOUSE # Street Name, City, State/Province, ZIP/Postal Code\")\r\n print(\"EXAMPLE: 700 Pennsylvania Avenue NW, Washington, DC, 20408\")\r\n\r\n address = get_valid_input(\"ADDRESS: \", validate_address)\r\n\r\n customer = Customer(last_name, first_name, email, phone, address)\r\n return customer", "def _cname(self,account_id):\n company = self.pool.get('account.account').browse(self.cr, self.uid, account_id).company_id\n self.caddress = self._cadd(company)\n return company.name", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def 
get_name(self):\n return self.name", "def get_name(self):\n\n return self.name", "def test_get_customer(self):\n # get the id of a customer\n test_customer = self._create_customers(\"Alex\")\n logging.debug(test_customer)\n test_customer.create() \n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_customer.name)", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def GetCustomer(Person):\n\t\t\tcustomers = model.InvCustomer.select(model.InvCustomer.q.ExternalID==Person.id)\n\t\t\tif customers.count() == 0:\n\t\t\t\treturn CreateCustomer(Person)\n\t\t\telse:\n\t\t\t\treturn customers[0]", "def get(self, customer_id):\n customer = get_a_customer(customer_id)\n if not customer:\n api.abort(404)\n else:\n return customer", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def get_name(self):\r\n return self.name", "def name(self, cname: str)->str:\n return self.like(cname, mx=1)[0]['cname']", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def get_customer(self) -> djstripe.models.Customer:\n if self.customer_id:\n return self.customer\n\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n customer = stripe.Customer.create(name=name, email=email)\n self.customer = djstripe.models.Customer.sync_from_stripe_data(customer)\n except Exception:\n logger.exception(\"Error creating customer on Stripe\")\n else:\n self.customer = djstripe.models.Customer.objects.create(\n id=shortuuid.uuid(), name=name, email=email\n )\n\n self.save()\n return self.customer", "def _get_name(self):\n return self.name", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. 
Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_coordinated_car_name(self):\n return self.coordinated_car_name", "def name(self):\n return \"{} {}\".format(self._clientname, self._name)", "def get_name(self) -> str:\n pass", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_full_name(self):\n\n return self.name", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def displayname(self):\n return self.email", "def get_customer(connection, customer_id):\n connection.command_path = \"customer/{0}\".format(customer_id)\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n body = res.content\n if res.status_code > 210:\n return\n return customers.parse_customer(body)", "def get_real_name(self):\n return self.get_display_name()", "def name(self):\n return f\"{self.client_name} {self._name}\"", "def name(self):\n return f\"{self.client_name} {self._name}\"", "def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'" ]
[ "0.8773322", "0.72585183", "0.72349936", "0.715536", "0.7111342", "0.7096749", "0.684566", "0.68219835", "0.6775382", "0.67625487", "0.67317456", "0.6717967", "0.6713337", "0.6651998", "0.6633621", "0.66315216", "0.66025484", "0.6575484", "0.65680176", "0.6505166", "0.6465378", "0.64554167", "0.640338", "0.63755095", "0.63688946", "0.6364894", "0.6360344", "0.63435906", "0.63201183", "0.63017035", "0.62962466", "0.6279019", "0.6279019", "0.6262093", "0.62542075", "0.62445647", "0.6228432", "0.6223182", "0.62225443", "0.62212783", "0.621389", "0.6190445", "0.6182184", "0.6174367", "0.6170532", "0.61675215", "0.6165873", "0.6162177", "0.61618793", "0.615253", "0.6151044", "0.6151044", "0.6146406", "0.6145299", "0.6144546", "0.61443514", "0.61443514", "0.61443514", "0.6135197", "0.6128521", "0.6126647", "0.61235267", "0.61033523", "0.6103101", "0.6099169", "0.6073792", "0.60695064", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065409", "0.6065055", "0.60621876", "0.60606587", "0.60580796", "0.60580796", "0.60580796", "0.60536313", "0.60503656", "0.60465026", "0.60315984", "0.6029086", "0.6025331", "0.6025331", "0.60196316" ]
0.7327407
2
Return the bank's name
def get_bank(self): return self._bank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bank_name():\r\n\r\n with open(\"config.json\") as f:\r\n config = json.loads(f.read())\r\n\r\n return config[\"BANK_NAME\"]", "def get_bank_name_by_id(bank_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank where id = '{}';\".format(bank_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def bank(self) -> str:\n return self.random_element(self.banks)", "def __str__(self):\n return f\"bank name: {self._bank_name}\\n\" \\\n f\"account_num: {self._account_num}\\n\" \\\n f\"balance: {self._balance}\"", "def get_name():\n return \"Boss\"", "def get_name() -> str:", "def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'", "def printname(bruce):", "def budget_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"budget_name\")", "def get_name(self):\n return self.card_name", "def account_name(self):\n return self.civic_no_city()", "def get_name():", "def get_name():\n\n return character['Name']", "def get_name() -> str:\n pass", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def displayName(self):\n\t\treturn self.tr(\"Get Drainage Basins\")", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def __repr__(self) -> str:\n return f\"Bank({self.balance}, {self.bet})\"", "def get_bank_address_by_name(bank_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from bank where name = '{}';\".format(bank_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def nome_bandeira(self):\n return self._nome_bandeira", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def get_name(self) -> str:\n pass", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self) -> str:\r\n return self.name", "def get_name(self): \r\n return self.name", "def bank_name(self, bank_name):\n\n self._bank_name = bank_name", "def get_full_name(self):\n return self.name #self is base and it hits name filed", "def bank_account_bban(self):\n return self.__bank_account_bban", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_name(self):\n return self.name", "def _get_name(self):\n return self.name", "def getName(self):\n l = []\n for wt in self.weights:\n l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))\n for bs in self.bias:\n #print(\"BS: \"+str(bs[0]))\n l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))\n l[0] = chr(ord(l[0]) - 32)\n self.name = ''.join(l)\n return self.name", "def get_name(self):\r\n return self.name", "def get_name(self):", "def get_name(self):", "def display_name(self):\n if len(self.current_hand) == 0:\n 
return self.nume\n else:\n card_sum = self.get_cards_sum()\n blackjack = ''\n if card_sum == 21:\n blackjack = 'BLACKJACK !!!'\n return ('%s [%s] - %d' %\n (self.nume, self.get_cards_str(), card_sum)) + blackjack", "def getName(self):\n return \"\"", "def budget_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"budget_name\")", "def name_bank(self, new_bank_name: str):\n if self.is_bank_name_valid(new_bank_name):\n self.bank_name = new_bank_name\n else:\n raise ValueError(\"Bank name is not valid!\")", "def get_name(self):\n\n return self.name", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def get_name(self) -> str:\n return self.name + \" - \\u20ac\" + str(self.price)", "def bank(self):\n return self.random_element(self.banks)", "def budget_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"budget_name\")", "def getName(self):", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def cardholder_name(self):\n return self.__cardholder_name", "def last_name():\r\n\r\n return surnames()", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def getCurrencyName(id=None):", "def wallet_name(self) -> str:\n\n return self._wallet_name", "def get_name(self):\n return self.name", "def get_name(self):\n return", "def getcurrencyfullname(self):\n return self.__currencyFullName", "def name(self) -> str:\n\t\treturn self._raw_result['name']", "def get_full_name(self):\n\n return self.name", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def get_base_name(file_name, num_banks):\n datatypeutility.check_string_variable('Calibration file name', file_name)\n\n base_name = os.path.basename(file_name).split('.')[0] + '{0}banks'.format(num_banks)\n\n return base_name", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))" ]
[ "0.7939867", "0.76192105", "0.76158357", "0.7009449", "0.68444806", "0.6841892", "0.6825423", "0.681393", "0.67982596", "0.67663383", "0.67662084", "0.6721667", "0.65943986", "0.6575735", "0.6546608", "0.6546608", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65313816", "0.64770633", "0.64770633", "0.64770633", "0.64770633", "0.64770633", "0.64347744", "0.64307714", "0.63966537", "0.6371697", "0.6371697", "0.6371697", "0.6371697", "0.6366499", "0.6339569", "0.6339569", "0.6339569", "0.63278466", "0.63202345", "0.62944347", "0.62893695", "0.6289115", "0.6283284", "0.62720263", "0.62720263", "0.62718356", "0.6270176", "0.6255728", "0.6254726", "0.62543136", "0.62543136", "0.62538624", "0.62497014", "0.62287027", "0.6224168", "0.6217236", "0.6206238", "0.6206238", "0.6206238", "0.6206238", "0.6206238", "0.62040025", "0.6195351", "0.6194105", "0.61940306", "0.6185168", "0.6185168", "0.6185168", "0.61818117", "0.6170234", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6163137", "0.6147006", "0.6143333", "0.61367804", "0.6127567", "0.61176306", "0.61069137", "0.6101847", "0.60859424", "0.60831624" ]
0.67452586
11
Return the bank's name
def get_account(self): return self._account
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bank_name():\r\n\r\n with open(\"config.json\") as f:\r\n config = json.loads(f.read())\r\n\r\n return config[\"BANK_NAME\"]", "def get_bank_name_by_id(bank_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank where id = '{}';\".format(bank_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def bank(self) -> str:\n return self.random_element(self.banks)", "def __str__(self):\n return f\"bank name: {self._bank_name}\\n\" \\\n f\"account_num: {self._account_num}\\n\" \\\n f\"balance: {self._balance}\"", "def get_name():\n return \"Boss\"", "def get_name() -> str:", "def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'", "def printname(bruce):", "def budget_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"budget_name\")", "def get_name(self):\n return self.card_name", "def account_name(self):\n return self.civic_no_city()", "def get_bank(self):\n return self._bank", "def get_name():", "def get_name():\n\n return character['Name']", "def get_name() -> str:\n pass", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def account_name(self) -> str:\n return pulumi.get(self, \"account_name\")", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def displayName(self):\n\t\treturn self.tr(\"Get Drainage Basins\")", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def __repr__(self) -> str:\n return f\"Bank({self.balance}, {self.bet})\"", "def get_bank_address_by_name(bank_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from bank where name = '{}';\".format(bank_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def nome_bandeira(self):\n return self._nome_bandeira", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def get_name(self) -> str:\n pass", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self) -> str:\r\n return self.name", "def get_name(self): \r\n return self.name", "def bank_name(self, bank_name):\n\n self._bank_name = bank_name", "def get_full_name(self):\n return self.name #self is base and it hits name filed", "def bank_account_bban(self):\n return self.__bank_account_bban", "def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_name(self):\n return self.name", "def _get_name(self):\n return self.name", "def getName(self):\n l = []\n for wt in self.weights:\n l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))\n for bs in self.bias:\n #print(\"BS: \"+str(bs[0]))\n l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))\n l[0] = chr(ord(l[0]) - 32)\n self.name = ''.join(l)\n return self.name", "def get_name(self):\r\n return self.name", "def get_name(self):", "def get_name(self):", "def 
display_name(self):\n if len(self.current_hand) == 0:\n return self.nume\n else:\n card_sum = self.get_cards_sum()\n blackjack = ''\n if card_sum == 21:\n blackjack = 'BLACKJACK !!!'\n return ('%s [%s] - %d' %\n (self.nume, self.get_cards_str(), card_sum)) + blackjack", "def getName(self):\n return \"\"", "def budget_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"budget_name\")", "def name_bank(self, new_bank_name: str):\n if self.is_bank_name_valid(new_bank_name):\n self.bank_name = new_bank_name\n else:\n raise ValueError(\"Bank name is not valid!\")", "def get_name(self):\n\n return self.name", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def get_name(self) -> str:\n return self.name + \" - \\u20ac\" + str(self.price)", "def bank(self):\n return self.random_element(self.banks)", "def budget_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"budget_name\")", "def getName(self):", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def cardholder_name(self):\n return self.__cardholder_name", "def last_name():\r\n\r\n return surnames()", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def getCurrencyName(id=None):", "def wallet_name(self) -> str:\n\n return self._wallet_name", "def get_name(self):\n return self.name", "def get_name(self):\n return", "def getcurrencyfullname(self):\n return self.__currencyFullName", "def name(self) -> str:\n\t\treturn self._raw_result['name']", "def get_full_name(self):\n\n return self.name", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def get_base_name(file_name, num_banks):\n datatypeutility.check_string_variable('Calibration file name', file_name)\n\n base_name = os.path.basename(file_name).split('.')[0] + '{0}banks'.format(num_banks)\n\n return base_name", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))" ]
[ "0.7939867", "0.76192105", "0.76158357", "0.7009449", "0.68444806", "0.6841892", "0.6825423", "0.681393", "0.67982596", "0.67663383", "0.67662084", "0.67452586", "0.6721667", "0.65943986", "0.6575735", "0.6546608", "0.6546608", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.65313816", "0.64770633", "0.64770633", "0.64770633", "0.64770633", "0.64770633", "0.64347744", "0.64307714", "0.63966537", "0.6371697", "0.6371697", "0.6371697", "0.6371697", "0.6366499", "0.6339569", "0.6339569", "0.6339569", "0.63278466", "0.63202345", "0.62944347", "0.62893695", "0.6289115", "0.6283284", "0.62720263", "0.62720263", "0.62718356", "0.6270176", "0.6255728", "0.6254726", "0.62543136", "0.62543136", "0.62538624", "0.62497014", "0.62287027", "0.6224168", "0.6217236", "0.6206238", "0.6206238", "0.6206238", "0.6206238", "0.6206238", "0.62040025", "0.6195351", "0.6194105", "0.61940306", "0.6185168", "0.6185168", "0.6185168", "0.61818117", "0.6170234", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6164098", "0.6163137", "0.6147006", "0.6143333", "0.61367804", "0.6127567", "0.61176306", "0.61069137", "0.6101847", "0.60859424", "0.60831624" ]
0.0
-1
Return current credit limit
def get_limit(self): return self._limit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_plan_limit(self, source):\n commitment = getattr(self.get_subscription(), 'commitment', {})\n return self.get_plan().get_price_data(source, commitment)[1]", "def get_limit(self):\n return self.limit", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def limit(self):\n return self._owner.plan", "def _get_next_limit(self):\n return self.__quota", "def get_display_plan_limit(self, source):\n commitment = getattr(self.get_subscription(), 'commitment', {})\n return self.get_plan().get_display_price_data(source, commitment)[1]", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def get_rate_limit(client):\n query = '''query {\n rateLimit {\n limit\n remaining\n resetAt\n }\n }'''\n response = client.execute(query)\n json_response = json.loads(response)\n return json_response['data']['rateLimit']", "def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)", "def read_current_limit(self):\n function_string = 'I' + self.output + '?'\n value_string = self.scpi_comm(function_string)\n try:\n value = float(value_string.replace('I' + self.output, ''))\n except ValueError:\n value = -999999\n return value", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def deposits_limit(self):\n limits = self.user.limits\n value = 0\n if limits.exists():\n value = self.user.limits.get(type=Limit.DEPOSIT).value\n return value", "def limit(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"limit\")", "def adaptive_limit(self) -> int:\n return pulumi.get(self, \"adaptive_limit\")", "def current_rate(entity, limit, duration):\n\n key = \"ratelimit:{}:{}\".format(int(time.time() / duration), entity)\n value = memcache.incr(key, initial_value=0)\n if value > limit:\n logging.info(\n \"RateLimitDenied({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n else:\n logging.info(\n \"RateLimitAllowed({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n return value", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def limit(self):\n return self._limit", "def limit(self):\n return self._limit", "def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n # TODO distinct up/down rates\n # check limiting rate for resource flow in/out, if any\n if self._rate:\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]\n delta = np.sign(amt) * min(max_rate, abs(amt))\n print('max_rate in _check_rate_limit',max_rate, 'delta (min of maxrate and abs(amt)',delta)\n return {res: delta}, meta\n return {res: amt}, meta", "def limit(self):\n if self._limit:\n return self._limit\n else: # no custom limit, go with the default\n return PublicAppPlan", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' 
% (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def acceleration_limit(self):\n return self._read(MX_ACCELERATION_LIMIT)", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def _get_max_expense(self):\n pass", "def calculate(self, limit):\r\n pass", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit", "def speed_limit(comp):\n return max(min(comp, SPEED_LIMIT), -1 * SPEED_LIMIT)", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def getCurrentBalance(self):\r\n return self.balance_amt", "def capture_limit(self):\n return self._capture_limit", "def calculate(self, limit):\n pass", "async def get_limit(cls, user_id, mcc_code):\n try:\n limit = await db.one(cls.SELECT_LIMIT, user_id=user_id, mcc_code=mcc_code)\n except exceptions.NoResultFound:\n LOGGER.error(\"Could not find limit by mcc code=%s for user=%s.\", mcc_code, user_id)\n raise DatabaseError\n except SQLAlchemyError as err:\n LOGGER.error(\"Failed to fetch limit by mcc code=% for user=%s. Error: %s\", mcc_code, user_id, err)\n raise DatabaseError\n\n return limit", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def _get_limit(self, req):\n try:\n limit = int(req.params.get('limit', CONF.limit_param_default))\n except ValueError:\n raise exc.HTTPBadRequest(_(\"limit param must be an integer\"))\n\n if limit < 0:\n raise exc.HTTPBadRequest(_(\"limit param must be positive\"))\n\n return min(CONF.api_limit_max, limit)", "def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))", "def get_limits(self):\n return self._get(limits.Limits)", "def timelimit(self):\n return self._timelimit", "def get_limit(self):\r\n\r\n limit = self.request_data.get('limit', self.limit)\r\n if limit is None:\r\n limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)\r\n\r\n try:\r\n limit = int(limit)\r\n except ValueError:\r\n raise BadRequest(\r\n \"Invalid limit '%s' provided. Please provide a positive integer.\" % limit)\r\n\r\n if limit < 0:\r\n raise BadRequest(\"Invalid limit '%s' provided. 
Please provide a positive integer >= 0.\" % limit)\r\n\r\n if self.max_limit and (not limit or limit > self.max_limit):\r\n # If it's more than the max, we're only going to return the max.\r\n # This is to prevent excessive DB (or other) load.\r\n return self.max_limit\r\n\r\n return limit", "def getRange(self, c, name):\n self.validateChannel( name )\n limits = self.d[name].limits\n return limits", "def findLimit(name):\n return Limit(Cuebot.getStub('limit').Find(\n limit_pb2.LimitFindRequest(name=name), timeout=Cuebot.Timeout).limit)", "def update_customer_credit(customer_id, credit_limit):\n LOGGER.info(\"Changing %s credit limit to %.2f\", customer_id, credit_limit)\n try:\n db_customer = Customers.get(Customers.customer_id == customer_id)\n db_customer.credit_limit = credit_limit\n db_customer.save()\n LOGGER.info(\n \"Successfully changed %s credit limit to %.2f\", customer_id, credit_limit\n )\n except DoesNotExist as e_val:\n LOGGER.warning(\"Error updating %s credit limit\", customer_id)\n LOGGER.warning(e_val)", "def update_customer_credit(customer_id, credit_limit):\n try:\n customer = cm.Customers.get(cm.Customers.customer_id == customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except cm.DoesNotExist:\n raise ValueError", "def budget(self):\n return self._budget", "def set_current_limit(self, value):\n function_string = 'I' + self.output + ' ' + str(value)\n return self.scpi_comm(function_string)", "def get_lift_limit(self) -> float:\n\n return self.send(self.cmd.GET_LIFT_LIMIT)", "def update_customer_credit(customer_id, credit_limit):\n try:\n LOGGER.info('Update credit limit for customer.')\n with DATABASE.transaction():\n a_customer = Customer.get(Customer.customer_id == customer_id)\n a_customer.credit_limit = int(credit_limit)\n a_customer.save()\n LOGGER.info('customer credit limit has updated')\n\n except Customer.DoesNotExist as error:\n LOGGER.info('Updated failed.')\n LOGGER.info('Customer with id %s not found.', customer_id)\n LOGGER.info(error)\n raise ValueError", "def getYLimit(self):\n return self._myCanvas.getYLimit()", "def get_resource_limit(resource):\n\n return _resources_allowed_dict[resource]", "def get_limit_per_second(self):\n pass", "def getXLimit(self):\n return self._myCanvas.getXLimit()", "def quota(self) -> 'outputs.CommitmentQuotaResponse':\n return pulumi.get(self, \"quota\")", "def STAND_LIMIT() -> int:\n return 15", "def get_limits(self):\n raise NotImplementedError(\"Limits are not available for Cloud Databases\")", "def update_customer_credit(customer_id, credit_limit):\n with cm.DATABASE.transaction():\n try:\n a_customer = cm.Customer.get(\n cm.Customer.customer_id == customer_id)\n a_customer.credit_limit = credit_limit\n a_customer.save()\n LOGGER.info(\"Updating customer [%s] credit limit to $%s\",\n customer_id, credit_limit)\n except pw.DoesNotExist:\n LOGGER.warning(\"Error updating credit limit for customer [%s]!\",\n customer_id)\n raise ValueError", "def getLimits(self):\n lims = [x * self.getSign() + self.getOffset() for x in (self.connection.getChannel(self.chanNamePrefix % 'low_limit').read(), \\\n self.connection.getChannel(self.chanNamePrefix % 'high_limit').read())]\n return (min(lims), max(lims))", "def get_bet_limit(self, n: int) -> int:\n return self._bet_limits[n]", "def user_get_rate_limit():\n login = demisto.getArg('login')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limit = {\n 'SubmissionWaitSeconds': demisto.get(r, 
'data.user.submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(r, 'data.user.submissions-available')\n }\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limit},\n 'HumanReadable': tableToMarkdown('ThreatGrid - User Rate Limit', [rate_limit], [\n 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def tcp_acc_limit(self):\r\n return self._arm.tcp_acc_limit", "def _get_limit(self, req):\n try:\n limit = int(req.str_params.get('limit', MAX_ITEM_LIMIT))\n except ValueError:\n raise exc.HTTPBadRequest(\"limit param must be an integer\")\n\n if limit < 0:\n raise exc.HTTPBadRequest(\"limit param must be positive\")\n\n return min(MAX_ITEM_LIMIT, limit)", "def time_limit(self) -> float:\n return self._time_limit", "def limits(self):\n return self._limits", "def pwm_limit(self):\n return self._read(MX_PWM_LIMIT)", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\n f\"Successfully updated customer {customer_id} credit limit\"\n )\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to update customer {customer_id}\"\n \" credit limit. {unknown_error}\"\n )\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def upper_limit(self, parameter, bound=0.95):\n\n return self.credible_interval(parameter, interval=[bound])", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def get_high_limit(self):\n debug(\"Getting high limit...\")\n value = self.get_value(7)/10.\n if not isnan(value): info(\"High limit %r C\" % value)\n else: warn(\"High limit unreadable (old firmware?)\")\n return value", "def max_voltage_limit(self):\n return self._read(MX_MAX_VOLTAGE_LIMIT)", "def getLimits():\n return [Limit(limit) for limit in Cuebot.getStub('limit').GetAll(\n limit_pb2.LimitGetAllRequest(), timeout=Cuebot.Timeout).limits]", "def organization_get_rate_limit():\n login = demisto.getArg('adminLogin')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limits = [\n {\n 'Minutes': demisto.get(rate_limit, 'minutes'),\n 'Samples': demisto.get(rate_limit, 'samples'),\n 'SubmissionWaitSeconds': demisto.get(rate_limit, 'submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(rate_limit, 'submissions-available')\n }\n for rate_limit in demisto.get(r, 'data.organization.submission-rate-limit')\n ]\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limits},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Organization Rate Limit', rate_limits, [\n 'Minutes', 'Samples', 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def get_view_rate_limit():\n return getattr(g, '_view_rate_limit', None)", "def calculate_point_limit():\n if request.method == 'OPTIONS':\n return create_response({}, 200, '*', 'content-type, token')\n\n role, response = handle_request_token(request)\n\n if role is None:\n return response\n\n try:\n recruitments = Recruitment.query.filter_by(point_limit=None)\\\n .filter(Recruitment.end_date <= date.today())\n for recruitment in recruitments:\n candidates = 
CandidateRecruitment.query.filter_by(recruitment_id=recruitment.id)\\\n .order_by(CandidateRecruitment.points.desc())\n places_left = recruitment.slot_limit\n point_limit = 0\n for candidate in candidates:\n if places_left > 0 and candidate.is_paid:\n candidate.status = RecruitmentStatus.QUALIFIED\n places_left -= 1\n if places_left == 0:\n point_limit = candidate.points\n else:\n candidate.status = RecruitmentStatus.NOT_QUALIFIED\n recruitment.point_limit = point_limit\n\n db.session.commit()\n except (AttributeError, SQLAlchemyError) as exception:\n logging.error(exception, file=sys.stderr)\n return create_response({\"error\": \"Nie udało się obliczyć progów.\"}, 400, \"*\")\n\n return create_response({\"message\": \"Wyliczono progi rekrutacyjne\"}, 200, \"*\")", "def get_credit(self):\n res = self.client.get(\"/v1/credit\")\n\n try:\n return res.data[\"credit\"]\n except:\n raise ValueError(\"returned response not valid\")", "def remaining_requests(self):\n try:\n return self._get_limit('Remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. Returning -1\")\n return -1", "def limitValue(self, value, lowerLimit, upperLimit):\n if value > upperLimit:\n return upperLimit\n elif value < lowerLimit:\n return lowerLimit\n else:\n return value", "def locked_temp_max_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_max_c\"))\r\n return kelvin_to_celsius(self._locked_temp_max)", "def max_temp(self):\n return 30", "def get_current_price(limit: int = None, attempts: int = 0):\n try:\n price = EXCHANGE.fetch_ticker(CONF.pair)['bid']\n if not price:\n LOG.warning('Price was None')\n sleep_for(1, 2)\n get_current_price(limit, attempts)\n else:\n return int(price)\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.debug('Got an error %s %s, retrying in 5 seconds...', type(error).__name__, str(error.args))\n attempts += 1\n if not limit or attempts < limit:\n sleep_for(4, 6)\n get_current_price(limit, attempts)\n else:\n return 0", "def createLimit(name, maxValue):\n return Limit(Cuebot.getStub('limit').Create(\n limit_pb2.LimitCreateRequest(name=name, max_value=maxValue), timeout=Cuebot.Timeout))", "def getAvailableBalance(self):\n\n # calculates the available balance as the sum of the account balance and the overdraft limit\n availableBalance = self.balance + self.overdraftLimit\n return availableBalance", "def comprequestsrate(self) :\n\t\ttry :\n\t\t\treturn self._comprequestsrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def get_low_limit(self):\n debug(\"Getting low limit...\")\n value = self.get_value(6)/10.\n if not isnan(value): info(\"Low limit %r C\" % value)\n else: warn(\"Low limit unreadable (old firmware?)\")\n return value", "def max_temp(self):\n return 99", "def get_current(self):\r\n with open('MonthlyRate.csv', newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n if self.choice == row['CurrencyCode']:\r\n current = 
row[\"Current Rate\"]\r\n csvfile.close()\r\n # Round the value to 4 d.p.\r\n current = round(float(current), 4)\r\n return current", "def _check(self):\n try:\n num = int(self.ids.res_lim.text)\n # reset negative numbers to zero\n if num <= 0:\n self.ids.res_lim.text = str(0)\n except ValueError:\n self.ids.res_lim.text = str(self.limit)\n\n return int(self.ids.res_lim.text)", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def min_voltage_limit(self):\n return self._read(MX_MIN_VOLTAGE_LIMIT)", "def rate_limit_check():\n\n data = api.rate_limit_status()\n\n user_timeline_remaining = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['remaining']\n\n followers_list_remaining = data['resources']['followers'] \\\n ['/followers/list']['remaining']\n\n rate_limit_remaining = data['resources']['application'] \\\n ['/application/rate_limit_status']['remaining']\n\n verify_credentials_remaining = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['remaining']\n\n user_timeline_reset = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['reset']\n\n followers_list_reset = data['resources']['followers'] \\\n ['/followers/list']['reset']\n\n rate_limit_reset = data['resources']['application'] \\\n ['/application/rate_limit_status']['reset']\n\n verify_credentials_reset = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['reset']\n\n return {'utrem': user_timeline_remaining,\n 'ftrem': followers_list_remaining,\n 'rlrem': rate_limit_remaining,\n 'vcrem': verify_credentials_remaining,\n 'utres': user_timeline_reset,\n 'ftres': followers_list_reset,\n 'rlres': rate_limit_reset,\n 'vcres': verify_credentials_reset}", "def get_current_rate(self):\n pass", "def current_capacity_range(self):\n done, data = self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def get_max_record_limit(self):\n return self.max_record_limit", "def update_customer_credit(customer_id, credit_limit):\n try:\n with customer_db.transaction():\n customer = Customer.select().where(Customer.customer_id == customer_id).get()\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\"Updated credit limit of customer ID %s to %s\", customer_id, credit_limit)\n except Customer.DoesNotExist as e:\n logger.error(\"Error updating credit limit for customer %s: %s\", customer_id, e)\n # Absolutely could not get pylint to recognize peewee's DoesNotExist error type, so raise ValueError instead\n raise ValueError(\"NoCustomer\")", "def temperature_limit(self):\n return self._read(MX_TEMPERATURE_LIMIT)", "def max_pending(self):\n return self._max_pending", "def get_valid_expiration(requested_expiration, max_limit=None, default=None):\n if requested_expiration is None:\n return default\n try:\n rv = int(requested_expiration)\n assert rv > 0\n if max_limit:\n rv = min(rv, max_limit)\n return rv\n except (ValueError, AssertionError):\n raise UserError(\n \"Requested expiry must be a positive integer; instead got {}\".format(\n requested_expiration\n )\n )" ]
[ "0.70615613", "0.6857322", "0.68410796", "0.6831366", "0.6759597", "0.661983", "0.6546747", "0.63908046", "0.63815755", "0.6301675", "0.6296434", "0.6276246", "0.626394", "0.6231881", "0.62091506", "0.62080514", "0.6149896", "0.6149896", "0.6126138", "0.61131227", "0.6094338", "0.606758", "0.6062746", "0.60339886", "0.6025979", "0.59604186", "0.59421223", "0.59421223", "0.59421223", "0.59329826", "0.5928023", "0.5913741", "0.59124994", "0.5908298", "0.59051776", "0.59012735", "0.58849204", "0.5865021", "0.58632797", "0.58623046", "0.58583647", "0.5845893", "0.5814262", "0.5814245", "0.58140314", "0.58107483", "0.58002645", "0.5800258", "0.5794916", "0.57933956", "0.57795304", "0.57766247", "0.5759175", "0.57444507", "0.5736553", "0.5722456", "0.57188654", "0.5710958", "0.5706438", "0.5686761", "0.5671766", "0.5670719", "0.56627345", "0.56607485", "0.5637746", "0.5633437", "0.56251246", "0.56119347", "0.5608782", "0.5603629", "0.55967283", "0.5593555", "0.5584188", "0.5582939", "0.5581353", "0.5576911", "0.5576049", "0.5575716", "0.5570152", "0.5569003", "0.556811", "0.55665165", "0.55512214", "0.55390424", "0.5537059", "0.5537011", "0.5533931", "0.5522167", "0.55104715", "0.5507828", "0.55046046", "0.54975", "0.5495169", "0.54951286", "0.5492449", "0.5490134", "0.54856604", "0.5479696", "0.54776603" ]
0.6619479
7
Return the current balance
def get_balance(self): return self._balance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def BB ( self ) :\n return self.__bb", "def BB ( self ) :\n return self.__bb", "def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()", "def get_bribe(self):\r\n return self.bribe", "def get_bolsa(self):\n return self.bolsa", "def SB ( self ) :\n return self.__sb", "def SB ( self ) :\n return self.__sb", "def getB(self):\n\t\treturn self.b", "def getBL(self):\r\n return self.bL;", "def BS ( self ) :\n return self.SB", "def get_b(self):\n return self._b", "def read_acbr(self):\n return self.ACBR", "def get_branch(self):\n if self._repository:\n return self._repository.dirstate.branch()", "def bairro(self):\n return self._bairro", "def bank_account_bban(self):\n return self.__bank_account_bban", "def getC(self):\n\t\treturn self.c", "def b(self):\r\n return self.__b", "def acc_b(self):\n return self._acc_b", "def active_branch(self) -> Head:\n # reveal_type(self.head.reference) # => Reference\n return self.head.reference", "def BS ( self ) :\n return self.__bs", "def c(self):\r\n return self.__c", "def acc_b_tr(self):\r\n return self._acc_b_tr", "def get_current(self):\n return self.current", "def _get_active(self):\n return self.__active", "def _get_active(self):\n return self.__active", "def _get_active(self):\n return self.__active", "def _get_active(self):\n return self.__active", "def current(self):\n pass", "def afmetingenBL(self):\n return self._afmetingenBL.get_waarde()", "def get_bct_result():\n # Import the model module.\n from lexos.models.bct_model import BCTModel\n # Cache all the options.\n session_manager.cache_bct_option()\n session_manager.cache_analysis_option()\n # Get the bootstrap consensus tree result.\n return BCTModel().get_bootstrap_consensus_tree_plot_decoded()", "def active_brdch(self):\n return self._faux._active_brdch.copy()", "def currently(self):\n return c.Currently(self)", "def b(self):\n return self._b", "def b(self):\n return self._b", "def b(self):\n return self._b", "def b(self):\n return self._b", "def CL(self):\n return self.__CL", "def c(self):\n return self._c", "def get_br_cell(self):\n return self._br_cell", "def getCurrent(self):\n return self.__current", "def Current(self) -> str:", "def base(self):\n return self if self._base is None else self._base", "def getBeta(self):\n\t\treturn self.relativistic_beta", "def get_branch():\n command = [\"git\", \"branch\", \"--show-current\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n branch_str = proc.stdout.readline()\n return branch_str.decode(\"utf-8\").rstrip()", "def get_sb(self, Rs=None):\n return self.rc.get_bender_mot(self.rc.get_bender_pos(Rs=Rs))", "def active_branch(self):\n return self.repo.active_branch.name", "def get_bl_cell(self):\n return self._bl_cell", "def get_Pbat(self):\r\n return self.Pbat", "def get_Pbat(self):\r\n return self.Pbat", "def get_Pbat(self):\r\n return self.Pbat", "def CL(self):", "def branch(self):\n return None", "def GetBranch():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(2)\n return DEFAULT_BRANCH", "def crd(self):\r\n return self.__trajectory[0]", "def __getstate__(self):\n\t\treturn self", "def gbr_ul(self):\n return self._gbr_ul", "def current(self):\n return self._current", "def current(self):\n return self._current", "def current(self):\n return self._current", "def get_current(self):\n return self.x", "def current(cls):\n return stackless.getcurrent()", "def gbr_dl(self):\n return self._gbr_dl", "def get_bank(self):\n return self._bank", "def branch(self):\n 
return self._changeset.get('branch', None)", "def b(self):\n pass", "def b(self):\n pass", "def getCL(self):\r\n return self.cL;", "def bringC(self):\n endC = self.countCannibalOnEnd()\n if endC < 1:\n return None\n else:\n newStart = self.start[0:2] + str(4-endC) + self.start[3]\n newEnd = self.end[0:2] + str(endC-1) + self.end[3]\n return MissionaryState(newStart,newEnd,\"bringC\")", "def get_current(self) -> typing.Any:\n\n return self.current_obj", "def __repr__(self):\n return '<Twilio.Preview.TrustedComms.BrandedCallInstance>'", "def c(self):\n pass", "def c(self):\n pass", "def get_biGramTable(self):\n\t\treturn self._state.biGramTable", "def B(self) -> int:\n return self.params.B", "def bijector(self):\n return self._bijector", "def get_current_basis(self):\n l = len(self.basis_stack)\n return self.basis_stack[l-1]", "def get_consequent(self):\n return self.consequent", "def breand(self) -> int:\n return self._breand", "def breand(self) -> int:\n return self._breand", "def breand(self) -> int:\n return self._breand", "def getBase(self):\n return self.base", "def getBase(self):\n return self.base", "def getBase(self):\n return self.base", "def cost_b(self):\n return self._cost_b", "def acc_b_v(self):\r\n return self._acc_b_v", "def bank_account_iban(self):\n return self.__bank_account_iban", "def get_balance(self):\n\n return self.config", "def get_current(self, event=None):\n childes = self.nb.winfo_children() # return the list objects of child widgets of notebook[tab widget]\n return childes[self.nb.index('current')].winfo_children()[0]", "def get_balance(self):\n print(f\"Your current balance is: ${self.balance}\")\n return self.balance", "def tlbr(self):\n ret = self.tlwh()\n ret[2:] += ret[:2]\n return ret", "def get_cloc(self):\n return self.order_hist[-1]", "def get_current(self):\n\n # for comparison\n initial_state = self._read('CFR')\n bits = initial_state[1] & 0x03\n\n if bits == 0b11:\n divider = 1\n elif bits == 0b01:\n divider = 2\n elif bits == 0b10:\n divider = 4\n elif bits == 0b00:\n divider = 8\n\n print ('Latest divider set (i.e. 
currently in register):', divider)\n\n # Returns values saved in set_current\n return self.currents", "def chebi(self):\n return self._chebi", "def __get_balance(self):\n return self.__balance", "def last_bloodwork(self):\n if hasattr(self, '_prior_bloodwork'):\n #in memory lookup\n return self._prior_bloodwork\n\n #memcached lookup\n last_bw = cache.get('%s_bloodwork' % (self._id), None)\n if last_bw == None:\n #it's null, so it's a cache miss.\n #do a requery\n pass\n elif last_bw == '[##Null##]':\n return None\n else:\n self._prior_bloodwork = CBloodwork.wrap(simplejson.loads(last_bw))\n return self._prior_bloodwork\n\n\n #requery\n bw_docs = CBloodwork.view('pactcarehq/patient_bloodwork', key=self.pact_id).all()\n bw_docs = sorted(bw_docs, key=lambda x: x['test_date'])\n if len(bw_docs) > 0:\n self._prior_bloodwork = bw_docs[0]\n cache.set('%s_bloodwork' % (self._id), simplejson.dumps(bw_docs[0].to_json()))\n return bw_docs[0]\n if self.prior_bloodwork.test_date == None:\n #this is a bit hacky, it should really be null, but since this is an added on object, to PactPatient, it doesn't show up as None\n #so we need to do an explicit check for the\n cache.set('%s_bloodwork' % (self._id), '[##Null##]')\n pass\n else:\n self._prior_bloodwork = self.prior_bloodwork\n return self.prior_bloodwork\n return None", "def get_active(self):\n return self._active", "def current_branch():\n return subprocess.check_output('git branch --show-current'.split()).decode().strip()", "def __init__(self):\n curafl = 1\n return", "def state(self):\n return self", "def get_reference(self):\t\t\n\t\treturn self._reference", "def getBackref(self):\n return self._backref" ]
[ "0.689818", "0.689818", "0.6664285", "0.6529631", "0.64657265", "0.63538027", "0.63538027", "0.63459975", "0.6342656", "0.6154038", "0.6110734", "0.60647565", "0.6034983", "0.59981585", "0.59864616", "0.5886264", "0.5817673", "0.58071756", "0.57708025", "0.5752032", "0.57341397", "0.5690663", "0.56486505", "0.5648222", "0.5648222", "0.5648222", "0.5648222", "0.56438917", "0.5625631", "0.56093305", "0.5594789", "0.5593757", "0.55872506", "0.55872506", "0.55872506", "0.55872506", "0.5581998", "0.55814576", "0.55726945", "0.55449945", "0.5535921", "0.5501463", "0.54995567", "0.54813576", "0.5481177", "0.5474511", "0.54612494", "0.5458898", "0.5458898", "0.5458898", "0.54585755", "0.54550046", "0.5453223", "0.5450948", "0.54456234", "0.5442614", "0.54277104", "0.54277104", "0.54277104", "0.5381494", "0.5380787", "0.5369103", "0.5360481", "0.53554624", "0.5350637", "0.5350637", "0.53461695", "0.5328758", "0.53239346", "0.53188705", "0.5315584", "0.5315584", "0.5303382", "0.5300304", "0.5295455", "0.5295228", "0.52900386", "0.52823216", "0.52823216", "0.52823216", "0.52726835", "0.52726835", "0.52726835", "0.527142", "0.5268576", "0.5262691", "0.525853", "0.52556336", "0.52538115", "0.52515835", "0.5244915", "0.5229949", "0.5229707", "0.52258515", "0.5223584", "0.5222279", "0.52219117", "0.52171266", "0.5209139", "0.5207939", "0.51973206" ]
0.0
-1
Charge the given price to the card, assuming sufficient card limit. Return True if the charge was processed; False if the charge was denied.
def charge(self, price):
    if price + self._balance > self._limit:
        return False
    else:
        self._balance += price
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError('Price must be numeric')\n if price + self._balance > self._limit: # if charge would exceed limit\n return False # cannot accept charge\n self._balance += price\n return True", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def price_check(cash, price, shares):\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def charge(self, other):\n if self.flag:\n self.credit += other\n return \"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(\n other, self.credit)\n else:\n return \"Sorry, your card has expired.\"", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' % (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! 
Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def check_price(self, price_diff):\n chance = exp(price_diff / self.T)\n\n if price_diff < 0 and not chance > random():\n return True\n \n return False", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def use(self):\n if self.flag:\n if self.credit < self.price_of_trip:\n return \"Your credit is not enough, please increase your credit\"\n else:\n self.credit -= self.price_of_trip\n return \"Done\"\n else:\n return \"Sorry, your card has expired.\"", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def charge_customer(customer, amount):\n stripe.api_key = Config.STRIPE_SECRET_KEY\n\n if not customer.cards:\n return False # This situation is impossible, but anyway\n try:\n charge = stripe.Charge.create(\n amount=int(amount * 100),\n currency='AUD',\n customer=customer.stripe_customer_id,\n source=customer.cards[-1].stripe_card_id,\n description='Payment for donations.'\n )\n except Exception as e:\n print(e.args[0])\n return False\n\n if charge.status == 'succeeded':\n return True\n return False", "def recharge(self, amount):\n self.action.recharge(self.cardUid, amount)\n self.start()", "def can_accept_credit(self, value):\n return value >= 0", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? 
TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def price(self, value):\n self.price_ = max(value, 0)\n\n if self.price_ == 0:\n self.mark_as_paid()", "def cc_charge(self, battery_instance=None, inverter_instance=None, start_timestamp=None, timeout_seconds = 0):\n #TODO\n #1. Place inverter in charge mode.\n #2. Every 2 seconds check for battery data and timeout.\n inverter_instance.charge()\n log_test_case.info('Issued charge mode to inverter on port %s.', inverter_instance.com_port)\n while (time.time()-start_timestamp)>timeout_seconds:\n if battery_instance.pack_variables['is_not_safe_level_1']:\n log_test_case.info('Reached level 1 limits during charging on battery on port: %s.', battery_instance.com_port)\n break\n \n time.sleep(2)\n \n inverter_instance.rest()\n battery_instance.clear_level_1_error_flag()\n log_test_case.info('CC charge mode on inverter on port %s finished.', inverter_instance.com_port)\n return True", "def charge_credit_card(amount,save_to_cim=False):\n\n # Create a merchantAuthenticationType object with authentication details\n # retrieved from the constants file\n merchantAuth = apicontractsv1.merchantAuthenticationType()\n merchantAuth.name = CONSTANTS.apiLoginId\n merchantAuth.transactionKey = CONSTANTS.transactionKey\n\n\n # Create the payment data for a credit card\n creditCard = apicontractsv1.creditCardType()\n card_types = ['visa','discover','mastercard','jcb']\n creditCard.cardNumber = fake.credit_card_number(card_type=random.choice(card_types))\n creditCard.expirationDate = fake.credit_card_expire()\n creditCard.cardCode = fake.credit_card_security_code()\n\n # Add the payment data to a paymentType object\n payment = apicontractsv1.paymentType()\n payment.creditCard = creditCard\n\n # Create order information\n order = apicontractsv1.orderType()\n order.invoiceNumber = str(random.randint(1000,3000))\n order.description = fake.bs()\n\n # Set the customer's Bill To address\n customerAddress = apicontractsv1.customerAddressType()\n customerAddress.firstName = fake.first_name()\n customerAddress.lastName = fake.last_name()\n customerAddress.company = fake.bs()\n customerAddress.address = fake.street_address()\n customerAddress.city = fake.city()\n customerAddress.state = fake.address().split()[-1].split()[0]\n 
customerAddress.zip = fake.postalcode_in_state()\n customerAddress.country = fake.country()\n customerAddress.phoneNumber = fake.phone_number()\n\n\n # Set the customer's identifying information\n customerData = apicontractsv1.customerDataType()\n customerData.type = \"individual\"\n customerData.id = fake.upc_e()\n customerData.email = fake.email()\n\n # Add values for transaction settings\n duplicateWindowSetting = apicontractsv1.settingType()\n duplicateWindowSetting.settingName = \"duplicateWindow\"\n duplicateWindowSetting.settingValue = \"600\"\n settings = apicontractsv1.ArrayOfSetting()\n settings.setting.append(duplicateWindowSetting)\n\n # setup individual line items\n random_num = random.randint(2000,5000)\n line_item_1 = apicontractsv1.lineItemType()\n line_item_1.itemId = str(random.randint(1,9))\n line_item_1.name = \"first\"\n line_item_1.description = fake.catch_phrase()\n line_item_1.quantity = str(random.randint(1,9))\n line_item_1.unitPrice = \"12.95\"\n line_item_2 = apicontractsv1.lineItemType()\n line_item_2.itemId = str(random.randint(1,9))\n line_item_2.name = \"second\"\n line_item_2.description = fake.catch_phrase()\n line_item_2.quantity = str(random.randint(1,9))\n line_item_2.unitPrice = \"7.95\"\n line_item_3 = apicontractsv1.lineItemType()\n line_item_3.itemId = str(random.randint(1,9))\n line_item_3.name = \"third\"\n line_item_3.description = fake.catch_phrase()\n line_item_3.quantity = str(random.randint(1,9))\n line_item_3.unitPrice = \"100.00\"\n\n\n # build the array of line items\n line_items = apicontractsv1.ArrayOfLineItem()\n line_items.lineItem.append(line_item_1)\n line_items.lineItem.append(line_item_2)\n line_items.lineItem.append(line_item_3)\n\n # Create a transactionRequestType object and add the previous objects to it.\n transactionrequest = apicontractsv1.transactionRequestType()\n transactionrequest.transactionType = \"authCaptureTransaction\"\n transactionrequest.amount = amount\n transactionrequest.payment = payment\n transactionrequest.order = order\n transactionrequest.billTo = customerAddress\n transactionrequest.customer = customerData\n transactionrequest.transactionSettings = settings\n transactionrequest.lineItems = line_items\n\n # Assemble the complete transaction request\n createtransactionrequest = apicontractsv1.createTransactionRequest()\n createtransactionrequest.merchantAuthentication = merchantAuth\n createtransactionrequest.refId = \"1234-3432\"\n createtransactionrequest.transactionRequest = transactionrequest\n # Create the controller\n createtransactioncontroller = createTransactionController(\n createtransactionrequest)\n createtransactioncontroller.execute()\n\n response = createtransactioncontroller.getresponse()\n\n if response is not None:\n # Check to see if the API request was successfully received and acted upon\n if response.messages.resultCode == \"Ok\":\n # Since the API request was successful, look for a transaction response\n # and parse it to display the results of authorizing the card\n if hasattr(response.transactionResponse, 'messages') is True:\n print(\n 'Successfully created transaction with Transaction ID: %s'\n % response.transactionResponse.transId)\n if save_to_cim:\n # create CIM profile\n cim_create.append(response.transactionResponse.transId)\n create_customer_profile_from_transaction(str(cim_create[0]))\n print('Transaction Response Code: %s' %\n response.transactionResponse.responseCode)\n print('Message Code: %s' %\n response.transactionResponse.messages.message[0].code)\n 
print('Description: %s' % response.transactionResponse.\n messages.message[0].description)\n else:\n print('Failed Transaction.')\n if hasattr(response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(response.transactionResponse.\n errors.error[0].errorCode))\n print(\n 'Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n # Or, print errors if the API request wasn't successful\n else:\n print('Failed Transaction.')\n if hasattr(response, 'transactionResponse') is True and hasattr(\n response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(\n response.transactionResponse.errors.error[0].errorCode))\n print('Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n else:\n print('Error Code: %s' %\n response.messages.message[0]['code'].text)\n print('Error message: %s' %\n response.messages.message[0]['text'].text)\n else:\n print('Null Response.')\n\n return response", "def amount_to_charge(opportunity):\n amount = float(opportunity.amount)\n if opportunity.agreed_to_pay_fees:\n total = (amount + 0.30) / (1 - 0.022)\n else:\n total = amount\n return quantize(total)", "def limit_chase(self, oq, max_chase=3.0, failsafe=False, double_check=False):\n ret = self.send_order(oq=oq, ot='limit', price=None)\n order_id = ret[0]['orderID']\n last_price = ret[0]['price']\n side = ret[0]['side']\n max_chase_buy = float(last_price) + float(max_chase)\n max_chase_sell = float(last_price) - float(max_chase)\n avg = last_price\n time.sleep(1)\n self.logger.info(\n f'Chasing {side} order {order_id}, order_price: {avg}, last_price: {last_price}, '\n f'current price: {last_price} max chase: {max_chase_buy}')\n count = 0\n while True:\n count += 1\n o = self.ws_orders(order_id)\n if o:\n if side == 'Buy':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['buy']\n else:\n _price = self.ws.get_ticker()['buy']\n if float(_price) <= float(max_chase_buy):\n if float(last_price) < float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price}')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n elif side == 'Sell':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['sell']\n else:\n _price = self.ws.get_ticker()['sell']\n if float(_price) >= float(max_chase_sell):\n if float(last_price) > float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price} ')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. 
Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n else:\n time.sleep(0.5)\n if o:\n self.logger.info(f'{side} Order manually Canceled!')\n self.logger.info('Order Filled')\n break", "def charge_charity_plan(charity):\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n price = charity.plan.price\n cards = charity.cards\n if price > 0:\n if len(cards) > 0:\n try:\n payment = stripe.Charge.create(\n amount=int(price * 100),\n currency='AUD',\n # source=charity.stripe_custom_account_id,\n customer=charity.stripe_customer_id,\n source=cards[-1].stripe_card_id,\n description=f'Payment for {charity.plan.name} plan.'\n )\n except stripe.error.StripeError as e:\n current_app.logger.error(f\"charge_charity_plan. Stripe error: {e.json_body['error']['message']}\")\n return False\n if payment.status == 'succeeded':\n charity.last_plan_payment = datetime.now()\n db.session.commit()\n return True\n return False\n charity.last_plan_payment = datetime.now()\n db.session.commit()\n return True", "def is_buy(order):\n return(copysign(1, order.amount)>=0)", "def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def test_credit_card():\n cc = chap2.CreditCard('John Doe', '1st Bank', '5391 0375 9387 5309', 1000)\n assert cc.get_balance() == 0\n cc.charge(300)\n assert cc.get_balance() == 300\n cc.charge(15.50)\n assert cc.get_balance() == 315.50\n cc.make_payment(250)\n assert cc.get_balance() == (315.50 - 250)\n cc.make_payment(45.50)\n assert cc.get_balance() == 20\n with pytest.raises(TypeError):\n cc.charge('$5.00')\n with pytest.raises(TypeError):\n cc.make_payment('$10.00')\n assert not cc.charge(5000)", "def buy_one_cent_less_than_bid_or_50(self, bid_price):\n if bid_price:\n buying_price = self.buy_fixed_quantity_less_than_bid_price(\n bid_price=bid_price,\n fixed_quantity=0.01)\n else:\n buying_price = self.buy_fixed_price(50)\n return buying_price", "def risk_control(self, bid, ask, last_trade):\n # Are we long or short?\n pos_cost = abs(self.portfoolio['Cost'])\n side = self.sign(self.portfoolio['Cost'])\n\n if side > 0:\n # Sell if long\n price = bid\n else:\n # Cover if short\n price = ask\n\n shares = (pos_cost * 0.05) / last_trade * side * -1\n shares = int(round(shares, -2))\n print('Reducing position, {} shares, price: {}'.format(shares, price))\n self.trade(shares, price, last_trade)", "def swipe_loyalty(brand='Core', card_name='Loyalty'):\n if not click_function_key('Pay'):\n return False\n\n if _is_element_present(PROMPT_BOX['Heading']):\n if not click_prompt_key('Yes'):\n return False\n \n payload = pinpad.swipe_loyalty(\n brand=brand,\n card_name=card_name\n )\n start_time = time.time()\n while time.time() - start_time <= pinpad_timeout:\n try:\n if payload['success'] and _is_element_present(CARD_PROCESSING[\"PINPad Image\"]):\n return True\n except:\n continue\n else:\n logger.warning(f\"Unable to swipe loyalty card {card_name}\")\n return False\n else:\n return False", "def 
charge(self):\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tpayment_value = loan.borrower._pay(loan.value)\n\t\t\tloan.value -= payment_value\n\t\t\tif loan.value <= 0.0:\n\t\t\t\tloan.borrower.debt_link = None\n\t\t\t\tdel self.loans[l]\n\t\t\tself.stock += payment_value", "def check_cap(org, amount):\n from django.db.models import Sum, Q\n\n if amount < 0:\n query = Q(favor__lt=0)\n else:\n query = Q(favor__gt=0)\n total = abs(\n org.reputations.filter(query).aggregate(sum=Sum(\"favor\"))[\"sum\"] or 0\n ) + abs(amount)\n mod = org.social_modifier * 5\n if total > mod:\n noun = \"favor\" if amount > 0 else \"disfavor\"\n raise CommandError(\n \"That would bring your total %s to %s, and you can only spend %s.\"\n % (noun, total, mod)\n )", "def spend_cash(self, num):\r\n self.cash -= num\r\n return self.cash > num", "def execute(self, **params):\n self.__requireParams(params, ['amount', 'currency'])\n\n return self.__req('execute_charge', params)", "def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")", "def credit_deliverer():\n return True", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. 
Money refunded.\")\n return False\n else:\n return True", "def check_transaction(menu, drink, resources):\r\n customer_money = process_coins()\r\n drink_cost = menu[drink]['cost']\r\n if customer_money < drink_cost:\r\n print(\"Sorry that's not enough money.Money refunded\")\r\n return False\r\n else:\r\n if customer_money > drink_cost:\r\n change = round((customer_money - drink_cost), 2)\r\n print(f\"Here is your ${change} in change\")\r\n resources['Money'] += drink_cost\r\n return True", "def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems", "def pricevalidator(self, price):\n if type(price) != int:\n API.abort(400, error_messages[15]['str_price'])\n\n return True", "def check_for_offer(self, bid, commodity, limit, actual, quantity, price):\n if bid:\n if len(self.trades[\"buys\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"buys\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"buys\"][commodity])\n\n # if total < limit:\n # #PLACE MORE BIDS.\n return total\n\n else:\n if len(self.trades[\"asks\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"asks\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"asks\"][commodity])\n #\n # if total < limit:\n # #PLACE MORE asks.\n # return total\n # if total < limit:\n # #PLACE MORE asks.\n return total # - limit", "def charge(self):\n self.battery.charge_level = 100\n print(\"The vehicle is fully charged.\")", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def Charge(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def check_sufficient_funds(self, amount):\n balance = self.get_balance_amount()\n if(balance < amount):\n return False\n return True", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def recharge(self, charge: int) -> str:\n if self.mp + charge > MAX_MP:\n charge = MAX_MP - self.mp\n new_mp = self.mp + charge\n self.mp = new_mp\n return f'{self.name} recharged for {charge} MP!'", "def confirm_payment(informed_card: dict, method: str, value: float) -> dict:\n\n with open(\"Utils/payments/cards.json\", \"r\") as read_file:\n data = json.load(read_file)\n\n for card in data['cards']:\n if card['number'] == informed_card['number'] and \\\n card['month'] == informed_card['month'] and \\\n card['year'] == informed_card['year'] and \\\n card['cvc'] == informed_card['cvc']:\n\n if method == 'credit':\n if card['credit_used'] + value <= card['credit_limit']:\n card['credit_used'] = card['credit_used'] + value\n else:\n return dict(status=False, message=\"insufficient credit balance\")\n\n elif method == 'debit':\n if card['debit'] >= value:\n card['debit'] = card['debit'] - value\n else:\n return dict(status=False, message=\"insufficient debit balance\")\n\n with open('Utils/payments/cards.json', 'w') as outfile:\n json.dump(data, outfile)\n return dict(status=True, message=\"ok\")\n\n return dict(status=False, message=\"operation not allowed\")", "def charge(self, charge):\n self._charge = charge", "def is_transaction_successful(money_recieved, drink_cost):\n change = 0\n if money_recieved < drink_cost:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False\n elif money_recieved > drink_cost:\n change = round(money_recieved - drink_cost, 2)\n # We know there is enough change. So lets add it to the machine\n # Need to add the Global keyword to let this function know that \"profit\"\n # is a Global variable, not local\n global profit\n profit += drink_cost\n # Check for Change...\n if change > 0:\n print(f\"Here is ${change} dollars in change.\")\n return True", "def pay_card(brand='Core', card_name='Visa', debit_fee=False, cashback_amount=None, zip_code=None, cvn=None, custom=None, split_tender=False):\n # TODO : match loyalty prompt text to see if it is there once, Merlin gets the sco html finalized.\n if not _is_element_present(CARD_PROCESSING['PINPad Image']):\n if not click_function_key('Pay'):\n return False\n if _is_element_present(PROMPT_BOX[\"Heading\"]):\n if not click_prompt_key('No'):\n return False\n\n payload = pinpad.swipe_card(\n brand=brand,\n card_name=card_name,\n debit_fee=debit_fee,\n cashback_amount=cashback_amount,\n zip_code=zip_code,\n cvn=cvn,\n custom=custom\n )\n # Wait until transaction is fully finished.\n start_time = time.time()\n while time.time() - start_time <= pinpad_timeout: # TODO : how long do we want to wait?\n try:\n if payload['success'] and not _is_element_present(CARD_PROCESSING['PINPad Image']):\n # Answer receipt prompt if it appears.\n if _is_element_present(PROMPT_BOX[\"Heading\"]):\n if not click_prompt_key('No'):\n return False\n break\n # TODO : Has not been tested. 
I know this will fail as the 'Yes' and 'No'\n # button text are \"weird\". Instead of \"Yes\" it is \"\\n\\n\\nYes\"..\n # When I run into this scenario, I will fix this.\n elif \"SPLIT PAY\" in _get_text(PROMPT_BOX['Message']).upper():\n if split_tender:\n if not click_prompt_key('Yes'): \n logger.warning(\"Unable to click 'Yes' for Split Pay\") \n return False\n else:\n logger.info(\"Clicked Yes for Split Pay\")\n return True\n else:\n if not click_prompt_key('No'):\n logger.warning(\"Unable to click 'No' for Split Pay\")\n return False\n else:\n logger.info(\"Clicked No for Split Pay\")\n return True\n except:\n continue\n else:\n logger.warning(f\"Unable to pay with {card_name}\")\n return False\n\n msg = read_message_box()\n if msg:\n logger.warning(f\"Got an error after payment: {msg}\")\n return False\n\n return True", "def is_buy(self):\n return(copysign(1, self.volume)>0)", "def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums", "def make_card_wish(self, symbol, player):\n if player == self.current_player:\n if symbol in \"s c h d\":\n self.wait_for_card_wish = False\n self.card_wished = symbol\n self.choose_next_player()\n return True\n return False", "def allowed(self, user, amount):\n return True", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "async def _pay_money(ctx, user : discord.Member, amount : int):\n if amount<0:\n await bot.reply(\"You can't pay someone a negative amount!\")\n elif user==ctx.message.author:\n await bot.reply(\"You can't pay yourself!\")\n else:\n await transfer(bot, ctx.message.author, user, amount)", "def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True", "def charge(self, other):\n self.credit += other\n print(\"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(other,\n self.credit))", "def check_cents_per_point(wallet):\n which_card = input(\"Which card are you checking the CPP for? 
(Enter \"\n \"in Issuer,Card Name) \")\n card_parts = list(which_card.split(\",\"))\n found = False\n if len(card_parts) == 2:\n for card in wallet.get_cards():\n if card.get_issuer() == card_parts[0]:\n if card.get_card_name() == card_parts[1]:\n found = card_parts\n found.append(card.get_cents_per_point())\n break\n return found", "def charge(customer):\n payment_method = generate_payment_method(customer[\"email\"])\n payment_intent = stripe.PaymentIntent.create(\n amount=80000,\n currency=\"dkk\",\n automatic_payment_methods={\"enabled\": True},\n customer=customer,\n description=\"Payment intent created by pytest test_payments.py\",\n payment_method=payment_method,\n )\n result = payment_intent.confirm(\n return_url=\"https://test.dknog.dk/returnurl\"\n )\n assert \"charges\" in result and len(result[\"charges\"][\"data\"]) == 1\n return result[\"charges\"][\"data\"][0]", "def charge(self):\n return self._charge", "def check_card(self, card, issued_amount=None):\n\n # MODIF: issued_amount is only used for CardIssues as we need to know how much was issued in the bundle.\n # CONVENTION: voters' weight is the balance at the start block of current epoch\n\n debug = self.debug_donations\n\n if card.type == \"CardIssue\":\n if debug: print(\"PARSER: Checking validity of CardIssue\", card.txid, \"based on txid:\", card.donation_txid)\n\n # First step: Look for a matching DonationTransaction.\n dtx_id = card.donation_txid\n\n # check 1: filter out duplicates (less expensive, so done first)\n if (card.sender, dtx_id) in self.used_issuance_tuples:\n if debug: print(\"PARSER: Ignoring CardIssue: Duplicate or already processed part of CardBundle.\")\n return False\n\n # Check if it is a proposer or a donation issuance.\n # Proposers provide the ref_txid of their proposal transaction.\n # If this TX is in proposal_txes and they are the sender of the card and fulfill all requirements,\n # then the token is granted to them at their proposal address.\n\n if (dtx_id in self.valid_proposals) and self.validate_proposer_issuance(dtx_id, issued_amount, card.sender, card.blocknum):\n if debug: print(\"PARSER: DT CardIssue (Proposer):\", card.txid)\n\n elif self.validate_donation_issuance(dtx_id, issued_amount, card.sender):\n if debug: print(\"PARSER: DT CardIssue (Donation):\", card.txid)\n\n else:\n if debug: print(\"PARSER: Ignoring CardIssue: Invalid data.\")\n return False\n\n self.used_issuance_tuples.append((card.sender, dtx_id))\n return True\n\n else:\n\n if debug: print(\"PARSER: DT CardTransfer:\", card.txid)\n return True", "def check_funds(self, amount):\n if amount > self.get_balance():\n return False\n else:\n return True", "def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def round(self, price) -> (int, bool):\n self.user_count += 1\n return self.current_phase.get_crp_sample(class_idx=self.current_user_class, price=price), len(self.sampled_users) == 0", "def modify_price(self, price):\n if price is not None and self.is_cancellable:\n log.info(\"bo#%s: modify price (pending) order \" % self.ticket)\n not_implemented_error(\"Can't modify price for now (only for pending orders which wasn't triggered\")\n order_id = 
self.order_id_master\n cancel_order(order_id) # DANGEROUS! it should be atomic operation!\n #style = self.style\n #if self.is_limit:\n #elif self.is_stop:\n #elif self.is_stop_limit\n #order_id = order(self.symbol, self.volume, style=new_style))\n \n else:\n return", "def itemIsCredit(self, track, level):\n if (track == PETSOS):\n return 0\n return level < self.creditLevel", "def safe_to_dance(self):\n #check for all fil/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"not safe to dance!\")\n return False\n else:\n self.turn_by_deg(90)\n #after all checks have been done, we deduce its safe to dance\n print(\"Dance on!\")\n return True", "def charge(self, charge):\n\n self._charge = charge", "def discount_card(subtotal):\n\n if \"gold\" in CARDS:\n return gold_card(subtotal) #This calculates the 5%\n\n elif \"silver\" in CARDS:\n return silver_card(subtotal) #This calculates the 2%\n\n elif \"gold\" in CARDS and \"silver\" in CARDS:\n return gold_card(subtotal)\n\n else:\n return 0 #Whitout discount", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()", "def limit_buy(self, order_id, quantity, price):\n Library.functions.limit_buy(self._book, order_id, quantity, price)", "def add_card(self, card):\n # This basically means \"the previous card was the 2nd so you can't double/split anymore\"\n if self.can_double and self.get_card_value(self.first_card) != self.count:\n self.can_double = False\n self.can_split = False\n # This is the second card and it's the same as the first, you can now split!\n if self.can_double and self.first_card == card:\n self.can_split = True\n if self.first_card == 0:\n self.first_card = card\n if card == 1:\n self.soft = True\n self.count += self.get_card_value(card)\n # Unsoften if you have an Ace worth 11 and it would make you bust\n if self.count > 21 and self.soft:\n 
self.soft = False\n self.count -= 10", "def can_bet(self, amount: int) -> bool:\n if isinstance(amount, int):\n if self.chips_amount - amount >= 0:\n return True\n else:\n raise ValueError(\"The 'amount' param must be an instance of int, got\"+\n str(type(amount)))\n\n return False", "def credits_purchase(request):\n if request.method==\"POST\":\n \"\"\" POST request can come from card form submit or from initial\n credits amount selection page \"\"\"\n try:\n credits_amount = int(request.POST.get('credits_amount', None))\n except TypeError:\n messages.error(request, \"Amount was invalid\")\n return redirect('add_credits')\n if credits_amount or credits_amount == 0:\n if (credits_amount % 10) is not 0:\n # Credits amount wasn't a multiple of 10, so user bypassed\n # JavaScript form validation\n messages.error(\n request, \n \"Credits can only be added in multiples of 10\"\n )\n return redirect('add_credits')\n credits_cost = \\\n settings.COST_PER_TEN_CREDITS * (credits_amount / 10)\n if \"stripeToken\" in request.POST:\n # POST request came from card form submit\n try:\n customer = stripe.Charge.create(\n amount = int(credits_cost*100),\n currency = \"EUR\",\n description = request.user.email,\n source = request.POST['stripeToken'],\n )\n except stripe.error.CardError:\n messages.error(request, \"Your card was declined!\")\n return redirect('credits_purchase')\n if customer.paid:\n # All is good, so add the chosen amount of credits\n profile = request.user.profile\n profile.credits += credits_amount\n profile.save()\n return render(request, 'credits_added.html')\n else:\n messages.error(request, \"Unable to take payment\")\n return redirect('credits_purchase')\n else:\n \"\"\" POST request came from initial credits selection page\n so now render Stripe card form \"\"\"\n return render(request, \"credits_checkout.html\", {\n 'publishable': settings.STRIPE_PUBLISHABLE,\n 'ten_credit_cost': settings.COST_PER_TEN_CREDITS,\n 'credits_amount': credits_amount,\n 'total': credits_cost,\n })\n else:\n messages.error(request, \"No amounts of credits selected\")\n return redirect('add_credits')\n else:\n return redirect('add_credits')", "def click_card(self):\n time.sleep(3)\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, self.CSS_CASHONDELIVERY1)))\n # result = self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).is_displayed()\n # if result == True:\n self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).click()\n time.sleep(0.4)\n self.driver.find_element_by_xpath(self.XPATH_CVV).send_keys(\"111\")\n # self.driver.find_element_by_css_selector('.cvv-con > ion-input >input.text-input').send_keys('111')\n time.sleep(0.2) \n # else:\n # print(\"else condition\")\n result = self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).is_displayed()\n if result == True:\n self.driver.find_element_by_css_selector(self.CSS_CASHONDELIVERY1).click()\n time.sleep(0.4)\n self.driver.find_element_by_css_selector(self.CSS_CVV).click()\n #self.driver.find_element_by_css_selector('#payment-mode-cvv-14 > input').send_keys('111')\n self.driver.find_element_by_css_selector('.cvv-con > ion-input >input.text-input').send_keys('111')\n time.sleep(0.2)\n \n else:\n print()\n except:\n print(\"not clicked\")\n time.sleep(3)", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > 
MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def charge_if_available(self, num_bytes, current_time=None):\n if self._get_time_to_sleep(num_bytes, current_time) > 0.0:\n return False\n else:\n self.__bucket_contents -= num_bytes\n return True", "def purchase(self, tile):\n if tile.value <= self._cash:\n self._cash -= tile.value\n tile.owner = self\n self._board.acceptNotification(notification.PNTilePurchase(self, tile))\n else:\n self._board.acceptNotification(notification.PNInsufficientFunds(self, tile.value - self._cash))", "def check_symbol_price(self, data):\n if self.input_price < float(data.get(\"price\")):\n logging.info(\"Symbol price is higher than the input provided by the user.\")\n logging.info(\"Input Price :- \")\n logging.info(str(self.input_price))\n logging.info(\"Symbol Price :- \")\n logging.info(str(data.get(\"price\")))\n logging.info(\"+++++++++++++++++++++++++++++\")", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def needRestock(self):\n #TODO check if the quantity<threshold and return true if it is\n #we'll set for now the threshold at *five* items\n #so we need to check if self.quantity is less than five.\n threshold = 5\n if self.quantity < threshold:\n return True\n else:\n return False", "def has_money(self) -> bool: \n \n return self.money > 0.0", "def pay_for_item(self, item):\n while self.amount < item.price:\n paid_amount = float(input(f\"Pay €{round((item.price - self.amount), 2)} : \"))\n if paid_amount <= 0:\n custom_log(\"Invalid amount entered.\", MSG_ERROR)\n continue\n self.amount = self.amount + paid_amount", "def duty_free(price: int, discount: int, holiday_cost: int) -> int:\n if holiday_cost == 500:\n return holiday_cost\n\n discount /= 100\n price = holiday_cost / (price * discount)\n price = int(price)\n return price", "def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())", "def _pay(self, asked_value):\n\t\tpayment_value = min(self.stock, asked_value)\n\t\tself.stock -= payment_value\n\t\treturn payment_value" ]
[ "0.8322736", "0.8296711", "0.7829243", "0.6570418", "0.6408199", "0.612218", "0.61032504", "0.60590565", "0.6037275", "0.60188115", "0.5950383", "0.59281176", "0.59213865", "0.5874455", "0.58660394", "0.5860026", "0.5825693", "0.5783239", "0.57501924", "0.5738126", "0.57249475", "0.5714982", "0.5703037", "0.56896293", "0.56810635", "0.5623873", "0.56220025", "0.5594615", "0.5582282", "0.5580885", "0.5545751", "0.55449736", "0.55382043", "0.55198413", "0.54918355", "0.549151", "0.5488236", "0.5487333", "0.54857635", "0.545583", "0.54461193", "0.5435799", "0.5434937", "0.54297006", "0.5428678", "0.542549", "0.5417946", "0.54044825", "0.53867364", "0.53857875", "0.53740036", "0.5373338", "0.5372704", "0.5369145", "0.5369145", "0.5360172", "0.53583455", "0.5352807", "0.53342736", "0.53315544", "0.5316556", "0.53157246", "0.5304477", "0.52918875", "0.52768403", "0.5275868", "0.52645123", "0.5262346", "0.5255936", "0.52468634", "0.5245573", "0.5243692", "0.5241647", "0.52406865", "0.52388346", "0.52307093", "0.5226323", "0.5224761", "0.5218758", "0.52114356", "0.5210847", "0.5195691", "0.5192333", "0.5188755", "0.51827633", "0.51819855", "0.51768374", "0.51724446", "0.51540965", "0.514462", "0.51428473", "0.5138976", "0.5132789", "0.51289296", "0.5124555", "0.512327", "0.51203907", "0.51144785", "0.5097708", "0.5090944" ]
0.8432956
0
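
The query/document pair above is the classic charge method of a simple credit-card model: accept the charge only if it keeps the balance within the limit. A minimal runnable sketch follows, assuming a hypothetical CreditCard class whose constructor and attribute names (_limit, _balance) are inferred from the snippets in this record rather than stated by the row itself:

class CreditCard:
    """Sketch of the credit-card model implied by the row above."""

    def __init__(self, limit):
        # Assumed constructor; the row only defines charge().
        self._limit = limit
        self._balance = 0

    def charge(self, price):
        # Deny the charge if it would push the balance past the limit.
        if price + self._balance > self._limit:
            return False
        self._balance += price
        return True


card = CreditCard(limit=1000)
assert card.charge(600) is True   # balance becomes 600
assert card.charge(500) is False  # 1100 would exceed the limit of 1000
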
Process a customer payment that reduces the balance.
def make_payment(self, amount):
    self._balance -= amount
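
As written, make_payment deducts the amount unconditionally. A hedged variant is sketched below with the type and sign guards that several negatives in this record use; those checks are borrowed from the negative snippets further down, not part of the document field itself:

def make_payment(self, amount):
    # Guards mirror the TypeError/ValueError checks in the negatives below.
    if not isinstance(amount, (int, float)):
        raise TypeError('amount must be numeric')
    if amount < 0:
        raise ValueError('amount must be non-negative')
    self._balance -= amount
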
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_payment(self, payment):\n self._balance -= payment", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError('Amount must be numeric')\n self._balance -= amount", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError()\n if amount < 0: raise ValueError()\n self._balance -= amount", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False", "def deposit(account, amount):\n pass", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def OperateAccount(self, user_id, amount_money):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n old_balance = user_data.GetAmountMoney()\n new_balance = int(old_balance) + int(amount_money)\n if new_balance >= 0:\n user_data.SetAmountMoney(new_balance)\n self.db_manager.UpdateData(user_id, user_data.GetAmountMoney())\n return JsonSerializer.SerializeObject(user_data)\n else:\n return \"{\\\"ERROR\\\":\\\"Operation denied insufficient money\\\"}\"", "def cash_deposit(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to Deposit:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n old_balance = MY_MEMBER[i].balance\n MY_MEMBER[i].balance += amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Depositing Cash******\"\n print\"your Old Bank balance: %r\" % old_balance\n print\"Amount Deposited: %r\" % amount\n print\"your New Bank balance: %r\" % new_balance\n print\"*************************\"\n what_to_do(name, bank_id, password)", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def process_month(self):\n if self.balance > 0:\n # if positive balance, convert APR to monthly multiplicative factor\n monthly_factor = pow(1 + self.apr, 1 / 12)\n self.balance *= monthly_factor", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def __init__(self, customer, bank, account, limit, bank_bal = 0):\n\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = bank_bal # To store customer spendings.", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, amount):\n self.balance += amount", "def handle_balance_update(self, form):\n\n # Update balances of old and new accounts\n account_object: Account = form.cleaned_data.get('account', None)\n if account_object:\n if account_object == self.data_previous_account:\n \"\"\"\n Case 1: New account is same as previous account\n \"\"\"\n # Find difference between new and old balances, and deduct the difference from account\n balance_diff = form.cleaned_data.get('amount', None) - self.data_previous_amount\n 
account_object.balance -= balance_diff\n account_object.save()\n else:\n \"\"\"\n Case 2: New account is not the same as previous account\n \"\"\"\n # Add old amount to the previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()\n\n # Remove new amount from new account\n account_object.balance -= self.object.amount\n account_object.save()\n elif self.data_previous_account:\n \"\"\"\n Case 3:\n Previous account exists but was removed from expense; \n no account listed on submitted form\n \"\"\"\n # Add old amount to previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()", "def final_step_customer(Xaction_type, Xcredit_type, Xcredit_file, Xusers_account):\n ####################################################\n if Xaction_type == \"deposit\" and Xcredit_type == \"savings\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"deposit\" and Xcredit_type == \"current\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"savings\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_sav_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"current\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_cur_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"balance\" and Xcredit_type == \"savings\":\n print(\"savings total is #\" + f'{users_account.get_sav_bal():,}')\n\n if Xaction_type == \"balance\" and Xcredit_type == \"current\":\n print(\"current total is #\" + f'{users_account.get_cur_bal():,}')\n\n if Xaction_type == \"history\" and Xcredit_type == \"savings\":\n #print necessary information from the file\n print_history(Xcredit_file)\n\n if Xaction_type == \"history\" and Xcredit_type == \"current\":\n #print necessary information from the file\n print_history(Xcredit_file)", "def withdraw(self, amount):\n self.balance -= amount", "def test_payment(self):\n debit_jobs([(self.job, 
A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def update_balance(self):\n if self.calculated_balance < 0:\n raise AccountBalanceError('calculated_balance on account {} is below 0'.format(self))\n self.balance = self.calculated_balance\n self.save()", "def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12", "def Credit(self):\n self.Deposit()\n self.balance += self.amount\n print \"balance credited\"\n print \" Total balance =\",self.balance\n return self.balance", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = 
ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def _balance_update(self):\n return_rate = self.df.loc[self.currentStep, \"return_Close\"]\n self.buy_amount += return_rate * self.buy_amount\n self.sell_amount -= return_rate * self.sell_amount", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def finalize(state, coinbase):\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "async def balance(self, ctx: commands.Context, user: discord.Member = None):\r\n if user is None:\r\n user = ctx.author\r\n\r\n bal = await bank.get_balance(user)\r\n currency = await bank.get_currency_name(ctx.guild)\r\n max_bal = await bank.get_max_balance(ctx.guild)\r\n if bal > max_bal:\r\n bal = max_bal\r\n await bank.set_balance(user, bal)\r\n await ctx.send(\r\n _(\"{user}'s balance is {num} {currency}\").format(\r\n user=user.display_name, num=humanize_number(bal), currency=currency\r\n )\r\n )", "def withdraw(account, amount):\n pass", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(600), # debited (600) + adjustment (0) = invoiced (600)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def awaiting_payment(self):", "def deposit(self, account_number: int, deposit: float): \n self._accounts[account_number][1] += deposit", "def payment_post_delete(**kwargs):\n payment = kwargs['instance']\n bill = payment.bill\n bill.update_cached_totals()", "def getRemainingBalance(monthlyPayment, balance, monthlyInterestRate):\r\n numberOfMOnthNeeded = 0\r\n for i in range (0, 12):\r\n numberOfMOnthNeeded += 1\r\n balance = round(balance * (1 + monthlyInterestRate) - monthlyPayment, 2)\r\n if balance < 0:\r\n print 'Number of month needed:', numberOfMOnthNeeded\r\n break\r\n return balance", "def my_rebalance(context, data):\n freq_month = 3\n context.counter += 1\n if context.counter == freq_month:\n for stock, weight in context.weights.iteritems():\n context.counter = 0\n if data.can_trade(stock):\n order_target_percent(stock, weight)", "def __init__(self,customer, bank, acnt,limit):\n self._customer=customer\n self._bank=bank\n self._account=acnt\n self._limit=limit\n self._balance=0", "def calculate_debt(acc, key, value):\n if key == 'transactions':\n for amount in value:\n acc += amount \n\n return acc", "def payment_post_save(**kwargs):\n payment = 
kwargs['instance']\n bill = payment.bill\n bill.update_cached_totals()", "def deposit(self, amount):\n self.balance = self.balance + amount\n return self.balance", "def get_balance(self, payments):\n # calc monthly interest\n monthly_interest = self.__calculate_monthly_interest()\n m = 1 + monthly_interest\n\n # calculate balance\n balance = self.principle * (\n ((m ** self.__months) - (m ** payments)) / (\n (m ** self.__months) - 1))\n return balance", "def update_invoice(cls, invoice_id: int, payment_request: Tuple[Dict[str, Any]], is_apply_credit: bool = False):\n current_app.logger.debug('<update_invoice')\n\n invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=False)\n # If the call is to apply credit, apply credit and release records.\n if is_apply_credit:\n credit_balance = Decimal('0')\n payment_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)\n invoice_balance = invoice.total - (invoice.paid or 0)\n if (payment_account.credit or 0) >= invoice_balance:\n pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(\n invoice.payment_method_code)\n # Only release records, as the actual status change should happen during reconciliation.\n pay_service.apply_credit(invoice)\n credit_balance = payment_account.credit - invoice_balance\n invoice.paid = invoice.total\n invoice.save()\n elif (payment_account.credit or 0) <= invoice_balance:\n invoice.paid = (invoice.paid or 0) + (payment_account.credit or 0)\n invoice.save()\n\n payment_account.credit = credit_balance\n payment_account.save()\n else:\n payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')\n\n is_not_currently_on_ob = invoice.payment_method_code != PaymentMethod.ONLINE_BANKING.value\n is_not_changing_to_cc = payment_method not in (PaymentMethod.CC.value, PaymentMethod.DIRECT_PAY.value)\n # can patch only if the current payment method is OB\n if is_not_currently_on_ob or is_not_changing_to_cc:\n raise BusinessException(Error.INVALID_REQUEST)\n\n # check if it has any invoice references already created\n # if there is any invoice ref , send them to the invoiced credit card flow\n\n invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)\n if invoice_reference:\n invoice.payment_method_code = PaymentMethod.CC.value\n else:\n pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(\n PaymentMethod.DIRECT_PAY.value)\n payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)\n pay_service.create_invoice(payment_account, invoice.payment_line_items, invoice,\n corp_type_code=invoice.corp_type_code)\n\n invoice.payment_method_code = PaymentMethod.DIRECT_PAY.value\n invoice.save()\n current_app.logger.debug('>update_invoice')\n return invoice.asdict()", "def deposit(self, amount):\n self.balance += amount\n self.transactions.append((\"Deposit\", amount))\n print \"Your new balance is $%d.\" % self.balance", "def deposit(self, amount) -> None:\n self._balance += amount\n return None", "def __init__(self, customer, bank, acnt, limit):\n self._customer = customer\n self._bank = bank\n self._account = acnt\n self._limit = limit\n self._balance = 0", "def deposit_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else 
BankOperationsUi.deposit_money()", "def exchange_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n move_line_pool = self.pool.get('account.move.line')\n\n voucher_obj = self.pool.get('account.voucher')\n old_voucher_ids = voucher_obj.search(cr, uid, [('move_id', '=', context['active_id'])], context=context)\n old_chk_log_ids = check_log_pool.search(cr,uid,[('name','in',old_voucher_ids),('status','=','active')], context=context)\n '''if chk_log_ids:\n check_log_pool.write(cr, uid, chk_log_ids, {'status':'delete','deleted':True},context=context)'''\n if old_chk_log_ids:\n raise osv.except_osv(_('Warning'), _('This move have already exchanged'))\n voucher_id = self.check_move_data(cr, uid, ids, context=context)\n if not voucher_id:\n raise osv.except_osv(_('Warning'), _('The account in credit lines must be of type liquidity'))\n if data.new_no and voucher_id:\n move = move_pool.browse(cr, uid, context['active_id'], context=context)\n journal_id=move and move.journal_id\n if self._exchange_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('status','=','active')], context=context)\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':data.new_no}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',context['active_id'])], context=context)\n line = move_line_pool.browse(cr, uid, lines, context=context)[0]\n check_log_pool.create(cr, uid,{'name': voucher_id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n #check_log_pool.create(cr, uid,{'partner_id':line.partner_id.id,'date_due':move.date,'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n move_pool.write(cr, uid,[context['active_id']], {'ref' : next_seq or ' '}, context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n return {'type':'ir.actions.act_window_close'}", "def __init__(self, customer, bank, account, limit):\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = 0", "def test_update_customer_credit(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n update_customer_credit(user_1['customer_id'], 5000.00)\r\n query = Customer.get(Customer.customer_id == user_1['customer_id'])\r\n self.assertEqual(5000.00, query.customer_limit)\r\n\r\n # Test for non-existant customer\r\n with self.assertRaises(ValueError):\r\n update_customer_credit('456879', 5000.00)\r\n\r\n # Test for non-float value inputted\r\n with self.assertRaises(TypeError):\r\n update_customer_credit(user_1['customer_id'], '$20')\r\n drop_db()", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def draw_money(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to withdraw:\"))\n for i in range(0, len(MY_MEMBER)):\n if 
MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n if MY_MEMBER[i].balance >= amount:\n MY_MEMBER[i].balance -= amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Withdrawing Cash*****\"\n print\"your New Bank balance: %r\" % new_balance\n print\"Amount Withdraw: %r\" % amount\n print\"*************************\"\n\n else:\n print\"your Account Balance is low!! \"\n print\"Transaction Failed...\"\n what_to_do(name, bank_id, password)\n return\n what_to_do(name, bank_id, password)", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def charge_credit_card(amount,save_to_cim=False):\n\n # Create a merchantAuthenticationType object with authentication details\n # retrieved from the constants file\n merchantAuth = apicontractsv1.merchantAuthenticationType()\n merchantAuth.name = CONSTANTS.apiLoginId\n merchantAuth.transactionKey = CONSTANTS.transactionKey\n\n\n # Create the payment data for a credit card\n creditCard = apicontractsv1.creditCardType()\n card_types = ['visa','discover','mastercard','jcb']\n creditCard.cardNumber = fake.credit_card_number(card_type=random.choice(card_types))\n creditCard.expirationDate = fake.credit_card_expire()\n creditCard.cardCode = fake.credit_card_security_code()\n\n # Add the payment data to a paymentType object\n payment = apicontractsv1.paymentType()\n payment.creditCard = creditCard\n\n # Create order information\n order = apicontractsv1.orderType()\n order.invoiceNumber = str(random.randint(1000,3000))\n order.description = fake.bs()\n\n # Set the customer's Bill To address\n customerAddress = apicontractsv1.customerAddressType()\n customerAddress.firstName = fake.first_name()\n customerAddress.lastName = fake.last_name()\n customerAddress.company = fake.bs()\n customerAddress.address = fake.street_address()\n customerAddress.city = fake.city()\n customerAddress.state = fake.address().split()[-1].split()[0]\n customerAddress.zip = fake.postalcode_in_state()\n customerAddress.country = fake.country()\n customerAddress.phoneNumber = fake.phone_number()\n\n\n # Set the customer's identifying information\n customerData = apicontractsv1.customerDataType()\n customerData.type = \"individual\"\n customerData.id = fake.upc_e()\n customerData.email = fake.email()\n\n # Add values for transaction settings\n duplicateWindowSetting = apicontractsv1.settingType()\n duplicateWindowSetting.settingName = \"duplicateWindow\"\n duplicateWindowSetting.settingValue = \"600\"\n settings = apicontractsv1.ArrayOfSetting()\n settings.setting.append(duplicateWindowSetting)\n\n # setup individual line items\n random_num = random.randint(2000,5000)\n line_item_1 = apicontractsv1.lineItemType()\n line_item_1.itemId = str(random.randint(1,9))\n line_item_1.name = \"first\"\n line_item_1.description = fake.catch_phrase()\n line_item_1.quantity = str(random.randint(1,9))\n line_item_1.unitPrice = \"12.95\"\n line_item_2 = apicontractsv1.lineItemType()\n line_item_2.itemId = str(random.randint(1,9))\n line_item_2.name = \"second\"\n line_item_2.description = fake.catch_phrase()\n line_item_2.quantity = str(random.randint(1,9))\n line_item_2.unitPrice = \"7.95\"\n line_item_3 = 
apicontractsv1.lineItemType()\n line_item_3.itemId = str(random.randint(1,9))\n line_item_3.name = \"third\"\n line_item_3.description = fake.catch_phrase()\n line_item_3.quantity = str(random.randint(1,9))\n line_item_3.unitPrice = \"100.00\"\n\n\n # build the array of line items\n line_items = apicontractsv1.ArrayOfLineItem()\n line_items.lineItem.append(line_item_1)\n line_items.lineItem.append(line_item_2)\n line_items.lineItem.append(line_item_3)\n\n # Create a transactionRequestType object and add the previous objects to it.\n transactionrequest = apicontractsv1.transactionRequestType()\n transactionrequest.transactionType = \"authCaptureTransaction\"\n transactionrequest.amount = amount\n transactionrequest.payment = payment\n transactionrequest.order = order\n transactionrequest.billTo = customerAddress\n transactionrequest.customer = customerData\n transactionrequest.transactionSettings = settings\n transactionrequest.lineItems = line_items\n\n # Assemble the complete transaction request\n createtransactionrequest = apicontractsv1.createTransactionRequest()\n createtransactionrequest.merchantAuthentication = merchantAuth\n createtransactionrequest.refId = \"1234-3432\"\n createtransactionrequest.transactionRequest = transactionrequest\n # Create the controller\n createtransactioncontroller = createTransactionController(\n createtransactionrequest)\n createtransactioncontroller.execute()\n\n response = createtransactioncontroller.getresponse()\n\n if response is not None:\n # Check to see if the API request was successfully received and acted upon\n if response.messages.resultCode == \"Ok\":\n # Since the API request was successful, look for a transaction response\n # and parse it to display the results of authorizing the card\n if hasattr(response.transactionResponse, 'messages') is True:\n print(\n 'Successfully created transaction with Transaction ID: %s'\n % response.transactionResponse.transId)\n if save_to_cim:\n # create CIM profile\n cim_create.append(response.transactionResponse.transId)\n create_customer_profile_from_transaction(str(cim_create[0]))\n print('Transaction Response Code: %s' %\n response.transactionResponse.responseCode)\n print('Message Code: %s' %\n response.transactionResponse.messages.message[0].code)\n print('Description: %s' % response.transactionResponse.\n messages.message[0].description)\n else:\n print('Failed Transaction.')\n if hasattr(response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(response.transactionResponse.\n errors.error[0].errorCode))\n print(\n 'Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n # Or, print errors if the API request wasn't successful\n else:\n print('Failed Transaction.')\n if hasattr(response, 'transactionResponse') is True and hasattr(\n response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(\n response.transactionResponse.errors.error[0].errorCode))\n print('Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n else:\n print('Error Code: %s' %\n response.messages.message[0]['code'].text)\n print('Error message: %s' %\n response.messages.message[0]['text'].text)\n else:\n print('Null Response.')\n\n return response", "def balance(self, date=None):\r\n\r\n qs = self._entries()\r\n if date:\r\n qs = qs.filter(transaction__t_stamp__lt=date)\r\n r = qs.aggregate(b=Sum('amount'))\r\n b = r['b']\r\n\r\n flip = self._DEBIT_IN_DB()\r\n if self._positive_credit():\r\n flip *= -1\r\n\r\n if b == None:\r\n b = 
Decimal(\"0.00\")\r\n b *= flip\r\n\r\n #print \"returning balance %s for %s\" % (b, self)\r\n return b", "def withdraw(self, currency, amount, address):\n pass", "def post(self):\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in (\n 'outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def caseB_test(self, payment, debit):\n credit_jobs(\n [(self.job, payment, A(0), A(0))], payment.gross\n ) # this creates the 'negative' part of balance\n debit_jobs(\n [(self.job, debit, Entry.FLAT_DEBIT)]\n ) # this creates the 'positive' part of balance\n case = (\n payment.negate + debit\n ) # this is either net:-0.01,tax:0.01 or net:0.01,tax:-0.01\n self.assert_balances(\n bank=A(payment.gross, 0, 0),\n balance=case,\n invoiced=debit,\n promised=case,\n partial=payment.net_amount,\n tax=payment.tax_amount,\n paid=payment.negate,\n )\n zero_out_payment = A(\n n=payment.net, t=payment.tax\n ) # we can't create final invoice with negative net/tax\n debit_jobs(\n [(self.job, zero_out_payment, Entry.FLAT_DEBIT)], recognize_revenue=True\n )\n self.assert_balances(\n bank=A(payment.gross, 0, 0),\n balance=debit,\n invoiced=payment + debit,\n credited=A(\n n=\"0.01\", t=\"0.01\"\n ).negate, # the recognized revenue debit first clears the oustanding balance\n debited=debit\n + debit\n + zero_out_payment, # the recognized revenue debit then re-debits the outstanding balance\n paid=payment.negate,\n income=A(n=\"0.01\"),\n tax=A(t=\"0.01\"),\n )", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % (stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def deposit(self, amount):\n self.__balance += amount\n return self.__balance", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def deposit(self, amount):\n self.dep = amount\n self.balance += self.dep", "def credit(self, amount, debit_account, description, debit_memo=\"\", credit_memo=\"\", datetime=None):\r\n assert amount >= 0\r\n return self.post(-amount, debit_account, description, self_memo=credit_memo, other_memo=debit_memo, datetime=datetime)", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. 
Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def pay_fee(self, fee):\n self.wallet -= fee", "def deposit(self, amount):\r\n self.balance = self.balance + amount\r\n amount = abs(amount)\r\n self.transactions.append(+amount)\r\n return amount", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n logger.info(\n f\"Successfully updated customer {customer_id} credit limit\"\n )\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to update customer {customer_id}\"\n \" credit limit. {unknown_error}\"\n )\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def credits_purchase(request):\n if request.method==\"POST\":\n \"\"\" POST request can come from card form submit or from initial\n credits amount selection page \"\"\"\n try:\n credits_amount = int(request.POST.get('credits_amount', None))\n except TypeError:\n messages.error(request, \"Amount was invalid\")\n return redirect('add_credits')\n if credits_amount or credits_amount == 0:\n if (credits_amount % 10) is not 0:\n # Credits amount wasn't a multiple of 10, so user bypassed\n # JavaScript form validation\n messages.error(\n request, \n \"Credits can only be added in multiples of 10\"\n )\n return redirect('add_credits')\n credits_cost = \\\n settings.COST_PER_TEN_CREDITS * (credits_amount / 10)\n if \"stripeToken\" in request.POST:\n # POST request came from card form submit\n try:\n customer = stripe.Charge.create(\n amount = int(credits_cost*100),\n currency = \"EUR\",\n description = request.user.email,\n source = request.POST['stripeToken'],\n )\n except stripe.error.CardError:\n messages.error(request, \"Your card was declined!\")\n return redirect('credits_purchase')\n if customer.paid:\n # All is good, so add the chosen amount of credits\n profile = request.user.profile\n profile.credits += credits_amount\n profile.save()\n return render(request, 'credits_added.html')\n else:\n messages.error(request, \"Unable to take payment\")\n return redirect('credits_purchase')\n else:\n \"\"\" POST request came from initial credits selection page\n so now render Stripe card form \"\"\"\n return render(request, \"credits_checkout.html\", {\n 'publishable': settings.STRIPE_PUBLISHABLE,\n 'ten_credit_cost': settings.COST_PER_TEN_CREDITS,\n 'credits_amount': credits_amount,\n 'total': credits_cost,\n })\n else:\n messages.error(request, \"No amounts of credits selected\")\n return redirect('add_credits')\n else:\n return redirect('add_credits')", "def withdraw(self, currency, amount, address):\n return self.__call__('balance', 'withdrawcurrency',\n {\"currencyname\": currency, \n \"quantity\": amount, \n \"address\": address})", "def handle_buyer_bills(account, resp_bills, bill_parser=utility_api_bill_parser):\n bills = resp_bills.get('bills', None)\n if bills:\n bills = list(map(bill_parser, bills))\n recent_bills = get_last_12_months(bills)\n\n if len(recent_bills) == 0:\n return 0.0\n\n #TODO: next lines, specifically calculating credit to buy --are they correct?\n total = sum(bill[\"bill_total_cost\"] for bill in recent_bills)\n avg_bill = (total / len(recent_bills))\n\n account.credit_to_buy = round(avg_bill 
* (1-settings.SAFETY_FACTOR), 2)\n account.average_monthly_credit = avg_bill\n account.save()", "def test_update_customer(self):\n set_up_db()\n add_customer(*self.test_customer)\n update_customer_credit(1, 500000.00)\n self.assertEqual(500000.00, Customer.get_by_id(1).credit_limit)", "def update_customer_credit(customer_id, credit_limit):\n customer = search_customer(customer_id)\n if customer is None:\n raise ValueError(f'Could not find customer for update with id '\n f'{customer_id}.')\n customer.credit_limit = credit_limit\n customer.save()", "def payment(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n sale_order_obj = request.registry.get('sale.order')\n\n order = request.website.sale_get_order(context=context)\n order.write({'usersess': request.session['webcalc_session_id']})\n #order.env.cr.commit()\n redirection = self.checkout_redirection(order)\n if redirection:\n return redirection\n\n shipping_partner_id = False\n if order:\n if order.partner_shipping_id.id:\n shipping_partner_id = order.partner_shipping_id.id\n else:\n shipping_partner_id = order.partner_invoice_id.id\n\n values = {\n 'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context),\n 'usersess': request.session['webcalc_session_id']\n }\n values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)\n values.update(sale_order_obj._get_website_data(cr, uid, order, context))\n\n if not values['errors']:\n acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)\n values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))\n render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Завершить оформление'))\n for acquirer in values['acquirers']:\n acquirer.button = payment_obj.render(\n cr, SUPERUSER_ID, acquirer.id,\n '/',\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=shipping_partner_id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=render_ctx)\n #vips_shop\n return request.website.render(\"vips_shop.payment\", values)", "def credit_card_payment(self, card, order, user):\n payment = {\n 'intent': 'sale',\n 'payer': {\n 'payment_method': 'credit_card',\n 'funding_instruments': [{\n 'credit_card': {\n 'type': card['type'],\n 'number': card['number'],\n 'expire_month': unicode(card['expire_month']),\n 'expire_year': unicode(card['expire_year']),\n 'cvv2': card['cvv2'],\n 'first_name': card['first_name'],\n 'last_name': card['last_name']\n }\n }]\n },\n 'transactions': [{\n 'amount': {\n 'total': unicode(order.charge_amount),\n 'currency': order.currency.code\n },\n 'description': 'Payment for order #%s' % (order.id)\n }],\n }\n\n logger.info('Processing Credit Card via PayPal', extra=payment)\n payment = paypalrestsdk.Payment(payment, api=self.api)\n\n with transaction.atomic():\n payment_txn = Transaction.objects.create(gateway=self.gateway,\n order=order,\n description='Transaction for order #%s' % order.id,\n status=Transaction.STATUS_PROCESSING,\n currency=order.currency.code,\n amount=order.charge_amount,\n updated_by=unicode(user),\n created_by=unicode(user))\n\n try:\n payment_created = payment.create()\n except Exception as e:\n logger.error('Failed to process Credit Card (transaction_id: %s)' % payment_txn.id)\n logger.exception(e)\n\n raise DoorstepError('We failed to process your Credit Card at 
the moment, please try again later!')\n\n if payment_created:\n try:\n with transaction.atomic():\n # Saving only few necessary fields refunding & record\n payment_txn.status = Transaction.STATUS_APPROVED\n payment_txn.add_param('id', unicode(payment.id), user)\n payment_txn.add_param('create_time', unicode(payment.create_time), user)\n payment_txn.add_param('update_time', unicode(payment.update_time), user)\n payment_txn.add_param('state', unicode(payment.state), user)\n payment_txn.add_param('intent', unicode(payment.intent), user)\n payment_txn.add_param('payment_method', unicode(payment.payer.payment_method), user)\n payment_txn.add_param('sale_id',\n unicode(payment.transactions[0].related_resources[0].sale.id), user)\n payment_txn.save()\n\n order.payment_status = Order.PAYMENT_PAID\n order.updated_by = unicode(user)\n order.save()\n except Exception as e:\n logger.error(('Failed to save successful Credit Card payment'\n ' (transaction_id: %s, payment_id: %s) in database.') % (payment_txn.id, payment.id))\n raise e\n else:\n logger.error('Failed to process Credit Card (transaction_id: %s)' % payment_txn.id,\n extra={'error': payment.error})\n\n with transaction.atomic():\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.error_message = payment.error['message']\n payment_txn.save()\n\n raise DoorstepError('We failed to process your Credit Card at the moment, please try again later!')\n\n return payment_txn", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def action_process(self):\n self.check_difference_balance()\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n for statement_line in statement_lines:\n if statement_line.move_line_id:\n statement_line.move_line_id.write({'cleared_bank_account': statement_line.cleared_bank_account,\n 'bank_acc_rec_statement_id': statement_line.cleared_bank_account and statement.id or False\n })\n statement.write({'state': 'done',\n 'verified_by_user_id': self._uid,\n 'verified_date': time.strftime('%Y-%m-%d')\n })\n for debit_line in statement.debit_move_line_ids:\n if debit_line.move_line_id.cleared_bank_account:\n debit_line.move_line_id.move_id.is_reconciled = True\n return True", "def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "def _decrease_money(self, amount):\n if 1 in self.money:\n self.money[1] -= amount", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def deposit(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(amount)\n else:\n print('deposit error')", "def _compute_account_balance(self, accounts, income_activity):\n mapping = {\n 'balance': 
\"COALESCE(SUM(debit),0) - COALESCE(SUM(credit), 0) as balance\",\n 'debit': \"COALESCE(SUM(debit), 0) as debit\",\n 'credit': \"COALESCE(SUM(credit), 0) as credit\",\n }\n\n res = {}\n result = {}\n for account in accounts:\n res[account.id] = dict.fromkeys(mapping, 0.0)\n if income_activity == True:\n res[account.id].update({'restricted_balance': 0.0,'unrestricted_balance': 0.0, 'awqaf_balance': 0.0})\n if accounts:\n tables, where_clause, where_params = self.env['account.move.line']._query_get()\n tables = tables.replace('\"', '') if tables else \"account_move_line\"\n wheres = [\"\"]\n if where_clause.strip():\n wheres.append(where_clause.strip())\n filters = \" AND \".join(wheres)\n request = \"SELECT account_id as id, \" + ', '.join(mapping.values()) + \\\n \" FROM \" + tables + \\\n \" WHERE account_id IN %s \" \\\n + filters + \\\n \" GROUP BY account_id\"\n params = (tuple(accounts._ids),) + tuple(where_params)\n self.env.cr.execute(request, params)\n for row in self.env.cr.dictfetchall():\n if income_activity == True:\n res[row['id']].update(row)\n else:\n res[row['id']] = row\n # Used for income activity report, calculate balance accourding to analytic account cost type (reatricted, unreatricted, awqaf)\n ## query for calculating reatricted balance\n if income_activity == True:\n restrec_res = {}\n unrestrec_res = {}\n awqaf_res = {}\n activity_filters = filters.replace('account_move_line__move_id', 'm').replace('account_move_line', 'l')\n restricted_request = \"SELECT account_id as id, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as restricted_balance\" + \\\n\t\t \" FROM account_move_line l \" +\\\n\t\t\t \" LEFT JOIN account_account a ON (l.account_id=a.id)\" +\\\n\t\t\t \" LEFT JOIN account_move m ON (l.move_id=m.id)\" +\\\n\t\t \" LEFT JOIN account_analytic_account an ON (l.analytic_account_id=an.id)\" +\\\n\t\t \" WHERE l.account_id IN %s AND an.cost_type = %s\" \\\n\t\t + activity_filters + \\\n\t\t \" GROUP BY account_id\"\n params = (tuple(accounts._ids),'restricted') + tuple(where_params)\n self.env.cr.execute(restricted_request, params)\n for re_row in self.env.cr.dictfetchall():\n restrec_res[re_row['id']] = re_row \n if re_row['id'] in res.keys():\n res[re_row['id']].update({'restricted_balance': restrec_res[re_row['id']].get('restricted_balance')})\n\n\t\t ## query for calculating unreatricted balance\n unrestricted_request = \"SELECT account_id as id, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as unrestricted_balance\" + \\\n\t\t \" FROM account_move_line l \" +\\\n\t\t\t \" LEFT JOIN account_account a ON (l.account_id=a.id)\" +\\\n\t\t\t \" LEFT JOIN account_move m ON (l.move_id=m.id)\" +\\\n\t\t \" LEFT JOIN account_analytic_account an ON (l.analytic_account_id=an.id)\" +\\\n\t\t \" WHERE l.account_id IN %s AND an.cost_type = %s\" \\\n\t\t + activity_filters + \\\n\t\t \" GROUP BY account_id\"\n params = (tuple(accounts._ids),'unrestricted') + tuple(where_params)\n self.env.cr.execute(unrestricted_request, params)\n for unre_row in self.env.cr.dictfetchall():\n unrestrec_res[unre_row['id']] = unre_row\n if unre_row['id'] in res.keys():\n res[unre_row['id']].update({'unrestricted_balance': unrestrec_res[unre_row['id']].get('unrestricted_balance') or 0.0})\n\t\t \n\t\t ## query for calculating awqaf balance\n awqaf_request = \"SELECT account_id as id, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as awqaf_balance\" + \\\n\t\t \" FROM account_move_line l \" +\\\n\t\t\t \" LEFT JOIN account_account a ON (l.account_id=a.id)\" +\\\n\t\t\t 
\" LEFT JOIN account_move m ON (l.move_id=m.id)\" +\\\n\t\t \" LEFT JOIN account_analytic_account an ON (l.analytic_account_id=an.id)\" +\\\n\t\t \" WHERE l.account_id IN %s AND an.cost_type = %s\" \\\n\t\t + activity_filters + \\\n\t\t \" GROUP BY account_id\"\n params = (tuple(accounts._ids),'awqaf') + tuple(where_params)\n self.env.cr.execute(awqaf_request, params)\n for awq_row in self.env.cr.dictfetchall():\n awqaf_res[awq_row['id']] = awq_row\n if awq_row['id'] in res.keys():\n res[awq_row['id']].update({'awqaf_balance': awqaf_res[awq_row['id']].get('awqaf_balance') or 0.0})\n result = res\n else:\n result = res\n return result", "def withdraw_money(transaction):\n conn = create_connection(database)\n\n sql = ''' UPDATE card\n SET balance = balance - ?\n WHERE number = ?'''\n\n with conn:\n cur = conn.cursor()\n cur.execute(sql, transaction)\n\n conn.commit()", "def payment_transaction(request):\n if request.method == 'POST':\n user = request.data['user']\n order = request.data['order']\n amount = request.data['amount']\n\n try:\n user_qry = User.objects.get(username=user)\n except User.DoesNotExist:\n message = 'An user does not exist in this name({})!'.format(user)\n data = {'error': message}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n try:\n ordr = Order.objects.get(id=order)\n except Order.DoesNotExist:\n message = 'An order does not exist in this ID({})!'.format(order)\n data = {'error': message}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n \n \n if ordr.total_amount < amount:\n data = {'error': 'You provided the amount longer than you have payable!'}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n if Payment.objects.filter(order=order):\n payment = Payment.objects.get(order=order)\n\n if payment.balance_amount < amount:\n data = {'error': 'You provided the amount longer than you have payable!'}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n \n payment.paid_amount += amount\n payment.balance_amount -= amount\n payment.save()\n\n else:\n payment = Payment()\n payment.user = user_qry\n payment.order = ordr\n payment.total_amount = ordr.total_amount\n payment.paid_amount = amount\n payment.balance_amount = ordr.total_amount - amount\n payment.save()\n\n data = {'Success': 'Success'}\n return Response(data, status=status.HTTP_201_CREATED)", "def update_account(row, account):\n if row['LAST_UPDATED_FROM_PAYGOV']:\n updated_at = datetime_from(row['LAST_UPDATED_FROM_PAYGOV'])\n account.donations.filter(time__lte=updated_at).delete()\n if account.category == Account.PROJECT:\n set_balances(row, account)\n account.save()", "def test_withdraw_amount_view(self):\n self.account.current_balance = 100000\n self.account.save()\n\n amount = random.randint(10, 100000)\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': amount}, format='json')\n self.account.refresh_from_db()\n self.assertEqual(100000-amount, self.account.current_balance)", "def deposit(self, amount):\r\n new_balance = self['get']('balance') + amount\r\n self['set']('balance', new_balance)\r\n return self['get']('balance')", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: 
', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def process(request, order):\n # Transaction results\n APPROVED = '1'\n DECLINED = '2'\n ERROR = '3'\n HELD_FOR_REVIEW = '4'\n print \"I am processing the request\"\n\n postdata = request.POST.copy()\n amount = cart.cart_subtotal(request)\n\n print amount\n\n charge = stripe.Charge.create(\n amount=int(amount*100),\n currency=\"ngn\", # I can Change to naira if needed\n card=postdata.get('stripeToken', ''),\n description=\"Example charge\"\n )\n #\n #charge.capture()\n\n\n if charge['card']['cvc_check']:\n transaction_id = charge.id[3:22]\n order = create_order(request, order, transaction_id)\n results = {'order_number': order.id, 'message': u''}\n elif charge.balance_transaction:\n results = {'order_number': 0, 'message': charge.failure_message, 'code': charge.failure_code,\n 'text': charge.description}\n else:\n results = {'order_number': 0, 'message':charge.failure_message, 'errors': charge.errors}\n return results", "def cash_withdrawal(amt):\r\n global withdraw_money\r\n global balance_money\r\n withdraw_money = amt\r\n print(\"Amout enetered : \", withdraw_money)\r\n balance_money = balance_money - withdraw_money\r\n print(\"Withdraw success\")", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def update_customer_credit(customer_id, credit_limit):\n try:\n customer = cm.Customers.get(cm.Customers.customer_id == customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except cm.DoesNotExist:\n raise ValueError", "def process_payment():\n\n url = 'https://api.worldpay.com/v1/orders'\n headers = {'Authorization': environ.get('WORLDPAY_API_KEY'),\n 'Content-type': 'application/json'}\n body = {\n \"paymentMethod\": {\n \"type\": \"Card\",\n \"name\": session['caller_name'],\n \"expiryMonth\": session['expiry'][:2],\n \"expiryYear\": f\"20{session['expiry'][2:]}\",\n \"cardNumber\": session['card_number'],\n \"cvc\": session['cvv'],\n \"issueNumber\": \"1\"\n },\n \"orderType\": \"ECOM\",\n \"orderDescription\": session['call_sid'],\n \"amount\": session['payment_amount'],\n \"currencyCode\": \"GBP\"}\n r = requests.post(url, headers=headers, data=json.dumps(body))\n requests.post(environ.get('END_OF_INTERACTION_URL'), r.text)\n response = VoiceResponse()\n response.say(\"Payment processed, goodbye\")\n # If your flow started in Twilio Studio, redirect back to it to complete the call\n # response.redirect(\n # 'https://webhooks.twilio.com/v1/Accounts/ACfd0573f9f976b99746c693XXXXXXXXXX/Flows/FWbfdeda0a21644267231d3dXXXXXXXXXX?FlowEvent=return')\n return str(response)" ]
[ "0.66964227", "0.6128591", "0.61197674", "0.5983254", "0.5962717", "0.59586847", "0.5945208", "0.59426755", "0.59344083", "0.59324026", "0.5862457", "0.58423245", "0.5832974", "0.58243793", "0.58243793", "0.58215094", "0.582141", "0.5816743", "0.58033234", "0.5802297", "0.5776027", "0.5766413", "0.57551765", "0.5743205", "0.57422805", "0.5722566", "0.5703514", "0.56864107", "0.5672723", "0.56633234", "0.5637521", "0.56342745", "0.5633491", "0.5610878", "0.5604155", "0.55985177", "0.5597013", "0.5583092", "0.5578869", "0.55643904", "0.5548611", "0.5546194", "0.55395687", "0.5538881", "0.5538077", "0.5529414", "0.5527774", "0.5526866", "0.55250454", "0.5523274", "0.55224305", "0.55223715", "0.55223507", "0.55182445", "0.55182445", "0.55182445", "0.5503228", "0.54912424", "0.5487842", "0.5484689", "0.548397", "0.54795665", "0.5478642", "0.5474138", "0.5467304", "0.5458019", "0.5451369", "0.54468465", "0.5435803", "0.5435461", "0.5433106", "0.5424351", "0.54148173", "0.54126805", "0.5410919", "0.5406273", "0.53997433", "0.5393204", "0.5389325", "0.5388686", "0.5388686", "0.5388686", "0.53881335", "0.5383666", "0.538247", "0.5380344", "0.5354791", "0.53496283", "0.5346067", "0.53413475", "0.5336905", "0.5335323", "0.5332596", "0.53312606", "0.5322897", "0.532055", "0.5318481", "0.5315111", "0.5311428", "0.53095454" ]
0.6793116
0
Generate random bytes to use as a CSRF secret
def gen_csrf_secret():
	return Random.new().read(csrf_secret_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def generate_csrf_token() -> int:\r\n ...", "def generate_key(self)->bytes:\n return os.urandom(32)", "def gen_secret() -> str:\n r = random.randrange(0, 255) # INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def get_random_secret_key():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n return get_random_string(50, chars)", "def _get_encode_random(self):\n return os.urandom(16).encode('hex')", "def random_bytes(self, length: int) -> bytes:\n return token_bytes(length)", "def create_temporary_secret():\n return uuid.uuid4().hex", "def generate_token():\n return uuid4()", "def get_request_authentication():\n return os.urandom(16)", "def generate_nonce():\n return uuid4().hex", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def create_challenge():\n\treturn os.urandom(12)", "def create_csrf_token(salt=''):\n\tif not salt:\n\t\tsalt = Random.new().read(csrf_salt_len).encode('hex')\n\th = SHA256.new()\n\th.update(get_csrf_secret() + salt)\n\treturn h.hexdigest() + salt", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def _generate_token_value():\n return secrets.token_urlsafe()", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def make_token():\n return secrets.token_urlsafe(36)", "def iv_gen():\r\n rndiv = os.urandom(16)\r\n return rndiv", "def long_token():\n hash = 
hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()", "def get_salt():\n return os.urandom(32)", "def random(n: int) -> bytes:\n return os.urandom(n)", "def genKey(length=32):\r\n return os.urandom(length)", "def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)", "def generate_state_token():\n chars = (ascii_letters + digits)\n rand = SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(len(chars)))\n return hmac.new(\n config.SECRET_KEY.encode('utf-8'),\n random_string.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def generateRandomString():\n return ''.join(b64encode(urandom(32)).decode('utf-8'))", "def random_bytes(N):\n return Crypto.Random.get_random_bytes(N)", "def generate_random_key():\n return '%030x' % (random.randrange(256**15),)", "def generate_salt(size):\n return hexlify(urandom(size)).decode()", "def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. 
Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def generateAuthToken():\r\n alnum = ''.join(c for c in map(chr, range(256)) if c.isalnum())\r\n return ''.join(random.choice(alnum) for _ in range(32))", "def _random_id():\n return binascii.hexlify(os.urandom(4)).decode()", "def mock_urandom(size: int) -> bytes:\n if size == 12:\n return b'Mb\\xd5N\\xc2\\xbd\\xa0\\xc8\\xa4L\\xfb\\xa0'\n elif size == 16:\n return b'\\xbb\\xd6\\x87\\xb6j\\xe5\\xdc\\x93\\xb0\\x13\\x1e\\xcc\\x9f\\xf4\\xca\\xab'\n elif size == 32:\n return b'\\x08\\xe0A\\xb6\\xf2\\xb7x\\x8f\\xe5\\xdap\\x87^6x~\\xa4F\\xc4\\xe9\\xb1\\x8a:\\xfbC%S\\x0cZ\\xbb\\xbe\\x88'\n else:\n return os.urandom(size)", "def gen_oauth_nonce():\n\trandom = os.urandom(32)\n\tencoded = base64.b64encode(random)\n\twords = re.sub('[^\\w]', '', str(encoded))\n\treturn words", "def _server_cookie_secret() -> str:\n return secrets.token_hex()", "def passwordGen() :\n\treturn __randomString(12)", "def create_hash(self):\n return os.urandom(32).encode('hex')", "def generate(length):\n return base64.encodestring(OpenSSL.rand.bytes(256))[:length]", "def get_random_str(len):\n return base64.urlsafe_b64encode(os.urandom(len))[0:len]", "def generate_sharedsecret_bytes(self):\n return number_to_string(\n self.generate_sharedsecret(),\n self.private_key.curve.order)", "def _generate_nonce(self):\n return str(random.randrange(100000, 999999))", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def dummy_content():\n return os.urandom(1 * 1024 * 1024) # 1MB", "def GetCspNonce():\n NONCE_LENGTH = 16\n return base64.b64encode(os.urandom(NONCE_LENGTH))", "def getRandomBytes(n=16):\n\n with open(\"/dev/urandom\") as f:\n _bytes = f.read(n)\n\n return binascii.hexlify(_bytes)", "def random_string(length):\n # this conservatively gets 8*length bits and then returns 6*length of\n # them. 
Grabbing (6/8)*length bits could lose some entropy off the ends.\n return urlsafe_b64encode(os.urandom(length))[:length]", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def generate_nonce():\n return str(int(round(time.time() * 1000)))", "def make_secret(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def generate_session_key(self):\n return ''.join(random.choice(string.digits + string.letters) for _ in range(16))", "def generate_password(n):\n import os\n import math\n from base64 import b64encode\n return b64encode(os.urandom(int(math.ceil(0.75*n))),'-_')[:n]", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def tubeid():\n return binascii.hexlify(os.urandom(12))", "def generate_nonce(length=8):\n return ''.join([str(random.randint(0, 9)) for i in range(length)])", "def get_random_secret_key(cls, size=None):\n if not size:\n size = cls.default_secret_key_size\n return os.urandom(size)", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def GET(self):\n return base64.b64encode(hashlib.sha256( str(random.getrandbits(256)) ).digest(),\n random.choice(['rA','aZ','gQ','hH','hG','aR','DD'])).rstrip('==')", "def generate_key(self, size):\n key = bytearray()\n for i in range(0,size):\n random_byte = ord(os.urandom(1))\n key.append(random_byte)\n return key", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def gera_chave():\n AES_tamanho_chave = 32\n chave_aes_secreta = os.urandom(AES_tamanho_chave)\n return chave_aes_secreta", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def regenerate(self):\n self.secret_code = random.randint(self.min, self.max)", "def newKeyGenerate():\n generate()\n return '', 204", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return \"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def generate_iv():\n\n return get_random_bytes(IV_SIZE)", "def tokhex(length=10, urlsafe=False):\n if urlsafe is True:\n return secrets.token_urlsafe(length)\n return secrets.token_hex(length)", "def _random_bytes(n):\n return ''.join(map(chr, (random.randrange(256) for i in xrange(n))))", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def get_xsrf_token(self, offset=0):\n if not self.xsrf_secret:\n self.xsrf_secret = os.urandom(8)\n self.put()\n m = md5.new(self.xsrf_secret)\n email_str = self.lower_email\n if isinstance(email_str, unicode):\n email_str = email_str.encode('utf-8')\n m.update(self.lower_email)\n when = int(time.time()) // 3600 + offset\n m.update(str(when))\n return 
m.hexdigest()", "def generate_verification_code():\n new_ver_code = str(random.randint(1000000, 9999999))\n return new_ver_code", "def random_nonb64_string(length):\n return ''.join(\n random.choices('!@#$%^&*(){}[]', k=length)\n )", "def generate_aes_key ( ) :\n import hashlib\n sr = Crypto.Random.random.StrongRandom( )\n key_bits = sr.getrandbits( 256 )\n sha_key = hashlib.sha256( str( key_bits ) ).digest( )\n return sha_key", "def generate_key():\n return str(uuid.uuid4())", "def make_secret(length=SecretLength.GOOGLE_AUTH):\n if hasattr(length, \"value\"):\n length = length.value\n\n return token_bytes(length)", "def gen_site_secret(self, request, site_id=None, salt='', **kw):\n if site_id is None:\n site_id = self.gen_site_id(request)\n if site_id is None:\n return ''\n \"\"\" Generate site + uid specific secret \"\"\"\n secret_base = site_id + salt\n return sha1(secret_base).hexdigest()", "def generate_id(urandom=os.urandom, encode=base64.b64encode):\n return encode(urandom(12)).strip()", "def generate_key():\n return get_token_generator().generate_token()", "def get_nonce(length=16):\n characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n charlen = len(characters)\n return \"\".join([characters[SystemRandom().randint(0, charlen - 1)] for _ in range(0, length)])" ]
[ "0.751621", "0.7423662", "0.7333086", "0.7329716", "0.7236352", "0.7232258", "0.71953297", "0.71489805", "0.7082189", "0.70771956", "0.7053713", "0.70158905", "0.70001644", "0.6955148", "0.69448227", "0.6937748", "0.69271857", "0.692591", "0.69196445", "0.6904558", "0.6870434", "0.68677014", "0.68434286", "0.6815161", "0.68140215", "0.6792783", "0.67678404", "0.67356676", "0.67184037", "0.67014724", "0.6687232", "0.6671658", "0.66665393", "0.6652076", "0.6631812", "0.6631008", "0.66192305", "0.6612304", "0.6601965", "0.6579833", "0.6564515", "0.65533406", "0.65493417", "0.65304184", "0.6510059", "0.64999413", "0.6479024", "0.6472456", "0.6467723", "0.6465909", "0.6463142", "0.6459668", "0.6459632", "0.64583176", "0.64492816", "0.6431616", "0.64229006", "0.64229006", "0.6422482", "0.6422482", "0.64215493", "0.64142007", "0.6412025", "0.63923514", "0.6384574", "0.6363672", "0.6353733", "0.6343574", "0.63359696", "0.6332369", "0.6331287", "0.63268256", "0.6326456", "0.6301454", "0.6298179", "0.62834173", "0.6249751", "0.6235618", "0.6234101", "0.62338006", "0.623213", "0.62193227", "0.62148815", "0.62114984", "0.6210499", "0.61964047", "0.6189746", "0.6183829", "0.61822593", "0.61732787", "0.6167598", "0.61559236", "0.61553496", "0.6152297", "0.61517304", "0.61455977", "0.61448455", "0.6142036", "0.6115601", "0.6107761" ]
0.87860125
0
Read csrf secret from session if it exists; otherwise generate it and store in session
def get_csrf_secret():
    sess = managers.request_manager.get_request().session()
    secret = sess.get(csrf_secret_sess_var_name, None)
    if not secret:
        secret = gen_csrf_secret()
        sess[csrf_secret_sess_var_name] = secret
    return secret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def _GetXsrfKey():\n client = memcache.Client()\n xsrf_key = client.get('xsrf_key')\n if not xsrf_key:\n config = models.GetApplicationConfiguration()\n xsrf_key = config.xsrf_key\n client.set('xsrf_key', xsrf_key)\n return xsrf_key", "def generate_csrf_token() -> int:\r\n ...", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. 
If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def init_base_cookie(self):\n url = 'https://ceq.nkust.edu.tw/'\n try:\n res = self.main_session.get(url=url)\n if res.status_code == 200:\n soup = BeautifulSoup(res.text, 'html.parser')\n\n self.csrf_key = soup.find(\n 'input', {'name': '__RequestVerificationToken'}).get('value')\n if self.csrf_key != \"\":\n return True\n except:\n return False\n return False", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def _fetch_csrf(self) -> str:\n login_page = self._session.get(\"https://www.redpocket.com/login\")\n csrf_element = re.search(\n r'<input type=\"hidden\" name=\"csrf\" value=\"([\\w|-]+)\">', login_page.text\n )\n\n if csrf_element:\n csrf = csrf_element.group(1)\n self._logger.debug(\"Using CSRF: %s\", csrf)\n return csrf\n\n raise RedPocketException(\"Failed to get CSRF token from login page!\")", "def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n 
print(\"csrf_token: \" + csrf_token)\r\n return csrf_token", "def _get_csrf(self):\n\n csrf_token_header_name = \"X-CsrfToken\"\n if csrf_token_header_name not in self.headers:\n home_head_response = requests.head(self.BASE_URL)\n self.cookies.update(home_head_response.cookies)\n csrf_token = self.cookies[\"csrftoken\"]\n csrf_header = {csrf_token_header_name: csrf_token}\n self.headers.update(csrf_header)", "def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def valid_session():\n def wrapper(f):\n @wraps(f)\n def decorated_view(*args, **kwargs):\n\n secret_key = current_app.config['SESSION_PASSWORD']\n session_token = request.cookies.get(SESSION_COOKIE_NAME, None)\n if not session_token or not session_token_valid(secret_key, session_token):\n # .. when there are views\n # -- url_for('authentication_views.user_login')\n login_url = request.host_url[:-1] + \"/login/\"\n assert request.url.startswith(request.host_url)\n next_hop_path = request.url[len(request.host_url):]\n next_hop = urlencode({'next': next_hop_path})\n return redirect(login_url + \"?\" + next_hop)\n\n return f(*args, **kwargs)\n return decorated_view\n return wrapper", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! 
Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def _server_cookie_secret() -> str:\n return secrets.token_hex()", "def _shib_get_token(self): # pragma: no cover\n\n shibCookie = None\n for cookie in self._session.cookies:\n if \"shibsession\" in cookie.name:\n shibCookie = cookie\n break\n\n if not shibCookie:\n warnings.warn(\"No session token found.\", AuthenticationWarning)\n\n return shibCookie", "def test_csrf_token_session_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n csrf_client.logout()\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per session')", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()", "def csrf_protect_app(app):\n\n @app.before_request\n def csrf_protect():\n if request.path == \"/api/login\" or session.get('bypass_csrf', False):\n # Bypass csrf protection for trusted api sessions (see /api/login_for_apps):\n return\n if request.method == \"POST\":\n token = session.get('_csrf_token', None)\n header = request.headers.get('X-csrf', None)\n if not token or not header or token != header:\n abort(make_response(\"Invalid x-csrf token\", 403))\n\n def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = random_token()\n return session['_csrf_token']\n\n app.jinja_env.globals['csrf_token'] = generate_csrf_token", "def _generate_token_value():\n return secrets.token_urlsafe()", "def get_review_token(site):\n return site.get_tokens([\"csrf\"])[\"csrf\"]", "def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()", "def _get_enc_secret():\n return current_app.config.get('ACCOUNT_SECRET_KEY')", "def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf", "def _get_initial_token(url):\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(url)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })", "def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)", "def get_token(self, session, **kwargs):\n return None", "def create_csrf_token(salt=''):\n\tif not salt:\n\t\tsalt = Random.new().read(csrf_salt_len).encode('hex')\n\th = 
SHA256.new()\n\th.update(get_csrf_secret() + salt)\n\treturn h.hexdigest() + salt", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def secret():\n pass", "def get_shared_secret(self):\n shared_secret = self.charm_config.get(\"shared-secret\")\n saved_shared_secret = self.kv.get(\"shared-secret\")\n if not shared_secret:\n if saved_shared_secret:\n return saved_shared_secret\n else:\n shared_secret = self.random_string(16)\n self.kv.set(\"shared-secret\", shared_secret)\n return shared_secret", "def setup_csrf_protection(app, cookie_name='r3csrfprot'):\n\n middleware = CSRFProtectionMiddleware(app, cookie_name)", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))", "def test_csrf_inject(self, mock_csrf):\n mw = CSRFHeaderInject()\n request = MagicMock()\n response = MagicMock()\n mw.process_response(request, response)\n response.set_cookie.assert_called_with('csrftoken', 'csrf-token', max_age=31449600)", "def generate_keyfile(csrf_key, session_key):\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n if os.path.exists(file_name):\n if options.force is None:\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n write_file(output)\n else:\n write_file(output)", "def req_session():\n request = Request()\n session = PoorSession(request.secret_key)\n session.data['test'] = True\n session.write()\n request.cookies = session.cookie\n return request", "def _get_xsrf(self):\n self.session.get('https://www.zhihu.com/', allow_redirects=False)\n for c in self.session.cookies:\n if c.name == '_xsrf':\n return c.value\n raise AssertionError(' 获取 xsrf 失败')", "def trust_session(request):\n from .models import SESSION_TOKEN_KEY, Agent\n\n if request.user.is_authenticated:\n # We need a token to link this agent to the current session. 
It's\n # strictly internal, so it doesn't have to be cryptographically sound,\n # just probabalistically unique.\n token = randrange(2**32)\n\n request.session[SESSION_TOKEN_KEY] = token\n request.agent = Agent.session_agent(request.user, token)", "def csrf(request):\n return django_csrf(request)['csrf_token']", "def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)", "def inbound(request):\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n # Use same token next time\n request.context['csrf_token'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.context['csrf_token'] = _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n # Note that get_host() includes the port.\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n raise Response(403, reason)\n\n if csrf_token is None:\n # No CSRF cookie. 
For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)", "def secret() -> None:\n pass", "def test_csrf():\n\n # The authenticate method must not be altered for this test to be valid.\n assert (\n SessionAuthentication.authenticate\n is CsrfExemptSessionAuthentication.authenticate\n )\n\n # The `enforce_csrf` method should just pass with any request.\n assert CsrfExemptSessionAuthentication().enforce_csrf(\"foo\") is None", "def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value", "def get_secret_key():\n return get_config_handler().get_secret_key()", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def verify_csrf_token(token=''):\n\tif not token:\n\t\ttoken = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, \"\")\n\t\tif token:\n\t\t\ttoken = token[0]\n\tif len(token) != 2 * digest_size + 2 * csrf_salt_len:\n\t\tdebug('Incorrect csrf token length')\n\t\traise VDOM_csrf_exception()\n\tsalt = token[2*digest_size:]\n\tif token != create_csrf_token(salt):\n\t\tdebug('Incorrect csrf token value')\n\t\traise VDOM_csrf_exception()", "def test_write_load(self, req_session):\n session = PoorSession(SECRET_KEY)\n session.load(req_session.cookies)\n assert session.data == {'test': True}", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def env_var_aws_session_token():\n return 'AWS_SESSION_TOKEN'", "def check_csrf(self):\n if (self.HTTP_X_CSRF_TOKEN in os.environ and\n self.is_csrf_token(os.environ[self.HTTP_X_CSRF_TOKEN])):\n pass\n else:\n common.render_error('Invalid CSRF token.')", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def make_token():\n return secrets.token_urlsafe(36)", "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def shared_secret(self):\n return self.__shared_secret", "def 
generate_edit_credentials(self):\n params = {\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json'\n }\n response = requests.get(self.base_url, params=params, cookies=self.cookie_jar)\n self.edit_token = response.json()['query']['tokens']['csrftoken']\n\n self.cookie_jar.update(response.cookies)\n\n return self.cookie_jar", "def extract_token_from_cookie(request):\n try:\n token = request.headers.cookie['csrf_token'].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache due to the\n # Vary header we set below. Don't set it on callbacks, because we use IP\n # filtering there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}", "def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()", "def trusted(req):\n # Get the CRSF token from the user session.\n session = req.environ.get('rex.session', {})\n session_csrf_token = session.get('_csrf_token')\n # Get the token value from the request.\n request_csrf_token = req.environ.get('HTTP_X_CSRF_TOKEN') or \\\n req.params.get('_csrf_token')\n # Check if the values coincide.\n if not session_csrf_token or not request_csrf_token:\n return False\n is_equal = True\n for ch1, ch2 in itertools.zip_longest(session_csrf_token,\n request_csrf_token):\n is_equal &= (ch1 == ch2)\n return is_equal", "async def save(self, request, response) -> None:\n value = self.cipher.encrypt(request.session.dumps().encode())\n cookie = f'{self.cookie_name}={value.decode()}; SameSite=Lax'\n response.headers['Set-Cookie'] = cookie", "def parse_csrftoken(text):\n the_match = re.search(r'csrf_token.*?value=\"(.*?)\"', text, re.M | re.S)\n if the_match:\n return the_match.group(1)\n\n return ''", "def get_request_authentication():\n return os.urandom(16)", "def encrypt_cookie_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encrypt_cookie_secret\")", "def encrypt_cookie_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encrypt_cookie_secret\")", "def test_good_with_no_prior_key(self):\n # config seems to be shared across tests, so we have to specifically set\n # it to None.\n config.set(xsrf_token_key=None)\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def get_wharton_sessionid(public=False):\n sessionid = request.args.get('sessionid')\n cache_key = 'studyspaces:gsr:sessionid'\n\n if sessionid:\n return sessionid\n\n if public:\n if db.exists(cache_key):\n return db.get(cache_key).decode('utf8')\n\n return os.environ.get('GSR_SESSIONID')\n\n return 
None", "def create_temporary_secret():\n return uuid.uuid4().hex", "def get_secure_cookie( name, value=None ):", "def secret(self):\n\n return self.info.get('env', {}).get('APP_SECRET')", "def _fresh_secret(self, request: SecretRequest) -> Secret:\n if type(request) is AWSSecretRequest:\n secret = self.vault.aws(request.role, request.mount_point)\n elif type(request) is DatabaseSecretRequest:\n if request.engine.split('+', 1)[0] == MYSQL:\n secret = self.vault.mysql(request.role, request.mount_point)\n else:\n raise NotImplementedError('No other database engine available')\n elif type(request) is GenericSecretRequest:\n secret = self.vault.generic(request.path, request.key,\n request.mount_point)\n return secret", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def test_csrf_token_request_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per request')", "def token_required(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n #wrapper of current func\n\n #take csrf-token from cookies\n token = request.cookies.get('token')\n if not token:\n #returning login page\n flash(\"Authentication required\", category='danger')\n return redirect(url_for('login'))\n #decoding token\n try:\n uuid = jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])['user_id']\n except:\n #returning login page\n flash(\"Token timeout\", category='danger')\n return redirect(url_for('login'))\n #get current user\n user = User.query.filter_by(uuid=uuid).first()\n if not user:\n #returning login page\n flash(\"Profile error\", category='danger')\n return redirect(url_for('login'))\n return func(self, *args, **kwargs)\n\n return wrapper", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()", "def client_secret(self) -> str:", "def load_session_credentials(request_handler):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n userid = session.get_secure_cookie(name='userid')\n if userid:\n 
return userid, StorageByKeyName(Credentials, userid, 'credentials').get()\n else:\n return None, None", "def extract_session_from_secret(secret_key, session_token):\n if secret_key and '@@@' in secret_key and not session_token:\n return secret_key.split('@@@')[0], secret_key.split('@@@')[1]\n else:\n return secret_key, session_token", "def require_session(handler: _HandlerWithSession) -> Handler:\n\n @functools.wraps(handler)\n async def decorated(request: web.Request) -> web.Response:\n request_session_token = request.match_info[\"session\"]\n session = session_from_request(request)\n if not session or request_session_token != session.token:\n LOG.warning(f\"request for invalid session {request_session_token}\")\n return web.json_response(\n data={\n \"error\": \"bad-token\",\n \"message\": f\"No such session {request_session_token}\",\n },\n status=404,\n )\n return await handler(request, session)\n\n return decorated", "def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()", "def xsrf_protected(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n non_xsrf_protected_verbs = ['options', 'head', 'get']\n if (self.request.method.lower() in non_xsrf_protected_verbs or\n self._RequestContainsValidXsrfToken()):\n return f(self, *args, **kwargs)\n else:\n try:\n self.XsrfFail()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n return wrapper", "def secretstore():\n pass" ]
[ "0.7415861", "0.74125195", "0.72343606", "0.69048315", "0.686525", "0.6681955", "0.65113086", "0.64043695", "0.6368639", "0.6332918", "0.6305956", "0.625523", "0.6235409", "0.6150814", "0.6095577", "0.60392517", "0.6010801", "0.59956753", "0.59555525", "0.59422135", "0.59069234", "0.5896854", "0.584515", "0.57109857", "0.56914985", "0.5688088", "0.5679138", "0.5660914", "0.56388474", "0.5632423", "0.5607104", "0.5595699", "0.55814946", "0.55802304", "0.5560207", "0.5537029", "0.5532557", "0.55040175", "0.54918146", "0.5490581", "0.5481861", "0.54755783", "0.5472813", "0.54713637", "0.5455887", "0.5452802", "0.54499817", "0.5444611", "0.54275346", "0.54254943", "0.5419723", "0.54193974", "0.5417905", "0.5417837", "0.54150933", "0.54085314", "0.5397036", "0.5395006", "0.5343908", "0.53328353", "0.5319321", "0.5318346", "0.5309546", "0.5308455", "0.52969843", "0.5278145", "0.526826", "0.52614665", "0.5257633", "0.5257632", "0.5256213", "0.52522033", "0.52405304", "0.5230519", "0.5224347", "0.5222648", "0.521822", "0.521822", "0.52133024", "0.5212341", "0.52015674", "0.5198861", "0.51786137", "0.51339376", "0.5121181", "0.51115537", "0.51110923", "0.51104563", "0.5106184", "0.5104294", "0.5102886", "0.5102811", "0.5099186", "0.50851023", "0.50814134", "0.50813645", "0.5080456", "0.508016", "0.5077414", "0.50688535" ]
0.7971118
0
Generate csrf token based on existing/new csrf secret and provided/new salt
def create_csrf_token(salt=''):
    if not salt:
        salt = Random.new().read(csrf_salt_len).encode('hex')
    h = SHA256.new()
    h.update(get_csrf_secret() + salt)
    return h.hexdigest() + salt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def generate_csrf_token() -> int:\r\n ...", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def make_token():\n return secrets.token_urlsafe(36)", "def _generate_token_value():\n return secrets.token_urlsafe()", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. 
Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth", "def generate_state_token():\n chars = (ascii_letters + digits)\n rand = SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(len(chars)))\n return hmac.new(\n config.SECRET_KEY.encode('utf-8'),\n random_string.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_xsrf_token(self, offset=0):\n if not self.xsrf_secret:\n self.xsrf_secret = os.urandom(8)\n self.put()\n m = md5.new(self.xsrf_secret)\n email_str = self.lower_email\n if isinstance(email_str, unicode):\n email_str = email_str.encode('utf-8')\n m.update(self.lower_email)\n when = int(time.time()) // 3600 + offset\n m.update(str(when))\n return m.hexdigest()", "def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def gen_site_secret(self, request, site_id=None, salt='', **kw):\n if site_id is None:\n site_id = self.gen_site_id(request)\n if site_id is None:\n return ''\n \"\"\" Generate site + uid specific secret \"\"\"\n secret_base = site_id + salt\n return 
sha1(secret_base).hexdigest()", "def get_salt():\n return os.urandom(32)", "def _server_cookie_secret() -> str:\n return secrets.token_hex()", "def __generate_session_token(self):\n\n return get_random_string(length=32)", "def generate_token():\n return uuid4()", "def generate_token(login, password):\n time = datetime.datetime.now().timestamp()\n raw_string = str(login) + str(password) + str(time)\n return hashlib.sha256(str(raw_string).encode('utf-8')).hexdigest()", "def verify_csrf_token(token=''):\n\tif not token:\n\t\ttoken = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, \"\")\n\t\tif token:\n\t\t\ttoken = token[0]\n\tif len(token) != 2 * digest_size + 2 * csrf_salt_len:\n\t\tdebug('Incorrect csrf token length')\n\t\traise VDOM_csrf_exception()\n\tsalt = token[2*digest_size:]\n\tif token != create_csrf_token(salt):\n\t\tdebug('Incorrect csrf token value')\n\t\traise VDOM_csrf_exception()", "def generate_token(secret, message=None):\n timestamp = str(int(time.time()))\n return '{}:{}'.format(\n timestamp,\n get_hmac(secret, str(message) + timestamp),\n )", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def create_temporary_secret():\n return uuid.uuid4().hex", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def generate_auth_token(self):\n s = Serializer(app.config['SECRET_KEY'])\n return s.dumps({'email': self.email})", "def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })", "def generate_new_token(self):\n self.access_token = random_auth_key()", "def get_review_token(site):\n return site.get_tokens([\"csrf\"])[\"csrf\"]", "def generate_auth_token(self, expiration):\n ser = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return ser.dumps({'id': self.id}).decode('utf-8')", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def _create_security_token(user):\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()", "def 
generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf", "def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def regenerate_authentication_token(self):\n new_token = os.urandom(self.TOKEN_LENGTH).encode('hex')\n expires = int(time.time()) + Auth.SESSION_DURATION\n self.write(self.token_filename, ('%s %d' % (new_token, expires)))\n return new_token", "def identity_token_generator(app_key, app_secret, uid=None, role=None):\n return lambda nonce: generate_identity_token(app_key, app_secret, nonce, uid, role)", "def generate_nonce():\n return uuid4().hex", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n 
csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n print(\"csrf_token: \" + csrf_token)\r\n return csrf_token", "def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def get_request_authentication():\n return os.urandom(16)", "def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def tokhex(length=10, urlsafe=False):\n if urlsafe is True:\n return secrets.token_urlsafe(length)\n return secrets.token_hex(length)", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def GetCspNonce():\n NONCE_LENGTH = 16\n return base64.b64encode(os.urandom(NONCE_LENGTH))", "def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))", "def generate_password_reset_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'reset': self.id})", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def generate_salt(size):\n return hexlify(urandom(size)).decode()", "def generate_tokens(callback_key):\n random_hash = generate_random_security_hash()\n\n return generate_security_hash(random_hash, callback_key), random_hash", "def build_access_token_guest():\n return do_build_access_token(tenant_id='guest_tenant_id')", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return \"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. 
If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def generate_reset_password_token(self, expiration=3600):\n ser = Serializer(current_app.config['SECRET_KEY'], expiration)\n return ser.dumps({'reset_password': self.id}).decode('utf-8')", "def gen_salt(salt_len):\n return bytes([random.choice(SALT_SYMBOLS) for _ in range(salt_len)])", "def generate_edit_credentials(self):\n params = {\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json'\n }\n response = requests.get(self.base_url, params=params, cookies=self.cookie_jar)\n self.edit_token = response.json()['query']['tokens']['csrftoken']\n\n self.cookie_jar.update(response.cookies)\n\n return self.cookie_jar", "def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value", "def token(db):\n token = TokenFactory()\n db.session.commit()\n return token", "def generate_token(self, length=6, valid_secs=300, commit=True):\n self.token = random_number_token(length)\n self.valid_until = timezone.now() + timedelta(seconds=valid_secs)\n if commit:\n self.save()", "def build_evil_access_token():\n return do_build_access_token(tenant_id='intility_tenant_id', evil=True)", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def generate_key():\n return get_token_generator().generate_token()", "def generate_token(self):\n\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)", "def _GetXsrfKey():\n client = memcache.Client()\n xsrf_key = client.get('xsrf_key')\n if not xsrf_key:\n config = models.GetApplicationConfiguration()\n xsrf_key = config.xsrf_key\n client.set('xsrf_key', xsrf_key)\n return xsrf_key", "def create_nonce():\n default_seed = 'ifh2847fhsn\"lqOEYd@#Djh(&'\n hash = sha.new(default_seed)\n hash.update(str(datetime.utcnow()))\n return hash.hexdigest()", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = 
request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def generate_access_token_cache_key(token):\n\n return 'wopi_access_token_' + str(token)", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def generate_esgf_slcs_token(self):\n client = ESGFSLCSClient(self.request)\n if client.get_token():\n try:\n client.refresh_token()\n except Exception as err:\n self.session.flash('Could not refresh token: {}'.format(escape(err.message)), queue=\"danger\")\n else:\n self.session.flash('ESGF token was updated.', queue=\"success\")\n return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))\n else:\n try:\n auth_url = client.authorize()\n except Exception as err:\n self.session.flash('Could not retrieve token: {}'.format(escape(err.message)), queue=\"danger\")\n return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))\n else:\n return HTTPFound(location=auth_url)", "def generate_password_reset_token(self, expiration=3600):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"], expiration)\n return serializer.dumps({\"reset\": self.id}).decode(\"utf-8\")", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def client_secret(self) -> str:" ]
[ "0.768297", "0.7293141", "0.70243514", "0.67706597", "0.6689844", "0.6654554", "0.6647972", "0.66283864", "0.6550067", "0.6501651", "0.64808244", "0.6338237", "0.6312519", "0.6307513", "0.6307513", "0.6238732", "0.62366146", "0.6235569", "0.6225915", "0.6221798", "0.6175029", "0.6172401", "0.61631954", "0.60913724", "0.6085538", "0.60345185", "0.59842396", "0.59575176", "0.59305733", "0.59235656", "0.59122694", "0.58992743", "0.589816", "0.58667505", "0.5830906", "0.58189785", "0.5799931", "0.5771341", "0.57710034", "0.57576275", "0.5753529", "0.5751735", "0.5742961", "0.57272315", "0.5723904", "0.5720122", "0.5709907", "0.5704354", "0.569574", "0.56810796", "0.56415737", "0.56396663", "0.563134", "0.5628145", "0.56142277", "0.559907", "0.55981255", "0.55728143", "0.55658954", "0.5551059", "0.5535349", "0.55332655", "0.5498899", "0.54600996", "0.54588926", "0.5457242", "0.5451261", "0.54381746", "0.5435849", "0.5435396", "0.5428498", "0.5428413", "0.54278165", "0.5421868", "0.54137814", "0.5413298", "0.5383096", "0.5373658", "0.53735596", "0.5367574", "0.5362101", "0.5350351", "0.5328211", "0.53266305", "0.53265685", "0.5325756", "0.5325174", "0.532159", "0.5303799", "0.529949", "0.529444", "0.52934074", "0.5273743", "0.52698034", "0.5264173", "0.5263176", "0.5261828", "0.52616173", "0.5255664", "0.52519286" ]
0.80260617
0
Verify csrf token against csrf secret from the session; if token is not provided it's read from request arguments
def verify_csrf_token(token=''):
    if not token:
        token = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, "")
        if token:
            token = token[0]
    if len(token) != 2 * digest_size + 2 * csrf_salt_len:
        debug('Incorrect csrf token length')
        raise VDOM_csrf_exception()
    salt = token[2*digest_size:]
    if token != create_csrf_token(salt):
        debug('Incorrect csrf token value')
        raise VDOM_csrf_exception()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def check_csrf(self):\n if (self.HTTP_X_CSRF_TOKEN in os.environ and\n self.is_csrf_token(os.environ[self.HTTP_X_CSRF_TOKEN])):\n pass\n else:\n common.render_error('Invalid CSRF token.')", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def trusted(req):\n # Get the CRSF token from the user session.\n session = req.environ.get('rex.session', {})\n session_csrf_token = session.get('_csrf_token')\n # Get the token value from the request.\n request_csrf_token = req.environ.get('HTTP_X_CSRF_TOKEN') or \\\n req.params.get('_csrf_token')\n # Check if the values coincide.\n if not session_csrf_token or not request_csrf_token:\n return False\n is_equal = True\n for ch1, ch2 in itertools.zip_longest(session_csrf_token,\n request_csrf_token):\n is_equal &= (ch1 == ch2)\n return is_equal", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def has_csrf_token(self,content,url,is_input=True):\n\t\tif content:\n\t\t\tprotected = False\n\t\t\tcontent = 
content.strip()\n\t\t\tfor token in self.tokens:\n\t\t\t\ttoken = token.lower().strip()\n\t\t\t\tif token in content:\n\t\t\t\t\tprotected = True\n\t\t\t\n\t\t\tif not protected:\n\t\t\t\tif is_input:\n\t\t\t\t\tvul = \"inputs at \"+url+ \" is missing csrf token\"\n\t\t\t\t\tif vul not in self.vuln_inputs:\n\t\t\t\t\t\tself.vuln_inputs.append(vul)\n\t\t\t\telse:\n\t\t\t\t\tvul = \"the url \"+url+\" parameters is missing csrf token\"\n\t\t\t\t\tif vul not in self.vuln_urls:\n\t\t\t\t\t\tself.vuln_urls.append(vul)", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf", "def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)", "def validate_against_csrf(event, Validator=CSRFValidator):\n \n request = event.request\n settings = request.registry.settings\n \n # Only validate if enabled.\n if not settings.get('csrf.validate', True):\n return\n \n # Ignore specified routes.\n matched_route = request.matched_route\n ignore_routes = settings.get('csrf.ignore_routes', None)\n if matched_route and ignore_routes:\n if matched_route.name in ignore_routes.split():\n return\n \n # Ignore specified paths.\n ignore_paths = settings.get('csrf.ignore_paths', None)\n if ignore_paths:\n for path in ignore_paths.split():\n if request.path.startswith(path):\n return\n \n session_token = request.session.get_csrf_token()\n try:\n Validator(session_token).validate(request)\n except CSRFError:\n raise HTTPUnauthorized", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def test_csrf():\n\n # The authenticate method must not be altered for this test to be valid.\n assert (\n SessionAuthentication.authenticate\n is CsrfExemptSessionAuthentication.authenticate\n )\n\n # The `enforce_csrf` method should just pass with any request.\n assert CsrfExemptSessionAuthentication().enforce_csrf(\"foo\") is None", "def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)", "def verify_token(self, token):\n return False", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def is_csrf_token(self, candidate_csrf_token):\n valid_token = bytearray(self.get_csrf_token())\n candidate = bytearray(candidate_csrf_token)\n return constant_time_equals(valid_token, candidate)", "async def validate_token(self, token):", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. 
If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()", "def inbound(request):\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n # Use same token next time\n request.context['csrf_token'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.context['csrf_token'] = _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n # Note that get_host() includes the port.\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n raise Response(403, reason)\n\n if csrf_token is None:\n # No CSRF cookie. 
For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def validate_token(self, payload, headers, request):\n token = headers.get(self.TOKEN_NAME, \"\")\n\n # no token\n if self.verify == VerificationMethod.NONE:\n # do nothing as no method was chosen\n pass\n\n # static token\n elif self.verify == VerificationMethod.TOKEN:\n if not compare_digest(token, self.token):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n # hmac token\n elif self.verify == VerificationMethod.HMAC:\n digest = hmac.new(self.secret.encode('utf-8'), request.body, hashlib.sha256).digest()\n computed_hmac = base64.b64encode(digest)\n if not hmac.compare_digest(computed_hmac, token.encode('utf-8')):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n return True", "def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))", "def decoratedCheckToken(*args, **kwargs):\n if \"token\" not in request.headers:\n raise InvalidUsage(\"Must pass a token!\")\n\n # Execute if the token matches\n logger.debug(\"Token: {0}\".format(request.headers[\"token\"]))\n if request.headers[\"token\"] == receiverParameters[\"apiToken\"]:\n return func(*args, **kwargs)\n\n # Note that it is invalid otherwise\n raise InvalidUsage(\"Invalid token!\")", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def token_required(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n #wrapper of current func\n\n #take csrf-token from cookies\n token = request.cookies.get('token')\n if not token:\n #returning login page\n flash(\"Authentication required\", category='danger')\n return redirect(url_for('login'))\n #decoding token\n try:\n uuid = jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])['user_id']\n except:\n #returning login page\n flash(\"Token timeout\", category='danger')\n return redirect(url_for('login'))\n #get current user\n user = User.query.filter_by(uuid=uuid).first()\n if not user:\n #returning login page\n flash(\"Profile error\", category='danger')\n return redirect(url_for('login'))\n return func(self, *args, **kwargs)\n\n return wrapper", "def valid_session():\n def wrapper(f):\n @wraps(f)\n def decorated_view(*args, **kwargs):\n\n secret_key = current_app.config['SESSION_PASSWORD']\n session_token = request.cookies.get(SESSION_COOKIE_NAME, None)\n if not session_token or not session_token_valid(secret_key, 
session_token):\n # .. when there are views\n # -- url_for('authentication_views.user_login')\n login_url = request.host_url[:-1] + \"/login/\"\n assert request.url.startswith(request.host_url)\n next_hop_path = request.url[len(request.host_url):]\n next_hop = urlencode({'next': next_hop_path})\n return redirect(login_url + \"?\" + next_hop)\n\n return f(*args, **kwargs)\n return decorated_view\n return wrapper", "def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n print(\"csrf_token: \" + csrf_token)\r\n return csrf_token", "def csrf(request):\n return django_csrf(request)['csrf_token']", "def is_token_valid(self,pk,request):\n\n pass", "def xsrf_protected(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n non_xsrf_protected_verbs = ['options', 'head', 'get']\n if (self.request.method.lower() in non_xsrf_protected_verbs or\n self._RequestContainsValidXsrfToken()):\n return f(self, *args, **kwargs)\n else:\n try:\n self.XsrfFail()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n return wrapper", "async def tus_check_session(request: web.Request) -> web.Response:\n ctx: Context = request.app[\"ctx\"]\n secret = ctx.local_config[\"storage-proxy\"][\"secret\"]\n async with check_params(\n request,\n t.Dict(\n {\n t.Key(\"token\"): tx.JsonWebToken(\n secret=secret, inner_iv=upload_token_data_iv\n ),\n }\n ),\n read_from=CheckParamSource.QUERY,\n ) as params:\n token_data = params[\"token\"]\n async with ctx.get_volume(token_data[\"volume\"]) as volume:\n headers = await prepare_tus_session_headers(request, token_data, volume)\n return web.Response(headers=headers)", "def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })", "def test_csrf_inject(self, mock_csrf):\n mw = CSRFHeaderInject()\n request = MagicMock()\n response = MagicMock()\n mw.process_response(request, response)\n response.set_cookie.assert_called_with('csrftoken', 'csrf-token', max_age=31449600)", "def is_valid_xsrf_token(self, action):\n token = self.request.get('xsrf_token')\n return token and XsrfTokenManager.is_xsrf_token_valid(token, action)", "def test_csrf_token_request_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per request')", "def validate_webui_token(from_cookie=True, session_token=None):\n if from_cookie:\n 
session_token = request.cookies.get('x-rucio-auth-token')\n if session_token:\n session_token = unquote(session_token)\n valid_token_dict = auth.validate_auth_token(session_token)\n valid_token_dict['token'] = session_token # pylint: disable=E1137\n return valid_token_dict", "def _get_csrf(self):\n\n csrf_token_header_name = \"X-CsrfToken\"\n if csrf_token_header_name not in self.headers:\n home_head_response = requests.head(self.BASE_URL)\n self.cookies.update(home_head_response.cookies)\n csrf_token = self.cookies[\"csrftoken\"]\n csrf_header = {csrf_token_header_name: csrf_token}\n self.headers.update(csrf_header)", "def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token", "def generate_csrf_token() -> int:\r\n ...", "def enforce_csrf(request):\n check = CSRFCheck()\n check.process_request(request)\n reason = check.process_view(request, None, (), {})\n if reason:\n # CSRF failed, bail with explicit error message\n raise NotAuthenticated(\"CSRF validation failed: %s\" % reason)", "def _validar_token(self):\n\n\t\ttoken = request.headers.get(\"Authorization\").split(\" \")[1]\n\n\t\tres = self.autenticador.validarToken(token)\n\t\tif(not res):\n\t\t\treturn False\n\t\treturn True", "def token_required(func):\n def func_wrapper(self, *args, **kwargs):\n auth_token = self.request.headers.get('X-Auth-Token',\n self.request.get('token', ''))\n namespace = self.request.route_kwargs.get('namespace', '')\n try:\n token = base64.urlsafe_b64decode(str(auth_token))\n except TypeError:\n self.abort(412, 'Please update your token')\n try:\n token = auth_models.AuthToken.query(\n auth_models.AuthToken.token == token\n ).get()\n except datastore_errors.BadValueError:\n self.abort(401, 'Incorrect token')\n try:\n payload = jwt.decode(token.token, config.JWT_SECRET,\n algorithms=config.JWT_HASH_ALGORITHM)\n except (jwt.DecodeError, AttributeError):\n return self.abort(401)\n if payload['namespace'] != namespace:\n return self.abort(412, 'Token payload is incorrect.')\n return func(self, *args, **kwargs)\n return func_wrapper", "def enforce_csrf(self, request):\n return # To not perform the csrf check previously happening", "def check_token(token):\n # Avoid SQL injection before doing requests\n # with the token and check the validity of it.\n token = MySQLdb.escape_string(token)\n if not validator_db.valid_token(token):\n return custom_response(400, responseMessage.BAD_TOKEN)\n\n return Response(status=200)", "def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())", "def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())", "def get_token(self, session, **kwargs):\n return None", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = 
None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def parse_csrftoken(text):\n the_match = re.search(r'csrf_token.*?value=\"(.*?)\"', text, re.M | re.S)\n if the_match:\n return the_match.group(1)\n\n return ''", "def test_good_with_no_prior_key(self):\n # config seems to be shared across tests, so we have to specifically set\n # it to None.\n config.set(xsrf_token_key=None)\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def test_csrf(self):\n self.assertContains(self.resp, 'csrfmiddlewaretoken')", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def test_csrf_token_session_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n csrf_client.logout()\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per session')", "def forward_validate_token_request(request):\n # TODO(garcianavalon) figure out if this method belongs to keystone client or if\n # there is a better way to do it/structure this\n keystone_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL')\n endpoint = '/access-tokens/{0}'.format(request.GET.get('access_token'))\n url = keystone_url + endpoint\n LOG.debug('API_KEYSTONE: GET to {0}'.format(url))\n response = requests.get(url)\n return response", "def get_review_token(site):\n return site.get_tokens([\"csrf\"])[\"csrf\"]", "def get_csrf_token(self, opener, cookiejar, login_url):\n opener.open(login_url)\n try:\n token = [x.value for x in cookiejar if x.name == 'csrftoken'][0]\n except Exception:\n token = None\n return token", "def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def _fetch_csrf(self) -> str:\n login_page = self._session.get(\"https://www.redpocket.com/login\")\n csrf_element = re.search(\n r'<input type=\"hidden\" name=\"csrf\" value=\"([\\w|-]+)\">', login_page.text\n )\n\n if csrf_element:\n csrf = csrf_element.group(1)\n self._logger.debug(\"Using CSRF: %s\", csrf)\n return csrf\n\n raise RedPocketException(\"Failed to get CSRF token from login page!\")", "def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if 
self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def test_csrf(self):\n response = self.client.get(self.url)\n self.assertContains(response, 'csrfmiddlewaretoken')", "def token_validation(self, realm=None, token=None):\n token_url = 'sessions/' + token + '?_action=validate'\n uri = self._uri_realm_creator(realm=realm, uri=token_url)\n data = self._post(uri=uri, data='{}', headers=self.headers)\n if data.status_code == 200:\n return data.json()\n else:\n return False", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = request.headers.get('Authorization', default=None)\n\n if token is None or token == '':\n return jsonify({\n 'error': 'token missing from request'\n }), 401\n\n # Validate that this token is legit\n try:\n reddit_client = RedditClient(\n client_id=app.config['CLIENT_ID'],\n client_secret=app.config['CLIENT_SECRET'],\n token=token\n )\n\n authenticated_user = reddit_client.authenticated_user\n except RedditClientAuthenticationException:\n return jsonify({\n 'error': 'invalid token'\n }), 401\n except RedditClientException:\n return jsonify({\n 'error': 'invalid token'\n }), 401\n\n return f(*args, **kwargs)\n\n return decorated", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def __update_token(self) -> bool:\r\n\r\n self.__sess.cookies.clear()\r\n\r\n r = self.__sess.get(f'{DOMAIN}/')\r\n m = re.search(r'var token = \\'(\\S{42,48})\\';', r.text)\r\n\r\n if not m:\r\n self.__log_msg(f'No token found!', is_err=True)\r\n return False\r\n\r\n old_token = self.__payload.get('token', None)\r\n self.__payload['token'] = m[1]\r\n\r\n # midnight today\r\n self.__token_expiration_date = datetime.now(self.__tz).replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(1)\r\n\r\n if old_token:\r\n self.__log_msg(f'TOKEN UPDATED: \"{old_token}\" -> \"{m[1]}\"')\r\n else:\r\n self.__log_msg(f'TOKEN SET: \"{m[1]}\"')\r\n return True", "def csrf_protect_app(app):\n\n @app.before_request\n def csrf_protect():\n if request.path == \"/api/login\" or session.get('bypass_csrf', False):\n # Bypass csrf protection for trusted api sessions (see /api/login_for_apps):\n return\n if request.method == \"POST\":\n token = session.get('_csrf_token', None)\n header = request.headers.get('X-csrf', None)\n if not token or not header or token != header:\n abort(make_response(\"Invalid x-csrf token\", 403))\n\n def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = random_token()\n return session['_csrf_token']\n\n app.jinja_env.globals['csrf_token'] = generate_csrf_token", "def csrf_failure(request, reason=''):\n # if request.is_ajax():\n # return JResponse(codes.get('csrf_invalid'))\n # return\n get_token(request)\n return JResponse(codes.get('csrf_invalid'), status=403)", "def 
setup_csrf_protection(app, cookie_name='r3csrfprot'):\n\n middleware = CSRFProtectionMiddleware(app, cookie_name)", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "async def token(request: Request):\n return get_token()", "def check_xsrf_cookie(self):\n pass", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def test_csrf(self):\n self.assertContains(self.response, 'csrfmiddlewaretoken')", "def test_csrf(self):\n self.assertContains(self.response, 'csrfmiddlewaretoken')", "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}), 401\n except jwt.InvalidSignatureError:\n return jsonify({\"message\": \"Signature verification failed\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid Token verification failed\"}), 401\n except KeyError:\n return jsonify({\"message\": \"Missing token\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def test_validate_token(self, mock_xsrf_validate_token):\n self.handler.validate_token('test token', 'user@example.com')\n mock_xsrf_validate_token.assert_called_once_with(\n 'test token', 'user@example.com',\n timeout=xsrf.REFRESH_TOKEN_TIMEOUT_SEC)", "def _get_initial_token(url):\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(url)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()", "def check_request(auth_dir = default_path()):\n if (REQUEST_URI in os.environ and\n not os.environ[REQUEST_URI] in LOGGED_OUT_ENDPOINTS):\n a = Auth(auth_dir)\n a.check_authentication()\n if REQUEST_METHOD in os.environ and os.environ[REQUEST_METHOD] == \"POST\":\n a.check_csrf()\n return True", "def extract_token_from_cookie(request):\n try:\n token = request.headers.cookie['csrf_token'].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache due to the\n # Vary header we set below. 
Don't set it on callbacks, because we use IP\n # filtering there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}", "def test_csrf_no_inject(self, mock_csrf):\n mw = CSRFHeaderInject()\n request = MagicMock()\n response = MagicMock()\n mw.process_response(request, response)\n response.set_cookie.assert_not_called()", "def verify_auth_token(cls, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n user = User.query.get(data['id'])\n if user and user.session_token == token:\n return user\n return None", "def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()", "def test_csfr(self):\n self.assertContains(self.response, 'csrfmiddlewaretoken')", "def validate_request_token():\n if not g.x_tapis_token:\n raise errors.NoTokenError(\"No access token found in the request.\")\n claims = validate_token(g.x_tapis_token)\n g.token_claims = claims\n g.username = claims.get('username')\n g.tenant_id = claims.get('tenant_id')\n g.account_type = claims.get('account_type')\n g.delegation = claims.get('delegation')", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def init_base_cookie(self):\n url = 'https://ceq.nkust.edu.tw/'\n try:\n res = self.main_session.get(url=url)\n if res.status_code == 200:\n soup = BeautifulSoup(res.text, 'html.parser')\n\n self.csrf_key = soup.find(\n 'input', {'name': '__RequestVerificationToken'}).get('value')\n if self.csrf_key != \"\":\n return True\n except:\n return False\n return False", "def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf" ]
[ "0.75995755", "0.73856825", "0.73174137", "0.71832883", "0.7028827", "0.6969976", "0.69217205", "0.68938845", "0.68345594", "0.68069303", "0.66359556", "0.66284615", "0.6612116", "0.644829", "0.63917625", "0.6361629", "0.6360583", "0.63481784", "0.6308884", "0.6281791", "0.6273652", "0.6272894", "0.62668514", "0.6259172", "0.6225697", "0.6222117", "0.6212538", "0.6201275", "0.619299", "0.6192089", "0.6183536", "0.6146473", "0.6139196", "0.61235696", "0.6114227", "0.6103838", "0.60953015", "0.60696435", "0.60686785", "0.60536987", "0.60495615", "0.60358655", "0.60332584", "0.6022177", "0.60081846", "0.5993505", "0.59890753", "0.5977006", "0.5971244", "0.59602743", "0.59523433", "0.59086215", "0.59027624", "0.59027624", "0.5894105", "0.58927166", "0.5891486", "0.588448", "0.5878975", "0.58689713", "0.58506507", "0.5845289", "0.58346194", "0.58240795", "0.58184904", "0.58107185", "0.5799438", "0.57893515", "0.5781119", "0.57795215", "0.57541275", "0.5754122", "0.5738692", "0.57386154", "0.5721871", "0.5717679", "0.5717006", "0.57134163", "0.5704203", "0.5702004", "0.5691988", "0.56871533", "0.56730276", "0.56730276", "0.56641793", "0.5656993", "0.56563735", "0.56537306", "0.5642947", "0.563126", "0.5624767", "0.5623481", "0.5621419", "0.5617859", "0.56114036", "0.5608663", "0.5601845", "0.5601731", "0.5598687", "0.5582391" ]
0.79013884
0
List starter arguments that must be applied conditionally based on version.
def get_version_specific_arguments(self, version: str):
    result = []
    semversion = semver.VersionInfo.parse(version)
    # Extended database names were introduced in 3.9.0
    if self.supports_extended_names:
        result += ["--args.all.database.extended-names-databases=true"]
    # Telemetry was introduced in 3.11.0
    if (semversion.major == 3 and semversion.minor >= 11) or (semversion.major > 3):
        result += ["--all.server.telemetrics-api=false"]
    # Column cache
    if (
        self.cfg.enterprise
        and semver.compare(version, "3.9.5") >= 0
        and semver.compare(version, "3.10.0") != 0
        and semver.compare(version, "3.10.1") != 0
    ):
        result += ["--args.all.arangosearch.columns-cache-limit=10000"]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prelim_opts_args(application):\n opts, args = application.parse_preliminary_options(\n ['--foo', '--verbose', 'src', 'setup.py', '--statistics', '--version'])\n\n assert opts.verbose\n assert args == ['--foo', 'src', 'setup.py', '--statistics', '--version']", "def full_args():\n return setup_args()", "def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args", "def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):\n raise NotImplementedError", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def test_pre_cli_list_version(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-l\", \"-v\"))\n assert \"test.yml\" in out and dork.__version__ in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def getargs(ver='%prog 0.0'):\n parser = argparse.ArgumentParser(\n description=open(__file__).read().split(\"'''\")[1],\n formatter_class=argparse.RawDescriptionHelpFormatter) \n #@todo: OptionParser is depreciated in Python 3.2. \n #Need to move to the new style of parser. \n parser.add_argument(\"--fasta_a_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--fasta_b_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--set_operation\", \n default=\"U\",\n help = \"The operation you want to do. \")\n \n args = parser.parse_args()\n \n fasta_a_name = args.fasta_a_name\n fasta_b_name = args.fasta_b_name\n set_operation = args.set_operation\n \n return fasta_a_name,fasta_b_name,set_operation", "def _test_argv(self, verbose, extra_argv):\r\n #self.package_path = os.path.abspath(self.package_path)\r\n argv = [__file__, self.package_path]\r\n argv += ['--verbosity', str(verbose)]\r\n if extra_argv:\r\n argv += extra_argv\r\n return argv", "def GetMissingArguments(self):\n return []", "def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional", "def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]", "def 
setup_cl_args(cls, parser):\n\n parser.add_argument(\n \"spec\", \n nargs=\"?\", \n default=\"\",\n help=\"Print info for this ptask spec. First checks relative to \" + \\\n \"the currently set ptask. If no match is found, checks \" + \\\n \"relative to the project root.\",\n )\n\n parser.add_argument(\n \"-v\", \"--versions\",\n dest=\"versions\",\n nargs=\"*\",\n default=[],\n help=\"Show subscriptions for the supplied verisons. Default \" + \\\n \"is current. A list of integers can be supplied for \" + \\\n \"specific versions, or 'all' for all versions.\"\n )", "def extra_args(self):\n return []", "def _get_arguments(self, rargs):\r\n\r\n args = []\r\n i = 0\r\n count = len(rargs)\r\n while i < count and not self._is_opt(rargs[i]):\r\n args.append(rargs[i])\r\n i += 1\r\n\r\n return args", "def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list", "def get_cli_arguments(self):\n pass", "def getPositionalArgs():", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def _get_add_package_args(self, package, type_option, version_option):\n raise NotImplementedError()", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def common_args(revision=None, branch=None, ssh_username=None, ssh_key=None):\n args = []\n if ssh_username or ssh_key:\n opt = ['-e', 'ssh']\n if ssh_username:\n opt[1] += ' -l %s' % ssh_username\n if ssh_key:\n opt[1] += ' -i %s' % ssh_key\n args.extend(opt)\n if revision:\n args.extend(['-r', revision])\n elif branch:\n if hg_ver() >= (1, 6, 0):\n args.extend(['-b', branch])\n return args", "def required_options():\n return [\n 'projects',\n 'old_milestone_names',\n 'new_milestone_name',\n 'statuses',\n 'bugs_importance',\n 'maximum'\n ]", "def add_version_args(repo_root, build_num, args):\n try:\n semver = semantic_version.Version(args['ZAZU_BUILD_VERSION'])\n except KeyError:\n semver = make_semver(repo_root, build_num)\n 
args['ZAZU_BUILD_VERSION'] = str(semver)\n args[\"ZAZU_BUILD_NUMBER\"] = str(build_num)\n args['ZAZU_BUILD_VERSION_PEP440'] = pep440_from_semver(semver)", "def optargs(args):\n parser = OptionParser()\n parser.add_option(\"-a\", \"--abandon\", dest=\"abandon_current\", default=False, action=\"store_true\",\n help=\"Abandon outstanding changes when updating to migration\")\n parser.add_option(\"-d\", \"--dry\", dest=\"dry_run\", default=False, action=\"store_true\",\n help=\"Just update the revision number, don't perform updates\")\n (options, args) = parser.parse_args(args)\n return (options, args)", "def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras", "def requirement_args(argv, want_paths=False, want_other=False):\n was_r = False\n for arg in argv:\n # Allow for requirements files named \"-r\", don't freak out if there's a\n # trailing \"-r\", etc.\n if was_r:\n if want_paths:\n yield arg\n was_r = False\n elif arg in ['-r', '--requirement']:\n was_r = True\n else:\n if want_other:\n yield arg", "def help_args():\n pass", "def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))", "def test_arg_version(run_nait) -> None: # type: ignore\n expected = nanaimo.version.__version__\n assert run_nait(['--version']).stdout.decode('utf-8').startswith(expected)", "def test_with_markers_and_lowest_version(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0,<=4.1,!=4.0;python_version=='2.7'\"\n \"or python_version=='2.6'\"]))", "def _get_arguments(doc):\n docstring = doc.format(program='FlashAirMusic', ffmpeg_default=FFMPEG_DEFAULT_BINARY or FFMPEG_NOT_FOUND_LABEL)\n require = getattr(pkg_resources, 'require') # Stupid linting error.\n project = [p for p in require('FlashAirMusic') if p.project_name == 'FlashAirMusic'][0]\n version = project.version\n return docoptcfg(docstring, config_option='--config', env_prefix='FAM_', version=version)", "def command_line_arguments():\n _parser.add_argument('-l', '--list', nargs='+',\n help='<Required> Set flag', required=True)\n _parser.add_argument(\"-A\", \"--access\", required=True,\n help=\"access to host => grant/revoke\")", "def check_help_version(args):\n parser = SMArgumentParser(usage=usage)\n parser.add_argument(\"-v\",\"--version\", action=\"version\",\n version=\"ShapeMapper v{}\".format(version()))\n\n p, rest = parser.parse_known_args(args)\n return", "def main(\n ctx: typer.Context,\n version: Annotated[\n bool,\n typer.Option(\n \"--version\",\n callback=_version_callback,\n is_eager=True,\n ),\n ] = False,\n) -> None:\n assert ctx # nosec\n assert version or not version # nosec", "def inputs() -> List[str]:\n return Invocation.current.required", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Simple Jarvice CLI\",\n add_help=False)\n auth_group = parser.add_argument_group('auth', description='Configuration')\n auth_group.add_argument('-username', help='Jarvice username')\n auth_group.add_argument('-apikey', help='Jarvice API key')\n auth_group.add_argument('-apiurl', help='Jarvice API URL',\n 
default='https://api.jarvice.com')\n auth_group.add_argument('-v', help='loglevel',\n choices=['INFO', 'WARN', 'DEBUG', 'CRITICAL'],\n dest='loglevel', default='CRITICAL')\n auth_group.add_argument(\n 'command',\n choices=['connect', 'submit', 'info', 'status',\n 'action', 'terminate', 'shutdown', 'jobs',\n 'output', 'tail', 'apps', 'machines', 'summary',\n 'download', 'upload', 'wait_for', 'shutdown_all',\n 'terminate_all', 'ls'])\n\n known, unknown = parser.parse_known_args()\n return known, unknown, parser", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def prepper() -> list:\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--band', dest='bandname',\n help=\"band to lookup\", type=str, nargs='+')\n parser.add_argument('-w', '--watchlist', dest='watchlist',\n action='store_true', help=\"add to watchlist\")\n parser.add_argument('-f', '--fetch', dest='fetcher',\n action='store_true', help=\"fetch a band (with -b flag)\\\n or no extra flag for all bands tracking current status\"\n )\n parser.add_argument('-c', '--config', dest='config', action='store_true',\n help='print out current config info')\n parser.add_argument('-v', '--version', action='version',\n version=\"%(prog)s (\"+__version__+\")\")\n parser.add_argument('-r', '--reset', dest='reset', action='store_true',\n help='resets everything back to original download')\n args = parser.parse_args()\n return args", "def exec_args(self, plugin_invoker):\n args = [\"--config\", plugin_invoker.files[\"config\"]]\n\n catalog_path = plugin_invoker.files[\"catalog\"]\n if file_has_data(catalog_path):\n if \"catalog\" in plugin_invoker.capabilities:\n args += [\"--catalog\", catalog_path]\n elif \"properties\" in plugin_invoker.capabilities:\n args += [\"--properties\", catalog_path]\n else:\n logger.warn(\n \"A catalog file was found, but it will be ignored as the extractor does not advertise the `catalog` or `properties` capability\"\n )\n\n state_path = plugin_invoker.files[\"state\"]\n if file_has_data(state_path):\n if \"state\" in plugin_invoker.capabilities:\n args += [\"--state\", state_path]\n else:\n logger.warn(\n \"A state file was found, but it will be ignored as the extractor does not advertise the `state` capability\"\n )\n\n return args", "def get_experimental_arguments(self, config_section):\n\n if config_section is None:\n return []\n\n try:\n arguments_string = self.shishito_support.get_opt(config_section, 'experimental_arguments')\n except configparser.NoOptionError:\n return []\n\n if arguments_string:\n arguments = arguments_string.split('--')\n return arguments[1:]\n else:\n return None", "def usage(cls):\n return {\n 'name': 'version',\n 'args': '<version name>',\n 'desc': 'selects the current release version'\n }", "def __get_cli_args():\r\n parser = argparse.ArgumentParser()\r\n o = parser.add_mutually_exclusive_group()\r\n o.add_argument('-a', action='store_true')\r\n o.add_argument('-b', action='store_true')\r\n parser.add_argument('-suite', help='suite file name for execution')\r\n parser.add_argument('-log', help='LOG level for the execution', default='INFO',\r\n choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'])\r\n args = parser.parse_args()\r\n return args", "def ovftool_args(self):\n return 
list(self._ovftool_args)", "def checkArguments ( ) :\r\n\r\n if len( sys.argv ) <= 1 : return None\r\n\r\n\r\n # splits the arguments that contain quotes\r\n \r\n wordList = [ ]\r\n\r\n for argument in sys.argv :\r\n\r\n wordList.extend( argument.split( '\"' ) )\r\n\r\n\r\n # places all the arguments that start with \"--\" at the end, and joins the others into words\r\n\r\n noMinusList = [ ]\r\n\r\n minusList = [ ]\r\n\r\n argument = \"\"\r\n\r\n for word in wordList[ 1 : ] :\r\n\r\n # strips spaces and quotes\r\n \r\n word = word.strip( \" \\\"'\" ) \r\n\r\n if word.startswith( \"--\" ) :\r\n\r\n minusList.append( word )\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n argument = \"\"\r\n\r\n elif argument == \"\" :\r\n\r\n argument = word\r\n\r\n else :\r\n\r\n argument = argument + \" \" + word\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n\r\n # library = 1st argument of the form \"-- ... /\" that exists\r\n\r\n libraryPath = None\r\n\r\n for argument in minusList :\r\n\r\n if ( ( argument.endswith( os.sep ) ) and ( os.path.exists( argument.strip( \"- \" ) ) ) ) :\r\n\r\n libraryPath = argument.strip( \"-\" )\r\n\r\n break\r\n\r\n # recomposes the command line\r\n \r\n sys.argv = wordList[ : 1 ] + noMinusList + minusList \r\n\r\n return libraryPath", "def getRequiredArguments(self):\n if self._initValue.needsArgument:\n return [self._initValue.getArgument()]\n else:\n return []", "def _handle_version_argument(self, arguments):\n if '--version' in arguments:\n LOGGER.info('%s version %s', __name__, __version__)\n return exit(0)", "def args():\n\n useDB = docopt(__doc__)['--from-db']\n snapFile = docopt(__doc__)['-i']\n # csvFile = docopt(__doc__)['-o']\n # utils.askErase(csvFile)\n\n return [snapFile, useDB]", "def _argsForSubprocess(self) -> list[str]:\n pass", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def get_args(self):\n req_argv = self._ptr.contents.argv\n args = 
[]\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args", "def list_args(args):\n run_list_args(args)", "def get_branching_arguments(self):\n return []", "def sysArgs(arguments):\n\n # if no args print usage\n if not arguments:\n print 'usage: [--auto] [--manual user_ID server_IP server_Port]'\n sys.exit()\n\n # --auto flag\n if arguments[0] == '--auto':\n return (USER_NAME, SERVER_HOST, SERVER_PORT)\n\n # --manual flag\n if arguments[0] == '--manual':\n return (arguments[1], arguments[2], int(arguments[3]))", "def parse_arguments(args):", "def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']", "def __getVersionArg(self, version):\n if version == \"WORKING\":\n return None\n else:\n return str(version)", "def get_args():\n\n # Make argparse object, add description\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=textwrap.dedent(\n '''\n summary:\n Takes a VCF file and parses the variants to produce a tab delimited \n variant report.\n '''\n ))\n\n\n # Version info\n parser.add_argument(\n '-v', '--version', action='version', \n version=\n '%(prog)s\\nversion:\\t{}\\nlast updated:\\t{}'.format(\n __version__, __updated__\n ))\n\n\n # Arguments (see help string for full descriptions):\n # REQUIRED: VCF file input\n parser.add_argument(\n 'input', action='store', \n help='Filepath to input VCF file. REQUIRED.'\n )\n\n\n # OPTIONAL: Output folder, defaults to current directory if empty\n parser.add_argument(\n '-O', '--output', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder where output reports will be saved. \n If missing, defaults to current directory.\n \\n'''\n ))\n\n\n # OPTIONAL: List of preferred transcripts\n parser.add_argument(\n '-t', '--transcripts', action='store', \n help=textwrap.dedent(\n '''\n Filepath to preferred transcripts file. \n\n Must be a tab seperated file with preferred transcripts in the second \n column. If missing, all entries in the preferred transcript column \n will be labelled as 'Unknown'.\n \\n'''\n ))\n\n\n # OPTIONAL: Preferred transcripts strictness\n parser.add_argument(\n '-T', '--transcript_strictness', action='store', default='low', \n help=textwrap.dedent(\n '''\n Strictness of matching while annotating preferred transcripts.\n Default setting is low.\n\n Options: \n\n high - Transcripts must be an exact match. \n e.g. NM_001007553.2 and NM_001007553.1 won't match,\n NM_001007553.1 and NM_001007553.1 will.\n\n low - Transcripts will match regardless of the version number. The \n version number is after the . at the end of a transcript \n e.g. NM_001007553.2 and NM_001007553.1 will match.\n \\n'''\n ))\n\n\n # OPTIONAL: either a single BED file or a folder containing BED \n # files, only one of these can be used\n bed_files = parser.add_mutually_exclusive_group()\n\n # Single BED file\n bed_files.add_argument(\n '-b', '--bed', action='store', \n help=textwrap.dedent(\n '''\n Filepath to a single BED file. \n\n The BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. 
This report will be saved in \n the same output folder as the original variant report, with the BED \n file name added to it.\n Cannot be used together with -B flag.\n \\n'''\n ))\n\n # Multiple BED files\n bed_files.add_argument(\n '-B', '--bed_folder', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder containing BED files. \n\n Each BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. These reports will be saved in\n a new folder within the output folder, named the same as the input BED\n folder. \n The file names will be the same as the original variant report, with \n the BED file name added to them.\n Cannot be used together with -b flag.\n \\n'''\n ))\n\n\n # OPTIONAL: File containing known variants\n parser.add_argument(\n '-k', '--known_variants', action='store', \n help=textwrap.dedent(\n '''\n Filepath to known variants file. \n\n This is a VCF file containing any known variants and an associated \n classification. The classification will be added to the variant \n report. The VCF must have an annotation named 'Classification' within \n the INFO field for each variant.\n\n Key:\n 0 - Artifact\n 1 - Benign\n 2 - Likely benign\n 3 - VUS\n 4 - Likely pathogenic\n 5 - Pathogenic\n \\n'''\n ))\n\n\n # OPTIONAL: File containing the headers for the report\n parser.add_argument(\n '-c', '--config', action='store', \n help=textwrap.dedent(\n '''\n Filepath to config file. \n\n This is a tab seperated text file containing a number of rows, where \n each row specifies an annotation to be included in the variant report.\n Only annotations included in the config file will be included in the\n variant report.\n The columns in the variant report will be in the same order as the \n order in which the annotations appear in the config file.\n\n Each row contains:\n\n Column 1 - Required. Annotation headers, these must match up with how\n they appear in the VCF (case sensitive).\n\n Column 2 - Required. Location where to find the data within the VCF, \n used to select the correct parsing function.\n options: info, format, vep, filter or pref.\n\n Column 3 - Optional. Alternative name for column header.\n\n To make a config file with all available options from a VCF, run:\n vcf_parse -l path_to_input_vcf > config.txt\n \\n'''\n ))\n\n\n # OPTIONAL: Lists all headers in a vcf then exits\n parser.add_argument(\n '-l', '--config_list', action='store_true', \n help=textwrap.dedent(\n '''\n Return a list of all availabile config to the screen, then exit.\n See CONFIG section for usage.\n \\n'''\n ))\n\n\n # OPTIONAL: Filter out any variants where FILTER column is not PASS\n parser.add_argument(\n '-F', '--filter_non_pass', action='store_true', \n help=textwrap.dedent(\n '''\n Filters out any variants where the FILTER annotation is not \n PASS. 
If missing then there will be no fitering based on the\n FILTER annotation.\n \\n'''\n ))\n\n return parser.parse_args()", "def test_cli_boolean_args(\n config,\n):\n args = CLI.parse_args([\"--version\"])\n assert args.version is True\n\n args = CLI.parse_args([\"--test\"])\n assert args.test is True\n\n args = CLI.parse_args([\"--print-config-file\"])\n assert args.print_config_file is True\n\n args = CLI.parse_args([\"-T\"])\n assert args.check_login is True", "def getOptions():\n\tdescription=\"\"\"This script takes an input fasta file of fusions and identifies all of the identical fusions.\"\"\"\n\tparser = argparse.ArgumentParser(description=description)\n\tparser.add_argument(\"-bowtie\", \"--bowtie_log_names\", dest=\"bowtie\", action='store', required=False, nargs = '*', help=\"bowtie log file names [Optional]\")\n\tparser.add_argument(\"-last\", \"--last_log_names\", dest=\"last\", action='store', required=False, help=\"LAST log file names [Optional]\")\n\tparser.add_argument(\"-treatment\",\"--treatment_name\",dest=\"treatment\",action='store',required=True,nargs= '*', help=\"Treatment variables [Required]\")\n\tparser.add_argument(\"-o\",\"--output_file\",dest=\"output\",action='store',required=True,help=\"Output file name [Required]\")\n\targs = parser.parse_args()\n\tif not args.bowtie and not args.last: #The user should give at least one bowtie or last log argument; otherwise the program does nothing\n\t parser.error('No input logs given; add -bowtie or -last')\n\treturn(args)", "def test_vargs(self):", "def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args", "def args(self) -> Optional[str]:\n return pulumi.get(self, \"args\")", "def test_cli_plus_defaults(mock_zip_file):\n\n option_subset = {'zip_path': str(mock_zip_file)}\n result = Packager.from_cli(['-z', str(mock_zip_file)]).options\n assert_dict_contains_subset(option_subset, result)\n\n option_subset = {'fields': ['kDefinition']}\n result = Packager.from_cli(['-f', 'kDefinition']).options\n assert_dict_contains_subset(option_subset, result)\n\n option_subset = {'fields': ['kDefinition', 'kXerox']}\n result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options\n assert_dict_contains_subset(\n option_subset, result, msg=\"fields -f allows multiple fields.\"\n )\n\n option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}\n result = Packager.from_cli(\n ['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']\n ).options\n assert_dict_contains_subset(\n option_subset, result, msg=\"fields -f allows additional arguments.\"\n )\n\n result = Packager.from_cli(['--format', 'json']).options\n option_subset = {'format': 'json'}\n assert_dict_contains_subset(option_subset, result, msg=\"format argument works\")", "def get_pytest_arguments(self, config_section):\n pass", "def main():\n licensify(_parse_args())", "def test_cli_args():\n expected = dict(\n paths=[\"path1\", \"path2\"],\n exclude=[\"file*.py\", \"dir/\"],\n ignore_decorators=[\"deco1\", \"deco2\"],\n ignore_names=[\"name1\", \"name2\"],\n make_whitelist=True,\n min_confidence=10,\n sort_by_size=True,\n verbose=True,\n )\n result = _parse_args(\n [\n \"--exclude=file*.py,dir/\",\n \"--ignore-decorators=deco1,deco2\",\n \"--ignore-names=name1,name2\",\n \"--make-whitelist\",\n 
\"--min-confidence=10\",\n \"--sort-by-size\",\n \"--verbose\",\n \"path1\",\n \"path2\",\n ]\n )\n assert isinstance(result, dict)\n assert result == expected", "def universal_args(self):\n args = list(self.BASIC_ARGS)\n # Set ATF to be the bios\n args += [\"-bios\", \"%s/bl1.bin\" % self.config.atf]\n\n if self.config.linux:\n args += [\n \"-kernel\",\n \"%s/arch/arm64/boot/Image\" % self.config.linux\n ]\n args += [\"-append\", self.LINUX_ARGS]\n\n if self.config.android:\n args += self.android_drives_args()\n\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def test_component_specifications_init_arg_list(self):\r\n\t\tself.assertTrue(\"{$AddWordTaskRepeat}\" in self._configuration_[\"AddWordDefinitionTask\"].init_args() )", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def cmd_list(args):", "def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)", "def install_args(f):\n args = [\n argh.arg('--clean-db', help=CLEAN_DB_HELP_MSG),\n argh.arg('--private-ip', help=PRIVATE_IP_HELP_MSG),\n argh.arg('--public-ip', help=PUBLIC_IP_HELP_MSG),\n argh.arg('-a', '--admin-password', help=ADMIN_PASSWORD_HELP_MSG)\n ]\n for arg in args:\n f = arg(f)\n return f", "def install_args(f):\n args = [\n argh.arg('--clean-db', 
help=CLEAN_DB_HELP_MSG),\n argh.arg('--private-ip', help=PRIVATE_IP_HELP_MSG),\n argh.arg('--public-ip', help=PUBLIC_IP_HELP_MSG),\n argh.arg('-a', '--admin-password', help=ADMIN_PASSWORD_HELP_MSG),\n ]\n for arg in args:\n f = arg(f)\n return f", "def add_args(self, parser):", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def _build_command_list(self, arguments=None, debug=False):\n \n if debug:\n command_name = arguments[\"script_name\"]\n else:\n command_name = os.path.splitext(arguments[\"script_name\"])[0]\n \n command_list = [command_name]\n #del arguments[\"script_name\"]\n #del arguments[\"optional_arguments\"]\n\n for k in arguments:\n if k == \"script_name\": continue\n if type(arguments[k]) == type(list()):\n for n in arguments[k]:\n command_list.append(\"--{0}\".format(k))\n command_list.append(\"{0}\".format(n))\n else: \n command_list.append(\"--{0}\".format(k))\n command_list.append(\"{0}\".format(arguments[k]))\n \n return command_list", "def args_template():\n def required_arg_template_pat():\n return ( \n (c.paren(var_or_atomics() + opt_colon_sort_meta())) |\n var_or_atomic()\n )\n return (brace_noassign().possibly() + required_arg_template_pat().many())", "def retrieve_options(env):\n\n options = []\n if env.core != -1:\n options.extend([\"--core {}\".format(env.core)])\n if env.mtor != 4:\n options.extend([\"--mtor {}\".format(env.mtor)])\n if env.n != 1000:\n options.extend([\"--n {}\".format(env.n)])\n if env.forcefield != \"OPLS2005\":\n options.extend([\"--force {}\".format(env.forcefield)])\n if env.mae_lig:\n options.extend([\"--mae_charges\"])\n if env.gridres != 10:\n options.extend([\"--gridres {}\".format(env.gridres)])\n return \" \".join(options)" ]
[ "0.6652257", "0.6436489", "0.64220834", "0.624852", "0.601735", "0.59371823", "0.5928165", "0.5906079", "0.59029114", "0.5887085", "0.58200306", "0.58132803", "0.57993174", "0.5784199", "0.57690525", "0.5752837", "0.573312", "0.57010204", "0.56947035", "0.5688802", "0.5674345", "0.56628543", "0.5662229", "0.5662091", "0.5643051", "0.5636418", "0.5619789", "0.5614484", "0.5605948", "0.56015736", "0.5597486", "0.5596032", "0.5593574", "0.55883497", "0.55871576", "0.55842566", "0.5581342", "0.5573577", "0.5571877", "0.55584383", "0.5558293", "0.5555279", "0.5544699", "0.5534767", "0.5534656", "0.5531669", "0.5527497", "0.5507242", "0.55071515", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5504999", "0.5499304", "0.549633", "0.5496125", "0.54942316", "0.54911876", "0.5478482", "0.5473749", "0.54701793", "0.54642147", "0.5462302", "0.5462107", "0.54565144", "0.54557616", "0.5451032", "0.54442763", "0.54406744", "0.5439474", "0.5435654", "0.5426505", "0.5422888", "0.5415494", "0.54143447", "0.5412361", "0.54113835", "0.54094774", "0.53986037", "0.53893286", "0.5379108", "0.5377055", "0.537004", "0.53667593" ]
0.74655694
0
serialize the instance info compatible with testing.js
def get_structure(self):
    instances = []
    urls = []
    leader_name = ""
    if self.is_leader:
        leader_name = self.name
    for arangod in self.all_instances:
        struct = arangod.get_structure()
        struct["JWT_header"] = self.get_jwt_header()
        urls.append(struct["url"])
        instances.append(struct)
    return {
        "protocol": self.get_http_protocol(),
        "options": "",
        "addArgs": "",
        "rootDir": str(self.basedir),
        "leader": leader_name,
        "agencyConfig": "",
        "httpAuthOptions": "",
        "urls": str(urls),
        "arangods": instances,
        "JWT_header": self.get_jwt_header(),
        # 'url': self.url,
        # 'endpoints': self.endpoints,
        # 'endpoint': self.endpoint,
        # 'restKeyFile': self.restKeyFile,
        # 'tcpdump': self.tcpdump,
        # 'cleanup': self.cleanup
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self):\n cls = self.__class__\n return {\n \"spawn_prob\": self.spawn_prob,\n \"agent_locs\": self.agent_locs.copy(),\n \"agent_names\": self.agent_names.copy(),\n \"board\": self.board.copy(),\n \"class\": \"%s.%s\" % (cls.__module__, cls.__name__),\n }", "def serialize(self):", "def serialize(self):\n pass", "def _serialise(self):\n # TODO (M Foley)\n pass", "def serialize(self):\n return {\n\n\n }", "def _serialize(self, instance, owner):\n val = instance.__dict__[self._name]\n if val is None: return None\n return str(val)", "def serialize(self):\n\t\treturn {\n\t\t\t'name' : self.name,\n\t\t\t'id' : self.id,\n\t\t\t'description' : self.description,\n\t\t\t'kind_of_thing' : self.kind_of_thing,\n\t\t}", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'description' : self.description,\n 'is_private' : self.is_private,\n }", "def dump():\n\t\treturn self.__dict__;", "def serialize(self):\n return {\n 'id': self.id,\n 'internal_name': self.internal_name,\n 'external_ip': self.external_ip,\n 'external_port': self.external_port,\n 'description': self.description\n }", "def dumps(self):\n pass", "def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval", "def serialize(self):\n return {\n 'uuid': self.uuid,\n 'image': self.image,\n 'name': self.name,\n 'description': self.description,\n 'price': self.price,\n 'available': self.available,\n 'type_': self.type_.serialize,\n 'salesman': self.get_user,\n 'game': self.game,\n 'server': self.server,\n \"created_at\": self.created_at,\n \"updated_at\": self.updated_at\n }", "def __serialize__(self):\n return {\"_custom_type\" : self.__class__.__name__,\n \"name\" : self.name,\n \"src\" : self.src,\n \"exec_loc\" : self.exec_loc,\n \"precompiled\" : self.precompiled}", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def serialize(self, obj):\n pass", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def serialize_instance(obj):\n d = {'__classname__': type(obj).__name__}\n d.update(vars(obj))\n return d", "def serialize(self):\n\t\treturn {\n\t\t\t'name' : self.name,\n\t\t\t'id' : self.id,\n\t\t}", "def serialize(self): \n \n ret = {}\n\n for (name, field) in inst._fields:\n ret[name] = field.serialze(self, type(self))\n\n return ret", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def dumps(self) -> str:\n ...", "def serialize(self) -> str:\n pass", "def serialize(self):\n return {\n 'name' : self.name,\n 'description' : self.description,\n 'id' : self.id,\n 'picture' : self.picture,\n 'catalog_id' : self.catalog_id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str,\n }", "def to_json(self) -> str:\n if self.actual_instance is None:\n return \"null\"\n\n to_json = getattr(self.actual_instance, \"to_json\", None)\n if callable(to_json):\n return self.actual_instance.to_json()\n else:\n return json.dumps(self.actual_instance)", "def serialize(self):\n return{\n 'name':self.name,\n 'id' :self.id,\n }", "def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}", "def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n 
self.testing_options['objects'][0],\n add_serializer_info=True)", "def serialize(self):\n raise NotImplementedError(\"Abstract class, implemented in sub class\")", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n }", "def serialize(self):\n\t\treturn { 'description': self.description, 'zone' : self.zone, 'enabled' : self.enabled, 'dayTimeOnly' : self.day_time_only, 'authorizeOnly' : self.authorize_only, 'secret' : self.secret, 'deviceType' : self.deviceType, 'specific_configuration': self.specific_configuration}", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'url': self.url,\n 'created': self.created,\n 'update': self.update,\n 'active': self.active,\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str, \n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'phone' : self.phone,\n 'email' : self.email,\n 'address' : self.address,\n 'picture' : self.picture,\n }", "def serialize(self) -> str:\n return json.dumps(self.__dict__)", "def serialize(self):\n return{\n 'name': self.name,\n 'id': self.id,\n }", "def dump(self) -> dict[Any, str]:\r\n ...", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n 'date_added': self.date_added,\n }", "def dump(self) -> None:\n ...", "def serialize(self):\n return {\n 'lapyname': self.lapyname,\n 'speciality': self.speciality,\n 'ram': self.ram,\n 'storage': self.storage,\n 'warrenty': self.warrenty,\n 'price': self.price,\n 'rating': self.rating,\n 'date': self.date,\n 'id': self. 
id\n }", "def dump(self):\n return", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n }", "def info(self) -> dict:", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n }", "def dumps(self):\n return dumps(self)", "def serialize(self):\n return {\n 'name' :self.name,\n 'points' :self.pts,\n 'id' :self.id,\n 'league_id':self.league_id,\n 'userID':self.user_id\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name\n }", "def test_repr(self):\n\n application = \"Application(name='nginx', version='latest')\"\n services = \"{'www': {'state': 'starting'}}\"\n instance = \"Instance(name='nginx', current_state='starting', \" + \\\n \"desired_state='running', application=\" + \\\n application + \", services=\" + services + \", \" + \\\n \"parameters={'SETTING': 'value'}, \" + \\\n \"options={'storageBucket': 'custom'})\"\n self.assertEqual(repr(self.instance), instance)", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n 'room' : self.room.name,\n 'description' : self.description,\n 'price' : self.price,\n }", "def persistence_serialize(self):\n raise NotImplementedError", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'email' : self.email,\n 'picture' : self.picture,\n }", "def __str__(self):\n return str(self.serialize())", "def serialize(self):\n return {\n 'name' : self.name,\n 'email' : self.email,\n 'picture' : self.picture\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'userID': self.userID,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date_time' : str(self.date_time),\n 'duration' : self.duration,\n 'highlights' : self.highlights,\n 'conference_id' : self.conference_id,\n 'type_of_session_id' : self.type_of_session_id,\n 'speaker_id' : self.speaker_id,\n 'location_id' : self.location_id,\n 'documents' : self.documents \n }", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'name': self.name,\n\t\t\t'user': self.user_id\n\t\t}", "def serialize(self):\n return {'id':self.id,\n 'flavor':self.flavor,\n 'size':self.size,\n 'rating':self.rating,\n 'image':self.image}", "def serial(self) -> dict:\n return self.__dict__", "def serialize(self):\n\n\t\treturn str(self)", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'distance': self.distance,\n 'units': self.units,\n 'terrain': self.terrain\n }", "def cls2json(self):\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def serialize_model_instance(instance):\n ref = [instance.__class__.__module__, instance.__class__.__name__, instance.pk]\n dumped_ref = json.dumps(ref).encode('utf-8')\n return b64encode(dumped_ref).decode('utf-8')", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'category': self.category\n }", "def dump(self):\n return {\"data\": self.data, \"encoding\": self.encoding,\n \"type\": self.type_name}", "def 
serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'catalog': self.catalog.serialize,\n }", "def serialize(self):\n raise Exception(\"Unimplemented!\")", "def get_json_serializable_info(self):\n return {\n 'covariance_type': self.covariance_type,\n 'hyperparameters': self.hyperparameters.tolist(),\n }", "def dumps(self) -> Dict[str, Any]:\n contents = super().dumps()\n contents[\"name\"] = self.name\n return contents", "def serialize(self, obj):\n return obj", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date' : str(self.date),\n 'owner_id' : self.owner_id,\n }", "def serialize(self, data):", "def serialize(self):\n\t\treturn {\n\t\t\t'name' : self.name,\n\t\t\t'id' : self.id,\n\t\t\t'picture' : self.picture,\n\t\t\t'email' : self.email,\n\t\t}", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'address': self.address,\n 'tel': self.tel\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'email' : self.email,\n 'rfidno' : self.rfidno,\n 'pin' : self.pin,\n 'rollno' : self.rollno,\n 'userLevel' : self.userLevel,\n \n }", "def serialize(self):\n # type: () -> Dict[str, Any]\n return {\n 'id': self.id,\n 'creator_id': self._creator_id,\n 'name': self._name,\n 'description': self._description,\n 'member_ids': self._member_ids,\n 'admin_ids': self._admin_ids\n }", "def serialize(self):\n return {\n 'sid' : self.sid,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n }", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'session_id' : self.session_id,\n 'filename' : self.filename,\n 'filetype' : self.filetype\n }", "def get_serialized_info(self):\n info = {\"table_id\": self.table_id,\n \"instance_id\": self.instance_id,\n \"project_id\": self.project_id}\n\n try:\n info[\"credentials\"] = self.client.credentials\n except:\n info[\"credentials\"] = self.client._credentials\n\n return info" ]
[ "0.7045984", "0.70322937", "0.7007658", "0.6789255", "0.6712205", "0.66500646", "0.66200024", "0.65715665", "0.6565833", "0.6550571", "0.6525462", "0.6522684", "0.6512101", "0.64929324", "0.6474648", "0.6463481", "0.64474213", "0.6426796", "0.64142424", "0.6408753", "0.6408586", "0.6408586", "0.6394255", "0.63911843", "0.6363417", "0.63549703", "0.6332607", "0.6325021", "0.6325021", "0.63211596", "0.6318784", "0.6310379", "0.6302783", "0.62953573", "0.6291017", "0.6291017", "0.6291017", "0.6291017", "0.6291017", "0.6291017", "0.6291017", "0.6277106", "0.6277106", "0.6277106", "0.6277106", "0.6267193", "0.6263391", "0.62585866", "0.6246988", "0.62463605", "0.62459534", "0.6245168", "0.6236078", "0.62302184", "0.62291116", "0.6228418", "0.622288", "0.622288", "0.622288", "0.6215936", "0.6207816", "0.6207339", "0.6207339", "0.6207339", "0.6200906", "0.6197203", "0.619561", "0.6189418", "0.61892986", "0.6186667", "0.61667544", "0.6164743", "0.61623454", "0.6156797", "0.61545455", "0.61541253", "0.61490107", "0.6136603", "0.6132772", "0.61101395", "0.6109619", "0.6109619", "0.610441", "0.61026955", "0.6097473", "0.6088838", "0.6085823", "0.6084478", "0.60831296", "0.6054976", "0.60479856", "0.60428405", "0.6041742", "0.60342264", "0.60335964", "0.60284305", "0.60224056", "0.6019321", "0.60170937", "0.6016991", "0.6016872" ]
0.0
-1
name of this starter
def name(self):
    return str(self.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name():\n pass", "def name():\n pass", "def name() -> str:\n pass", "def step_name(self):\n return \"main\"", "def get_name():\n return \"SVM Idea\"", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name(self):\n pass", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def testbed_name(self): \n return \"C-Lab\"", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:\n return 'dayily'", "def get_name(self):\n return \"make\"", "def name(self):\r\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def get_name():\n return __name__", "def name(self):\n ...", "def get_name(self):\n return \"catkin\"", "def name(self):\n return self._path or '__main__'", "def get_name():", "def package_name(self):", "def name():\n\n pass", "def get_name(self):\n # <<-- Creer-Merge: get-name -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n return \"Checkers Python Player\" # REPLACE THIS WITH YOUR TEAM NAME\n # <<-- /Creer-Merge: get-name -->>", "def get_name() -> str:\n pass", "def name(self) -> str: # pragma: no cover", "def get_name():\n return \"SVMd+ - simplified approach\"", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def name(self):\n return \"SMACK\"", "def resource_name(self):\n return \"suite\"", "def get_name():\n return config.APP_NAME", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def name(self):\r\n return None", "def name(self):\n return None", "def get_name(self):\n pass", "def get_name(self):\n pass", "def showName(self):\n print('your name is ', self.name)\n print('my name is ', self.__class__.__name__)", "def project_name(self):\n pass", "def getName(self):\n \n return \"Antiyoy AI\"", "def name(self):\r\n return self.setuptools_requirement.project_name", "def name(self):\n return self.config[\"name\"]", "def get_name() -> str:", "def get_name(self) -> str:\n pass", "def _getName(self):\n return 'HERE'", "def get_name(self):\n\n return \"Sawyer McLane\"", "def app_title():\n print(\"*\" * 27)\n print(\" Stock App\")\n print(\"*\" * 27)", "def name(self) -> str:\n\t\traise NotImplementedError", "def marketing_name(self):\n return \"Custom solution - 2\"", "def plugin_name(self):", "def start_name(self, attributes):\n self.name = True", "def get_name():\n return \"Boss\"", "def name(cls):\n return None", "def getProjectName():", "def scene_name():\n\n pass", "def name(self):\n\t\treturn self.args[0]", "def name():\n raise NotImplementedError", "def name():\n raise NotImplementedError", "def name_python_package_tests(self) -> str:\n return f'test_{self.name}'", "def name(self):\n return self.heater.name", "def greeting(self):\n print(\"Hello! My name is {name}.\".format(name=self.name))", "def greeting(self):\n print(\"Hello! 
My name is {name}.\".format(name=self.name))", "def nice_name():\n\n pass", "def get_name(self):", "def get_name(self):", "def getName(self):\n return \"\"", "def get_name(self):\n return", "def get_name():\n return \"SVMd+\"", "def get_name(self):\n return None", "def name() -> str:\n return \"test-helper-nuke\"", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def name ( self ) :\n return self.__name if self.__name else ''", "def name(self):\n raise NotImplementedError # pragma: no cover", "def get_name(cls):\n pass", "def show_name(self):\n return self.name", "def get_name(self):\n\t\treturn self.__name", "def name(self) -> str:\n return self.dev.label", "def get_name(self): #Doctests, pour tester directement les méthodes\n return self.__name", "def name(self) -> Optional[str]:\n ...", "def autoname(self):\n raise NotImplementedError()", "def get_name(self) -> str:\n return self.__name", "def task_name(self):\n pass", "def display_name(self):", "def name(self) -> str:\n raise NotImplementedError" ]
[ "0.715454", "0.715454", "0.6939584", "0.686997", "0.68043464", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6678551", "0.6673218", "0.6657183", "0.6657183", "0.6643718", "0.66373086", "0.66373086", "0.66373086", "0.66373086", "0.66373086", "0.66209406", "0.6614771", "0.65994585", "0.6586325", "0.6586325", "0.6586325", "0.6586325", "0.65363836", "0.65166533", "0.6514398", "0.65127283", "0.6506711", "0.65028715", "0.6490374", "0.6472479", "0.6466549", "0.645944", "0.6432107", "0.6414771", "0.6414771", "0.6414771", "0.6414771", "0.63153744", "0.6306052", "0.629035", "0.6251131", "0.6251131", "0.6251131", "0.6251131", "0.6244857", "0.62430495", "0.62347513", "0.62347513", "0.6226816", "0.62182987", "0.621334", "0.62026435", "0.61891806", "0.6184882", "0.6177044", "0.61688733", "0.615035", "0.6145971", "0.6138879", "0.6137231", "0.6136583", "0.61227196", "0.6121335", "0.61052024", "0.6098482", "0.6089656", "0.6086539", "0.60818267", "0.60818267", "0.60811377", "0.6076512", "0.6072344", "0.6072344", "0.6068726", "0.6068271", "0.6068271", "0.6059496", "0.6052978", "0.6047104", "0.6042844", "0.60414684", "0.6037996", "0.6037219", "0.6029197", "0.6026383", "0.6020362", "0.6014193", "0.601361", "0.6010932", "0.60050607", "0.6000884", "0.59992963", "0.59937465", "0.59884465", "0.59820384" ]
0.0
-1
get the frontend URLs of this starter instance
def get_frontends(self):
    ret = []
    for i in self.all_instances:
        if i.is_frontend():
            ret.append(i)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urls(self):\n return lambda : self.config.urls(active_only=True)", "def getURLs():", "def get_urls():\r\n return []", "def frontend_endpoint_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"frontend_endpoint_ids\")", "def urls(self) -> list[str]:\r\n ...", "def urls(self):\n return self._list_urls()", "def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_flask_endpoints(self):\n urls = self.endpoints.keys()\n return urls", "def endpoints(self):\n return self.settings[\"endpoints\"]", "def get_urls(self):\n return patterns('')", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def frontend_endpoint_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"frontend_endpoint_ids\")", "def external_server_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KlusterletSpecExternalServerURLsArgs']]]]:\n return pulumi.get(self, \"external_server_urls\")", "def get_urls(self, **kwargs):\n pass # pragma: no cover", "def get_layer_urls(self):\n urls = []\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls", "def get_urls(self):\n urls = super(EasyPublisher, self).get_urls()\n admin_site = self.admin_site\n opts = self.model._meta\n info = opts.app_label, opts.module_name,\n \n easy_publisher_urls = patterns(\"\",\n url(r\"^([^/]+)/drafts/$\", admin_site.admin_view(self.drafts_view), name='%s_%s_draftlist' % info),\n url(r\"^([^/]+)/drafts/([^/]+)/\", admin_site.admin_view(self.publish_view), name='%s_%s_draft' % info),\n url(r\"^(.+)/current/$\", admin_site.admin_view(self.change_view), {'extra_context':{'current':True}}, name='%s_%s_current' % info)\n )\n\n return easy_publisher_urls + urls", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def urls(self) -> str:\n return self._data['urls']", "def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)", "def get_xmodule_urls():\r\n if settings.DEBUG:\r\n paths = [path.replace(\".coffee\", \".js\") for path in\r\n settings.PIPELINE_JS['module-js']['source_filenames']]\r\n else:\r\n paths = [settings.PIPELINE_JS['module-js']['output_filename']]\r\n return [staticfiles_storage.url(path) for path in paths]", "def start_urls(self):\n if self.agency_doc_id or self.dept_doc_id:\n agency_doc_id = self.agency_doc_id\n if isinstance(agency_doc_id, list):\n agency_doc_id = agency_doc_id[0]\n return [\n \"https://detroitmi.gov/documents?{}={}&{}={}\".format(\n self.doc_query_param_dept,\n self.dept_doc_id,\n self.doc_query_param,\n agency_doc_id or \"\",\n )\n ]\n else:\n return [self.get_event_start_url()]", "def __get_urls(self):\n self.__valid_servers = {\n \"qa\": {\n \"server_url\": \"https://qa.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://qa.api.deepaffex.ai:9080\"\n },\n \"dev\": {\n \"server_url\": \"https://dev.api.deepaffex.ai:9443\",\n \"websocket_url\": 
\"wss://dev.api.deepaffex.ai:9080\"\n },\n \"demo\": {\n \"server_url\": \"https://demo.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.ai:9080\"\n },\n \"prod\": {\n \"server_url\": \"https://api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://api.deepaffex.ai:9080\"\n },\n \"prod-cn\": {\n \"server_url\": \"https://api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://api.deepaffex.cn:9080\"\n },\n \"demo-cn\": {\n \"server_url\": \"https://demo.api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.cn:9080\"\n }\n }\n try:\n self.server_url = self.__valid_servers[self.server][\"server_url\"]\n self.websocket_url = self.__valid_servers[self.server][\"websocket_url\"]\n except KeyError:\n raise KeyError(\"Invalid server ID given\")", "def frontend_endpoint_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"frontend_endpoint_ids\")", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def get_static_urls(self):\n \n \n url_strings = dict()\n \n \n for platform in constants.PLATFORMS:\n if self.installer_exists(platform):\n download_url = (settings.CUSTOM_INSTALLER_URL.rstrip('/') + '/' +\n self.build_id + '/' + constants.PLATFORM_BUNDLES[platform])\n else:\n download_url = None\n \n url_strings[platform] = download_url\n \n \n return url_strings", "def get_absolute_url(self):\n return get_front_end_url(self)", "def get_urls(self):\n \n url_strings = dict()\n \n \n for platform in constants.PLATFORMS:\n download_path = reverse('download-installer', kwargs={\n 'build_id': self.build_id,\n 'platform': platform,\n })\n \n url_strings[platform] = settings.BASE_URL.rstrip('/') + download_path\n \n \n return url_strings", "def get_urls(self):\n info = self.model._meta.app_label, self.model._meta.model_name\n\n return super().get_urls() + [\n path(\"ajax\", self.callback, name=\"%s_%s_ajax\" % info),\n path(\"layer\", self.get_layer, name=\"%s_%s_layer\" % info)\n ]", "def get_url(self) -> List[str]:\n try:\n return self[\"url\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon url in system marathon config\"\n )", "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def get_observing_sites(self):\n pass", "def url_bases(self) -> List[str]:\n return self._url_module.url_bases", "def url_assets(self):\n return self.assets(asset_type='URL')", "def endpoints(self):\n return self[\"endpoints\"]", "def get_view_endpoints(self):\n return []", "def source_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_urls\")", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def url(self):\n return app.settings.cherrypy.url()", "def urls(self):\n base_url = r'^{}/'.format(self.label)\n return SiteModuleURLResolver(base_url, self.get_urls(), module=self, app_name=self.label, namespace=self.label)", "def sqs_urls(self) -> Sequence[str]:\n return pulumi.get(self, \"sqs_urls\")", "def get_urls(self):\r\n urls = super(ServeeAdminSite, self).get_urls()\r\n from django.conf.urls import patterns, url, include\r\n\r\n # Custom Views\r\n for path, view, name in self.custom_views:\r\n urls += patterns('',\r\n url(r'^%s$' % path, 
self.admin_view(view)),\r\n )\r\n\r\n # Inserts\r\n for insert_model_lookup, insert in self.insert_classes.iteritems():\r\n urls += patterns(\"\",\r\n (r\"^insert/%s/%s/\" % (insert.model._meta.app_label, insert.model._meta.module_name), include(insert.urls))\r\n )\r\n return urls", "def url(self) -> str:\n if \"main\" not in self._resources:\n self._initialize()\n return self._resources[\"main\"].url", "def get_resource_urls():\n base_url = 'http://developer.pardot.com/'\n pattern = re.compile(\n r'(?ims)\\<a [^>]*?href=\"(kb/api-version-3/[^>]*?/)\"[^>]*?\\>'\n r'[^<]*?\\</a\\>')\n response = requests.get(base_url)\n return [\n '%s/%s' % (base_url, url) for url in pattern.findall(response.text)]", "def app_url(self):\n return self.request.host_url", "def all_urls(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/allUrls/')))", "async def _landing_url(self, responses: SourceResponses) -> URL:\n return URL(f\"{await self._api_url()}/CxWebClient\")", "def get_urls(self) -> Dict[str, str]:\n return {}", "def list_urls(self, prefix: str = \"\", etl_name: str = None) -> Iterable[str]:", "def base_urls(self):\n # Due to the way Django parses URLs, ``get_multiple`` won't work without\n # a trailing slash.\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<slug_list>[\\w\\d_-]+)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<slug>[\\w\\d_-]+)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def generate_urls(self):\n # type: (Generator) -> str\n relative_urls = [path.replace(self.parser.base_path + \"/\", \"\")\n for path in self.parser.paths]\n entries = {\n fixup_parameters(relative_url, self.backend): path_to_class_name(relative_url)\n for relative_url in relative_urls\n }\n return render_to_string(\n self.backend, \"urls.py\", {\n \"entries\": entries,\n \"module\": self.module_name\n })", "def _get_urls(self, pkg):\n raise NotImplementedError", "def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")", "def inject_urls():\n return dict(company_name=config.company_name)", "def _get_resources():\n return {\n 'searchPageUrl': flask.url_for('search_page'),\n 'searchJsonUrl': flask.url_for('search'),\n 'userAddIconUrl': flask.url_for('static', filename='img/add-users.svg'),\n 'logoutUrl': flask.url_for('logout'),\n 'settingsUrl': flask.url_for('setup') + '#settingsDisplayTemplate',\n 'listAdminUrl': flask.url_for('admin_list'),\n 'addAdminUrl': flask.url_for('add_admin'),\n 'changeAdminPasswordUrl': flask.url_for('change_admin_password'),\n 'removeAdminUrl': flask.url_for('delete_admin'),\n 'loginUrl': flask.url_for('login'),\n 'recaptchaKey': ufo.app.config.get('RECAPTCHA_SITE_KEY', ''),\n 'setupUrl': flask.url_for('setup'),\n 'setupAdminUrl': flask.url_for('setup_admin'),\n 'setupOauthUrl': flask.url_for('setup_oauth'),\n 'download_chrome_policy': flask.url_for('download_chrome_policy'),\n 'policy_filename': 'chrome_policy.json',\n 'proxyServerAddUrl': flask.url_for('proxyserver_add'),\n 'proxyServerAddIconUrl': flask.url_for('static',\n filename='img/add-servers.svg'),\n 'proxyServerInverseAddIconUrl': 
flask.url_for(\n 'static', filename='img/add-servers-inverse.svg'),\n 'proxyServerListId': 'proxyList',\n 'proxyServerListUrl': flask.url_for('proxyserver_list'),\n 'listLimit': 10,\n 'proxyServerDetailsButtonId': 'serverDetailsButton',\n 'editButtonId': 'serverEditButton',\n 'proxyServerDetailsOverlayId': 'serverDetailsOverlay',\n 'proxyServerEditUrl': flask.url_for('proxyserver_edit'),\n 'proxyServerDeleteUrl': flask.url_for('proxyserver_delete'),\n 'proxyServerIconUrl': flask.url_for('static', filename='img/server.svg'),\n 'proxyServerAddButtonId': 'addServerButton',\n 'proxyServerModalId': 'serverModal',\n 'textAreaMaxRows': 10,\n 'ipInput': 'ipInput',\n 'nameInput': 'nameInput',\n 'sshPrivateKeyInput': 'sshPrivateKeyInput',\n 'hostPublicKeyInput': 'hostPublicKeyInput',\n 'getSettingsUrl': flask.url_for('get_settings'),\n 'settingsEditUrl': flask.url_for('edit_settings'),\n 'userAddUrl': flask.url_for('add_user'),\n 'userInverseAddIconUrl': flask.url_for(\n 'static', filename='img/add-users-inverse.svg'),\n 'userListId': 'userList',\n 'userListUrl': flask.url_for('user_list'),\n 'revokeToggleUrl': flask.url_for('user_toggle_revoked'),\n 'rotateKeysUrl': flask.url_for('user_get_new_key_pair'),\n 'inviteCodeUrl': flask.url_for('user_get_invite_code'),\n 'userDeleteUrl': flask.url_for('delete_user'),\n 'userDetailsButtonId': 'userDetailsButton',\n 'userDetailsOverlayId': 'userDetailsOverlay',\n 'userIconUrl': flask.url_for('static', filename='img/user.svg'),\n 'userAddButtonId': 'addUserButton',\n 'userModalId': 'userModal',\n 'groupAddTabId': 'groupAddTab',\n 'groupAddFormId': 'groupAdd',\n 'groupAddInputName': 'group_key',\n 'userAddTabId': 'userAddTab',\n 'userAddFormId': 'userAdd',\n 'userAddInputName': 'user_key',\n 'domainAddTabId': 'domainAddTab',\n 'domainAddFormId': 'domainAdd',\n 'manualAddTabId': 'manualAddTab',\n 'manualAddFormId': 'manualAdd',\n 'regexes': regex.REGEXES_AND_ERRORS_DICTIONARY,\n 'jsonPrefix': ufo.XSSI_PREFIX,\n 'maxFailedLoginsBeforeRecaptcha': ufo.MAX_FAILED_LOGINS_BEFORE_RECAPTCHA,\n 'userAddListFlipperId': 'userAddListFlipper',\n 'proxyServerAddListFlipperId': 'proxyServerAddListFlipper',\n 'userAddTabsId': 'userAddTabs',\n 'proxyServerAddFormId': 'serverAddFormHolder',\n }", "def registered_urls(self):\n from pkg_resources import iter_entry_points\n\n entries = ['Priority', 'EP Name', 'Module', 'Class']\n for ep in iter_entry_points('appurl.urls'):\n c = ep.load()\n entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])\n\n return entries", "def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"application_urls\", language, fallback, version_id, force_reload)", "def get_url(self):\n return staticfiles_storage.url(self._name)", "def _GetServers(self) -> List[Dict[str, str]]:\n return [\n {\n \"url\": \"/\",\n \"description\": \"Root path of the GRR API\",\n },\n ]", "def get_product_urls(self, page):\n return self.__url_list(page)", "def FlavorUrls(options, versions, flavor):\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return [toolchainbinaries.EncodeToolchainUrl(\n options.base_once_url, i, 'new') for i in ids]\n else:\n return [toolchainbinaries.EncodeToolchainUrl(\n options.base_url, VersionSelect(versions, flavor), flavor)]", "def get_website_URLs():\n\tfilepath = os.path.dirname(os.path.realpath(__file__)) +\"/web_sources\"\n\tf = open(filepath, 'r')\n\twebsites = []\n\tfor line in f:\n\t\tif line != 
\"\\n\":\n\t\t\tendl_index = line.index('\\n')\n\t\t\tclean_line = line[:endl_index]\n\t\t\tnew_list = clean_line.split(' ', 1)\n\t\t\twebsites.append(new_list)\n\tf.close()\n\treturn websites", "def getBookmarkableURLs(self):\n return getattr(CONFIG, 'zmi_bookmarkable_urls', True)", "def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def base_urls(self):\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<pk_list>\\w[\\w;-]*)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<pk>\\w[\\w-]*)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def get_showreel_item_urls(self):\n\n links = []\n rel_path = \"../\"\n if self.display:\n rel_path = rel_path * 2\n for item in self.showreel_document[\"reels\"]:\n if item[\"item_type\"] == 'dashboard':\n link = \"../%sdisplay/dashboard/%s\" % (rel_path, item[\"title\"])\n links.append(json.dumps(link))\n elif item[\"item_type\"] == 'graph':\n link = \"../%sdisplay/graph/%s\" % (rel_path, item[\"title\"])\n links.append(json.dumps(link))\n\n return links", "def urls(self) -> Dict[str, str]:\n url_bases = self.url_bases\n unformatted_paths = self._url_module.url_paths\n\n urls = {}\n for url_base in url_bases:\n # The default URL_base will look like: http://service.[..].amazonaws.com/...\n # This extension ensures support for the China & ISO regions\n alt_dns_suffixes = {\"cn\": \"amazonaws.com.cn\"}\n if enable_iso_regions():\n alt_dns_suffixes.update(\n {\n \"iso\": \"c2s.ic.gov\",\n \"isob\": \"sc2s.sgov.gov\",\n \"isoe\": \"cloud.adc-e.uk\",\n \"isof\": \"csp.hci.ic.gov\",\n }\n )\n\n for url_path, handler in unformatted_paths.items():\n url = url_path.format(url_base)\n urls[url] = handler\n for dns_suffix in alt_dns_suffixes.values():\n alt_url_base = re.sub(r\"amazonaws\\\\?.com$\", dns_suffix, url_base)\n alt_url = url_path.format(alt_url_base)\n urls[alt_url] = handler\n\n return urls", "def getFrontend(self):\n return self.header['FRONTEND']", "def get_radiobrowser_base_urls():\n hosts = []\n # get all hosts from DNS\n ips = socket.getaddrinfo('all.api.radio-browser.info',\n 80, 0, 0, socket.IPPROTO_TCP)\n for ip_tupple in ips:\n ip = ip_tupple[4][0]\n\n # do a reverse lookup on every one of the ips to have a nice name for it\n host_addr = socket.gethostbyaddr(ip)\n # add the name to a list if not already in there\n if host_addr[0] not in hosts:\n hosts.append(host_addr[0])\n\n # sort list of names\n hosts.sort()\n # add \"https://\" in front to make it an url\n return list(map(lambda x: \"https://\" + x, hosts))", "def get_all_url(cls, service_subscription: \"ServiceSubscription\") -> str: # pylint: disable=arguments-differ\n return f\"{service_subscription.url}/service-instances/\"", "def getSiteExampleURLs(self):\r\n return 'no such example'", "def api_url(self):\n return self.get_api_url()", "def urls(self):\n days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', ]\n url = 'http://www2.nngov.com/newport-news/offenses/%stxt.htm'\n return [url % day for day in days]", "def get_viewer_urls(self):\n return {name: viewer.top.url for name, 
viewer in self.viewers.items()}", "def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n url(\n r'^(?P<custom_field_id>.+)/formfield/$',\n CustomFieldFormfieldView.as_view(),\n name='customfield_formfield'\n ),\n ]\n return my_urls + urls", "def _sites(self):\n return self.properties.get('sites',\n SitePropertiesCollection(self.context, ResourcePath(\"sites\", self.resource_path)))", "def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True", "def getUrl(self):\n return self.url", "def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)", "def url(request):\n return request.config.getoption(\"--url\")", "def get_urls(db):\n return db.meta.find_one({'name':\"urls\"})['urls']", "def get_url(self):\n return self.resource.url", "def __SetEndpoints(self,\n version):\n\n if version==2:\n endpoints = {\"heads\":'top-headlines?',\"search\":'everything?',\"source\":'sources?'}\n elif version==1:\n endpoints = {\"search\":'articles?',\"source\":'sources?'}\n\n return endpoints", "def get_urls(type, assets=\"default\"):\n return [\n default_asset_cache.get_url(asset)\n for asset in StaticAsset.load(type, assets)\n ]", "def urlpatterns(self):\n regex = r'^%s/' % self.label\n urls_module = '%s.urls' % self.name\n ns = self.label\n return [url(regex, include(urls_module, namespace=ns, app_name=ns))]", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def getListOfSites(self):\n with self.config.TaskWorker.envForCMSWEB:\n sites = self.resourceCatalog.getAllPSNs()\n filteredSites = [site for site in sites if not site.startswith(\"T1_\")]\n\n return filteredSites", "def get_homepage(resource):\n return resource.playlist.consumer_site.domain", "def get_shelflist_urls():\n def _get_shelflist_urls(records):\n locations = set([r['location_code'] for r in records])\n return { loc: ('{}locations/{}/shelflistitems/'.format(API_ROOT, loc)) \n for loc in locations }\n return _get_shelflist_urls", "def url(self):\n return self._client.url", "def url(self):\r\n return self.urlparts.geturl()", "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def url(self):\n if not self._is_served:\n raise RuntimeError('Cannot determine app url if app is not yet \"served\".')\n elif not (_current_server and _current_server.serving):\n raise RuntimeError('Cannot determine app url if the server is not '\n 'yet running.')\n else:\n host, port = _current_server.serving\n return 'http://%s:%i/%s/' % (host, port, self._path)", "def get_scraper_url(self):\r\n \r\n return self.reformat_scraper_url()", "def sites(self):\n return self.properties.get('sites',\n SiteCollection(self.context, ResourcePath(\"sites\", self.resource_path)))", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def _get_path_to_front_end():\n dpath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'fe')\n log(\"Front-end static files @ {0}\".format(dpath))\n\n return dpath", "def urls(self):\n \n from django.conf.urls.defaults import url, include\n from tastypie.utils.urls import trailing_slash\n from client.views import checkin, checkout, login, 
logout, register, create_anonymous, delete_anonymous\n \n pattern_list = [\n url(r\"^(?P<api_name>%s)%s$\" % (self.api_name, trailing_slash()), self.wrap_view('top_level'), name=\"api_%s_top_level\" % self.api_name),\n ]\n\n for name in sorted(self._registry.keys()):\n self._registry[name].api_name = self.api_name\n pattern_list.append((r\"^(?P<api_name>%s)/resources/\" % self.api_name, include(self._registry[name].urls)))\n\n ## then add the actions\n pattern_list.extend([\n url(r\"^%s/actions/create_anonymous/$\" % self.api_name, create_anonymous, name=\"create_anonymous\"),\n url(r\"^%s/actions/delete_anonymous/$\" % self.api_name, delete_anonymous, name=\"delete_anonymous\"),\n url(r\"^%s/actions/register/$\" % self.api_name, register, name=\"register\"),\n url(r\"^%s/actions/login/$\" % self.api_name, login, name=\"login\"),\n url(r\"^%s/actions/logout/$\" % self.api_name, logout, name=\"logout\"),\n url(r\"^%s/actions/checkin/$\" % self.api_name, checkin, name=\"checkin\"),\n url(r\"^%s/actions/checkout/$\" % self.api_name, checkout, name=\"checkout\")\n ])\n\n urlpatterns = self.prepend_urls()\n \n urlpatterns += patterns('',\n *pattern_list\n )\n return urlpatterns", "async def Available_Endpoints() -> List[Dict[str, str]]:\n return [{\"path\": endpoint} for endpoint in busylightapi.endpoints]", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url" ]
[ "0.7023718", "0.6961256", "0.6898301", "0.6606221", "0.6593883", "0.65911895", "0.63784796", "0.63564336", "0.6309869", "0.62928236", "0.6277014", "0.6192597", "0.6181306", "0.61599916", "0.6134845", "0.6119912", "0.61194247", "0.6110606", "0.6094108", "0.60856676", "0.6055435", "0.60481465", "0.60197115", "0.5982778", "0.5956312", "0.58959556", "0.5895859", "0.58916897", "0.58548075", "0.585221", "0.5848152", "0.58328724", "0.58267653", "0.5825248", "0.5824162", "0.5814541", "0.5812121", "0.58036053", "0.576387", "0.57621074", "0.5761369", "0.5740037", "0.5735573", "0.5729677", "0.5713639", "0.57080215", "0.56940943", "0.56898534", "0.5682416", "0.56519675", "0.56494313", "0.5646689", "0.5646204", "0.5623661", "0.5581402", "0.5575144", "0.556603", "0.5553611", "0.5539206", "0.5527143", "0.5512516", "0.5505015", "0.55020857", "0.54980916", "0.5484211", "0.54801273", "0.5475969", "0.5461036", "0.545851", "0.5445205", "0.54366785", "0.54148173", "0.54077756", "0.5405664", "0.5405566", "0.54013586", "0.5390901", "0.53851646", "0.53815943", "0.5368304", "0.53664607", "0.53656816", "0.535055", "0.5343471", "0.53352237", "0.53259146", "0.5324807", "0.53229445", "0.5316979", "0.53066015", "0.5305521", "0.53028286", "0.52979183", "0.5292074", "0.52904046", "0.52897847", "0.52813214", "0.52727133", "0.5272082", "0.5272082" ]
0.6309068
9
get the list of dbservers managed by this starter
def get_dbservers(self):
    ret = []
    for i in self.all_instances:
        if i.is_dbserver():
            ret.append(i)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_servers(self):\n\t\treturn self.__servers", "def get_all_servers(self) -> List[Server]:\n pass", "def servers(self):\n return self._servers", "def get_databases(self):\n pass", "def databases(self):\n return self._databases", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def discover_servers():\n servers = set()\n for p in glob.glob1(SPDK_SERVER_APP_DIR, \"*\"):\n m = SERVERS_PATTERN.match(p)\n if m:\n servers.add(m.group())\n return list(servers)", "def get_databases ():\n return _dbobjects[:]", "def list_servers(self, all_tenants=False):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers/detail\"\n if all_tenants:\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + self.project_info[\n \"project_id\"] + \"/servers/detail?all_tenants=1\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while listing servers.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List servers Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Servers List :%s \" % output)\n return output[\"servers\"]", "def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()", "def list_databases(self):\n r = self.__get_response(settings.LST_DBS)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def get_reachable_servers(self) -> List[Server]:\n pass", "def get_servers(self):\n url = '%s/servers/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['servers']\n else:\n LOG.error('Get servers failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_dns_servers(self):\n self.__not_implemented()", "def list_dbs(self):\n return self.get('_all_dbs').json()", "def mmo_mongos_servers(self, mmo_connection):\n mongos_servers = []\n c = mmo_connection[\"config\"].mongos.find({}, { \"_id\": 1 } )\n for doc in c:\n hostname, port = doc[\"_id\"].split(\":\")\n mongos_servers.append({ \"hostname\": hostname, \"port\": int(port) })\n return mongos_servers", "def list_servers():\n (code, message) = rest_api.list_servers(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def mmo_config_servers(self, mmo_connection):\n config_servers = []\n c = 
mmo_connection[\"admin\"].command(\"getCmdLineOpts\")[\"parsed\"][\"sharding\"][\"configDB\"]\n for item in c.split(\",\"):\n hostname, port = item.split(\":\")\n if \"/\" in hostname: # cfg Replset server\n hostname = hostname.partition(\"/\")[2]\n config_servers.append( { \"hostname\": hostname, \"port\": int(port) } )\n return config_servers", "def list_databases(self):\n end_point = '/'.join([self.host, 'api', 'databases', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n if resp.status_code != 200:\n raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))\n return resp.json()", "def run(self):\n self._list_servers()", "def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers", "def server_names(self):\n return self._server_names", "def list_server(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='WildFly Server')\n resources.extend(self.list_resource(\n feed_id=feed_id,\n resource_type_id='Domain WildFly Server'))\n servers = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id,\n resource_id=self._get_resource_id(resource.path.resource_id))\n server_data = resource_data.value\n servers.append(Server(resource.id, resource.name, resource.path, server_data))\n return servers", "def all_dbs(self):\n return self.cloudant_client.all_dbs()", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]", "def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_databases(self):\n query = mssqlqueries.get_databases()\n logger.info(u'Databases query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def list_servers(self, request, paginate):\n raise NotImplementedError", "def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in x:\n\t\t\tprint(db['name'])", "def list(self):\n res = self.db.execute(select([model.imaging_servers.c.fqdn]))\n return self.column(res)", "def list_databases():\n response = houston.get(\"/history/databases\")\n houston.raise_for_status_with_json(response)\n return response.json()", "def get_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_dbs = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if scope[\"@type\"] == \"system:Database\":\n all_dbs.append(scope)\n return all_dbs", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"databases\")", "def servers(self):\n response = self._request(\"GET\", [ROUTE_SERVERS])\n\n return 
CBWParser().parse_response(CBWServer, response)", "def getDatabases(self):\n query = 'SELECT name FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n return df", "def _GetServers(self) -> List[Dict[str, str]]:\n return [\n {\n \"url\": \"/\",\n \"description\": \"Root path of the GRR API\",\n },\n ]", "def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def name_servers(self) -> Sequence[str]:\n return pulumi.get(self, \"name_servers\")", "def list_servers(self, request, tenant_id):\n server_name = ''\n if 'name' in request.args:\n server_name = request.args['name'][0]\n response_data = list_server(tenant_id, server_name, details=False)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])", "def get_datasource_list():\n global datasource_list\n\n if not datasource_list:\n datasource_list = stixhelpers.get_datasources(get_srcs())\n\n return datasource_list", "def get_all_index_servers(self):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM host WHERE type = 'Index Server';\")\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def api_get(self):\n sdc = DataCenter(location=self.joyent_uri, key_id=self.joyent_key_id, secret=self.joyent_secret,\n allow_agent=False, verbose=self.debug)\n servers = sdc.machines()\n return servers", "def list_server(self, feed_id=None):\n servers = self.list_resource(feed_id=feed_id,\n resource_type_id='WildFly Server',\n cls=Server,\n include_data=True)\n servers.extend(self.list_resource(\n feed_id=feed_id,\n resource_type_id='Domain WildFly Server',\n cls=Server,\n list_children=True,\n include_data=True))\n return servers", "def databases(self) -> Session:\n uri = f\"{self.uri}/databases\"\n return self.request(uri=uri, method=\"GET\").json()", "def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"dns_servers\")", "def list_databases(self, limit=None, marker=None):\n return self._database_manager.list(limit=limit, marker=marker)", "def get_dbs_obj(self):\n dbs_xml = self.get_DatabaseAndServer_XML()\n return self.get_DatabaseAndServer_obj(dbs_xml)", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def mmo_shard_servers(self, mmo_connection):\n shard_servers = []\n c = mmo_connection[\"config\"].shards.find({})\n for doc in c:\n shard = doc[\"_id\"]\n for host in doc[\"host\"].split(shard + \"/\", 1)[1].split(\",\"):\n hostname, port = host.split(\":\")\n shard_servers.append({ \"shard\": shard, \"hostname\": hostname, \"port\": int(port) })\n return shard_servers", "def hosts(self) -> t.List[str]:\n if not self._hosts:\n self._hosts = self._get_db_hosts()\n return self._hosts", "def _scoped_servers(self):\n\n # If project scoped explicitly set the project list\n projects = None if utils.all_projects() else [pecan.request.token.project_id]\n\n # Must do a detailed search here as it returns the tenant_id field\n 
servers = self.compute.servers.list(search_opts={'all_tenants': 'True'})\n\n servers = Scope.filter(servers, projects=projects)\n return utils.paginate(servers, pecan.request.GET.get('marker'),\n pecan.request.GET.get('limit'))", "def load_servers_from_db(self):\r\n db = self.getDB()\r\n cursor = db.cursor()\r\n\r\n res = cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s;\"\"\", (self.userID,))\r\n\r\n result = cursor.fetchall()\r\n db.close()\r\n print(\"RESULT\", result)\r\n servers = list()\r\n\r\n for res in result:\r\n server_dict_temp = {\"serverID\": res[0],\r\n \"serverSessionID\": res[1],\r\n \"nickname\": res[2],\r\n \"isAway\": res[3],\r\n \"isConnected\": res[4],\r\n \"Registred_users_userID\": res[5],\r\n \"serverName\": res[6],\r\n \"serverIP\": res[7],\r\n \"serverPort\": res[8],\r\n \"useSSL\": res[9]}\r\n servers.append(server_dict_temp)\r\n self.server_list_text = servers", "def getServerInterfaces(self):\n return self.servers", "def GetSlavesForHost():\n hostname = os.getenv('TESTING_SLAVENAME')\n if not hostname:\n hostname = socket.getfqdn().split('.', 1)[0].lower()\n return [s for s in GetAllSlaves() if s.get('hostname') == hostname]", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def getConnectionList(self):\n return []", "def servers(self, details=True, **query):\n srv = _server.ServerDetail if details else _server.Server\n return list(self._list(srv, paginated=True, **query))", "def list_all_databases():\n with _superuser_connection() as conn:\n result = conn.list_all_databases()\n return result", "def list_server_datasource(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Datasource')\n resources.extend(self.list_resource(\n feed_id=feed_id,\n resource_type_id='XA Datasource'))\n datasources = []\n if resources:\n for resource in resources:\n datasources.append(Datasource(resource.id, resource.name, resource.path))\n return datasources", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def list(self):\n\n s = self.cloudman.list_servers()\n\n servers = self.get_list(self.cloudman.list_servers(), kind=\"vm\")\n\n result = []\n for server in servers:\n\n if 'cm' in server['metadata']:\n metadata = server['metadata']['cm']\n cm = literal_eval(metadata)\n if 'cm' in server:\n server['cm'].update(cm)\n try:\n server['ip_public'] = self.get_public_ip(server=server)\n except:\n pass\n try:\n server['ip_private'] = self.get_private_ip(server=server)\n except:\n pass\n result.append(server)\n\n return result", "def get_server_list():\n\n if file_downloaded(output_file):\n server_list = load_server_list_json()\n printer('Server list loaded from JSON')\n\n #server_list = load_server_list_json()\n #printer('Server list loaded from JSON')\n\n else:\n # Connect to RS\n rsconn = object\n rsconn = connect()\n\n # Store the JSON response from list_servers\n printer(\"Fetching server list from Rackspace...\")\n\n server_list = rsconn.list_servers(detailed = DETAILED)\n save_server_list_json(server_list)\n\n printer('Server list loaded via API call')\n\n return server_list", "def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = 
gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)", "def databases(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n relation_data = rel.data[rel.app]\n dbs = relation_data.get(\"databases\")\n return json.loads(dbs) if dbs else []", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def getAllFileServers():\n servers = None\n session = Queries.createSession()\n try:\n servers = session.execute(sqlalchemy.select([FileServer]))\n servers = servers.fetchall()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return servers", "def get_all_vms(self):\n available_servers = self.connection.compute.servers()\n if available_servers:\n vm_names = [server.name for server in available_servers]\n return vm_names\n else:\n return []", "def get(self):\n deploys = get_heroku_deployments('SERVER GOES HERE!')\n write_msg(deploys)\n return deploys", "def get_objects(self):\n has_more = True\n marker = None\n while has_more:\n servers = openstack_clients.get_novaclient().servers.list(\n limit=LIST_LIMIT,\n search_opts={'all_tenants': True},\n marker=marker\n )\n\n if not servers:\n # Definitely no more; break straight away\n break\n\n # servers.list always returns a list so we can grab the last id\n has_more = len(servers) == LIST_LIMIT\n marker = servers[-1].id\n\n for server in servers:\n yield server", "def __init__(self):\n self.databases = []", "def list_server_datasource(self, feed_id=None):\n datasources = self.list_resource(feed_id=feed_id,\n resource_type_id='Datasource',\n cls=Datasource,\n list_children=True)\n datasources.extend(self.list_resource(\n feed_id=feed_id,\n resource_type_id='XA Datasource',\n cls=Datasource,\n list_children=True))\n return datasources", "def get_num_servers():\n return 1", "def list_servers(self, request):\n token = request.form.get('token')\n if token is None:\n token = request.args.get('token')\n\n rest_client = RestClient.instance()\n if (not rest_client.validate_token(token)):\n return (401, 
'Unauthorized')\n\n game_servers = GameServers.instance().get_servers()\n out = []\n for game_server in game_servers.values():\n out.append({\n 'name': game_server.get_name(),\n 'host': game_server.get_host(),\n 'port': game_server.get_port(),\n 'owner': game_server.get_owner()\n })\n return (200, json.dumps(out))", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def Servers(self, server=None):\n if server:\n self.current = server\n return \"successful\"\n\n servers = []\n for x in XbmcServers.select():\n servers.append({'name': x.name, 'id': x.id})\n if len(servers) < 1:\n return\n return {'current': self.current, 'servers': servers}", "def list_sites():\n result = []\n querystring = 'select sitename from {};'.format(TABLES[0]))\n res = execute_query(querystring)\n if res:\n result = [x[0] for x in res]\n return result", "def get_list_servers(p_id_guilda):\r\n server_list = select_data.get_guild_servers(p_id_guilda)\r\n #css_mensagem = '```css\\n####### SERVERS ################'\r\n list_server = []\r\n for server in server_list:\r\n if server['description'] != None:\r\n description_server = server['description']\r\n else:\r\n description_server = ''\r\n return_data = '\\n### Id Server: ' + str(server['id_server_sk']) + ' - ' + server['name_guild'] + '\\n### Map: ' + server['map_name'] + '\\n### Modo: ' + server['mode_server'] + '\\n### Patreon: ' + server['map_patreon'] + '\\n### Description: ' + description_server + '\\n -----------------------------------------------------------------------------------'\r\n list_server.append(return_data)\r\n #css_mensagem = css_mensagem + return_data\r\n #css_mensagem = css_mensagem + '\\n##############################```'\r\n return list_server #css_mensagem\r", "def _context_getservers(disabled=None, config=None, getlineinfo=True):\n if not config or config['_isdirty']:\n config = loadconfig(HTTPD_CONF, getlineinfo)\n http = config['_'][0]['http'][0]\n if not 'server' in http:\n return []\n servers = http['server']\n if disabled == None or not getlineinfo:\n return servers\n else:\n return [server for server in servers\n if server['_param']['disabled'] == disabled]", "def get_srv_list():\n srv_list = [splitext(basename(sock))[0] \\\n for sock in glob.glob(CEPH_SOCKET_PATH + \"*.asok\")]\n return srv_list", "def get_available_databases() -> List[str]:\r\n\tcur = psycopg2.connect(dbname='postgres').cursor()\r\n\tcur.execute(\"SELECT datname FROM pg_database WHERE datistemplate=FALSE;\")\r\n\treturn [row[0][:-6] for row in cur if row[0].endswith('wikidb')]", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def getTables(self):\n\treturn self.dbNames", "def list_sites():\n sites = db_client.query_sites_list()\n return jsonify(sites)", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "async def discover(self):\n\n def get_discovered_servers(discovery):\n servers = discovery.all()\n discovery.stop()\n return servers\n\n discovery = RoonDiscovery(None)\n servers = await self._hass.async_add_executor_job(\n get_discovered_servers, 
discovery\n )\n _LOGGER.debug(\"Servers = %s\", servers)\n return servers", "def list_servers(active=True):\n params = {'active': 1} if active else {}\n servers_response = requests.get('https://bootstrap.fetch.ai/networks/', params=params)\n if servers_response.status_code != 200:\n raise requests.ConnectionError('Failed to get network status from bootstrap')\n\n return servers_response.json()", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)", "def get_servers_list(opts):\n servers_list = {}\n\n options = opts.get(PACKAGE_NAME, {})\n\n if options: # If no label given [fn_splunk_integration]\n server_list = {PACKAGE_NAME}\n else: # If label given [fn_splunk_integration:label]\n servers = SplunkServers(opts)\n server_list = servers.get_server_name_list()\n\n # Creates a dictionary that is filled with the splunk servers\n # and there configurations \n for server_name in server_list:\n servers_list[server_name] = opts.get(server_name, {})\n validate_fields([\"host\", \"port\"], servers_list[server_name])\n user = servers_list[server_name].get(\"username\", None)\n splunk_pass = servers_list[server_name].get(\"splunkpassword\", None)\n token = servers_list[server_name].get(\"token\", None)\n if not ((user and splunk_pass) or token):\n raise ValueError(\"Either username/splunkpassword or token need to be given\")\n elif token:\n servers_list[server_name][\"username\"] = None\n servers_list[server_name][\"splunkpassword\"] = None\n\n return servers_list", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def ns_list(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.authoritative_servers)", "def create_server_list(user, apikey, account_id, region=None, path=os.path.expanduser('~/.fabrackservers')):\n if region == 'uk':\n auth = (uk_authurl_v1_0, uk_authurl_v2_0)\n next_gen_dc = ['lon']\n else:\n auth = (us_authurl_v1_0, us_authurl_v2_0)\n next_gen_dc = ['ord', 'dfw']\n servers = []\n first_gen = openstack.compute.Client(username=user, apikey=apikey, \n auth_url=auth[1], service_type='compute')\n for server in first_gen.servers.list():\n servers.append({ 'name': server.name, 'addresses': server.addresses, \n 'generation': 1 })\n\n for r in next_gen_dc:\n next_gen = novaclient.Client(user, apikey, account_id, auth_url=auth[2], \n region_name=r, service_name='cloudServersOpenStack')\n for server in next_gen.servers.list():\n servers.append({ 
'name': server.name, 'addresses': server.addresses, \n 'generation': 2 })\n with open(path, 'w') as fh:\n pickle.dump(servers, fh)\n return servers", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())" ]
[ "0.7709002", "0.73207456", "0.7205934", "0.72003657", "0.70308435", "0.701856", "0.70004505", "0.69486785", "0.6863328", "0.6820925", "0.6803907", "0.6767398", "0.67585117", "0.67491573", "0.6748919", "0.6736712", "0.67257047", "0.6720556", "0.6672018", "0.6666036", "0.6558561", "0.6557067", "0.6550705", "0.6550474", "0.6539223", "0.65242845", "0.6523582", "0.6475993", "0.64628273", "0.64537567", "0.6444253", "0.6438602", "0.64296186", "0.6422156", "0.6418806", "0.64001906", "0.6369435", "0.63261807", "0.6311589", "0.6308331", "0.62931323", "0.62674886", "0.62648004", "0.6248918", "0.6244403", "0.62254566", "0.6221179", "0.6212231", "0.6203569", "0.620293", "0.61846614", "0.6183466", "0.6182109", "0.617519", "0.611078", "0.60898876", "0.6039105", "0.6033401", "0.60248363", "0.60231346", "0.6007346", "0.6007346", "0.60012937", "0.60000956", "0.5989276", "0.59885335", "0.5974792", "0.59606683", "0.5956414", "0.5946442", "0.59428596", "0.59249085", "0.5921925", "0.5904974", "0.5890462", "0.5873722", "0.58637846", "0.5852684", "0.58493525", "0.58443624", "0.5842392", "0.5841936", "0.58410436", "0.5826624", "0.5824068", "0.5821539", "0.5808387", "0.58065456", "0.57978207", "0.57927024", "0.57902354", "0.57534665", "0.5751659", "0.574655", "0.57425684", "0.57353294", "0.5734535", "0.57300276", "0.57132083", "0.56891036" ]
document_score: 0.85010105
document_rank: 0
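Each record in this dump pairs a natural-language query with a positive code document, a metadata dict, a list of hard-negative code snippets, per-negative scores, and a score and rank for the positive document. Below is a minimal sketch of how one such record could be modeled in Python; the field names mirror the dump's own columns, while the class name, defaults, and the example construction are illustrative assumptions, not part of the dataset itself.

# A minimal sketch of the record layout seen in this dump (assumed, not canonical).
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class RetrievalRecord:
    query: str                          # natural-language description of the code
    document: str                       # positive code snippet matching the query
    metadata: Dict = field(default_factory=dict)
    negatives: List[str] = field(default_factory=list)           # hard-negative snippets
    negative_scores: List[float] = field(default_factory=list)   # one score per negative
    document_score: float = 0.0         # retrieval score of the positive document
    document_rank: int = 0              # rank of the positive among all candidates


# Illustrative construction using values from the record that follows:
example = RetrievalRecord(
    query="get the list of agents managed by this starter",
    document="def get_agents(self): ...",
    document_score=0.76709455,
    document_rank=1,
)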
get the list of agents managed by this starter
def get_agents(self):
    ret = []
    for i in self.all_instances:
        if i.instance_type == InstanceType.AGENT:
            ret.append(i)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_agents(self):\n return self.get(\"manager_agents\")", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def agents(self):\n return AgentManager(session=self._session)", "def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def oc(self, stimulusID):\r\n global stimulusAPI\r\n try:\r\n pageList = stimulusAPI.getStimulusScope(stimulusID)\r\n agentSet = set([])\r\n for page in pageList:\r\n localAgentList = stimulusAPI.getAllAgentsWithViewOfSpecifiedPage(page)\r\n localAgentSet = set(localAgentList)\r\n agentSet.update(localAgentSet)\r\n agentList = list(agentSet)\r\n return agentList\r\n except Exceptions.InvalidStimulusProcessingType as e:\r\n raise e\r\n except Exceptions.ScriptError as e:\r\n raise e\r\n #self.execute(stimulusID)\r\n except Exception as e:\r\n raise Exceptions.ScriptError(e)", "def station_agents(self):\n return self.get(\"station_agents\")", "def list_agents(self, platform_uuid):\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in 
uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def transport_agents(self):\n return self.get(\"transport_agents\")", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def agent_arns(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"agent_arns\")", "def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_possible_absentees(self) -> List[QualifiedAgent]:\n wum: WorklistUpdateManagerApi = self._service_provider.get_service(WorklistUpdateManagerApi)\n return self._rem_iter_handler.consume(\n wum.get_possible_absentees(),\n \"agents\",\n PossAbsRemoteIteratorApi,\n PossAbsRemoteIteratorApi.poss_abs_get_next,\n )", "def _get_agents(self, instance_uuid, groups):\n _log.debug('_get_agents')\n connected_to_pa = self._platform_connections[instance_uuid]\n\n agents = connected_to_pa.agent.vip.rpc.call(\n 'platform.agent', 'list_agents').get(timeout=30)\n\n for a in agents:\n if 'admin' in groups:\n if \"platformagent\" in a['name'] or \\\n \"volttroncentral\" in a['name']:\n a['vc_can_start'] = False\n 
a['vc_can_stop'] = False\n a['vc_can_restart'] = True\n else:\n a['vc_can_start'] = True\n a['vc_can_stop'] = True\n a['vc_can_restart'] = True\n else:\n # Handle the permissions that are not admin.\n a['vc_can_start'] = False\n a['vc_can_stop'] = False\n a['vc_can_restart'] = False\n\n _log.debug('Agents returned: {}'.format(agents))\n return agents", "def agents_status(self):\n return self._get('agents/status')", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def _get_partner_agent(self):\n obj_partner = self.env['res.partner']\n args = [('parent_id', '=', False)]\n context = self._context or {}\n res = []\n\n if context.get('type') in ('out_invoice',):\n args.append(('wh_src_agent', '=', True))\n partner_ids = obj_partner.search(args)\n if partner_ids:\n partner_brw = obj_partner.browse(\n partner_ids)\n res = [item.id for item in partner_brw]\n return res", "def load_agents(self, agents):\n self.agents = agents", "def run(self, agent_args=None):\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)", "def test_list_l3_agents_on_router(self):\n with self.override_role():\n # NOTE: It is not empty list since it's a special case where\n # policy.enforce is called from the controller.\n self.ntp_client.list_l3_agents_hosting_router(self.router['id'])", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def getAllAgents(self):\n agent_dict ={}\n for member in self.membership.listMembers():\n if member.has_role('Agent'):\n agent_id = member.getUserName()\n agent_dict[agent_id]={}\n agent_dict[agent_id]['email'] = member.getProperty('email')\n agent_dict[agent_id]['areas'] = self.__wrapAreas(member.getProperty('areas'))\n agent_dict[agent_id]['fullname'] = member.getProperty('fullname')\n \n return agent_dict", "def get_all_actuators(self):\n\n actuators = []\n for part in self.parts:\n actuators = actuators + part.actuators\n\n return actuators", "def getActuators(self):\n return self.listener.actuators", "def agent(self):\n return self.__agent", "def get(self):\n return get_clientes()", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def get_agents(self, account_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents', filters)", "def test_can_get_agentlist(pa_instance):\n wrapper, agent_uuid = pa_instance\n publickey, secretkey = get_new_keypair()\n\n agent = wrapper.build_agent(\n serverkey=wrapper.publickey, publickey=publickey, secretkey=secretkey)\n peers = agent.vip.peerlist().get(timeout=2)\n assert VOLTTRON_CENTRAL_PLATFORM in peers\n\n # Make a call to manage which should return to us the publickey of the\n # platform.agent on the instance.\n papublickey = agent.vip.rpc.call(\n 
VOLTTRON_CENTRAL_PLATFORM, 'manage', wrapper.vip_address,\n wrapper.publickey, agent.core.publickey).get(timeout=2)\n assert papublickey\n\n agentlist = agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM, \"list_agents\"\n ).get(timeout=2)\n\n assert isinstance(agentlist, list)\n assert len(agentlist) == 1\n retagent = agentlist[0]\n assert retagent['uuid'] == agent_uuid\n checkkeys = ('process_id', 'error_code', 'is_running', 'permissions',\n 'health')\n for k in checkkeys:\n assert k in retagent.keys()\n\n # make sure can stop is determined to be false\n assert retagent['permissions']['can_stop'] == False", "def get_agent(drs):\n agents = []\n for line in drs:\n if line.strip().startswith('sem'):\n datalist = line.split(':')\n for word in datalist:\n if word.count('agent') > 0:\n variable = word[6:7]\n for word in datalist:\n if word.startswith('pred({0}'.format(variable)):\n agents.append(word.split(',')[1])\n return agents", "def services(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/services')", "def init_agents(self, config):\n agents = []\n os.chdir(self.load_directory) # Changes current working directory to load_directory\n file_list = glob.glob(\"*.h5\") # Returns all files in directory with ending .h5\n file_list = sorted(file_list, key=return_episode_num)\n for file in file_list:\n filename = \"../models/\" + file # The path is used in actor which is placed in agent\n actor = Actor(config.learning_rate, config.epsilon, config.decay_rate, config.board_size, config.nn_dims, config.activation, config.optimizer, config.loss_function, filename)\n actor.load(filename)\n agents.append(actor)\n return agents", "async def entities_controller(self, request):\n result = {\n \"transports\": [transport.to_json() for transport in self.transport_agents.values()],\n \"customers\": [customer.to_json() for customer in self.customer_agents.values()],\n \"tree\": self.generate_tree(),\n \"stats\": self.get_stats(),\n \"stations\": [station.to_json() for station in self.station_agents.values()]\n }\n return result", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_all_agents(solution_id, context, user_role=None):\n try:\n agents = list()\n us_obj = UserServices()\n user_details = us_obj.get_users_list()\n if user_details and user_details['status_code'] == 200:\n if 'result' in user_details \\\n and 'data' in user_details['result']:\n users_data = user_details['result']['data']\n for user_data in users_data:\n user_mapped = False\n added = False\n if 'solutions' in user_data:\n solns = user_data['solutions']\n for soln in solns:\n if 'id' in soln \\\n and soln['id'] == solution_id:\n user_mapped = True\n break\n if user_role and user_mapped:\n user_roles = user_data['userRoles']\n for role in user_roles:\n if 'name' in role \\\n and role['name'] == user_role:\n agents.append(user_data['userName'])\n added = True\n break\n if user_mapped and not added:\n agents.append(user_data['userName'])\n return agents\n except Exception as e:\n context.log(message=str(e),\n obj={'tb': traceback.format_exc()})\n return list()", "def _get_wh_agent(self):\n context = self._context or {}\n res = {}.fromkeys(self.ids, self._get_uid_wh_agent(\n ))\n return res", "def arc_clients(self):\n return self.__get_option('arc_client_tools')", "def load_agents(agent_files):\r\n agents = []\r\n\r\n for a in agent_files:\r\n\r\n # Grab the module\r\n # Need to strip out .py, hence the [:-3]\r\n 
agent_module = importlib.import_module('agents.'+a[:-3])\r\n\r\n # find_class\r\n # Now we can find the class as part of that module\r\n agent_class_name = [\r\n x for x in dir(agent_module) if\r\n not ('template' in x.lower()) and not ('__' in x)\r\n ][0]\r\n agent_class = getattr(agent_module, agent_class_name)\r\n\r\n # instantiate\r\n agent = agent_class()\r\n agents.append(agent)\r\n\r\n return agents", "def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers", "def reset(self):\n agent_info = []\n\n for a in self.agents:\n agent_info.append(a.reset())\n print('agent_info', agent_info)\n return agent_info", "def _get_p_agent(self):\n context = self._context or {}\n res = {}.fromkeys(self.ids, self._get_partner_agent(\n ))\n return res", "def server_agent_show(ctx, args):\n for agent_id in args:\n data = ctx.obj.get_agent_by_agent_id(agent_id)\n output_json_data(data)", "def get(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.db.get_agent(agent_id)\n if agent is not None:\n response = cloud_verifier_common.process_get_status(agent)\n common.echo_json_response(self, 200, \"Success\", response)\n #logger.info('GET returning 200 response for agent_id: ' + agent_id)\n\n else:\n #logger.info('GET returning 404 response. agent id: ' + agent_id + ' not found.')\n common.echo_json_response(self, 404, \"agent id not found\")\n else:\n # return the available keys in the DB\n json_response = self.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')", "def server_agent_list(ctx, output_format, columns):\n data = ctx.obj.get_agents()\n\n for agent in data['agent']:\n agent_info = ctx.obj.get_agent_by_agent_id(agent['id'])\n agent['ip'] = agent_info['ip']\n agent['pool'] = agent_info['pool']['name']\n agent['build_type'] = ctx.obj.get_agent_build_type(agent['id'])\n agent['build_text'] = ctx.obj.get_agent_build_text(agent['id'])\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['agent'])\n elif output_format == 'json':\n output_json_data(data)", "def agent(self) -> Entity:\n return self.__agent", "def get_critics(self):\n actors = [ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors", "def do_GET(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.server.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent_id not found\")\n logger.warning('GET returning 404 response. 
agent_id ' + agent_id + ' not found.')\n return\n\n if not agent['active']:\n common.echo_json_response(self, 404, \"agent_id not yet active\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not yet active.')\n return\n\n response = {\n 'aik': agent['aik'],\n 'ek': agent['ek'],\n 'ekcert': agent['ekcert'],\n 'regcount': agent['regcount'],\n }\n\n if agent['virtual']:\n response['provider_keys']= agent['provider_keys']\n\n common.echo_json_response(self, 200, \"Success\", response)\n logger.info('GET returning 200 response for agent_id:' + agent_id)\n else:\n # return the available registered uuids from the DB\n json_response = self.server.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')\n\n return", "def find_agents(\n self,\n status: Optional[str] = None,\n unit_id: Optional[str] = None,\n worker_id: Optional[str] = None,\n task_id: Optional[str] = None,\n task_run_id: Optional[str] = None,\n assignment_id: Optional[str] = None,\n task_type: Optional[str] = None,\n provider_type: Optional[str] = None,\n ) -> List[Agent]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from agents\n WHERE (?1 IS NULL OR status = ?1)\n AND (?2 IS NULL OR unit_id = ?2)\n AND (?3 IS NULL OR worker_id = ?3)\n AND (?4 IS NULL OR task_id = ?4)\n AND (?5 IS NULL OR task_run_id = ?5)\n AND (?6 IS NULL OR assignment_id = ?6)\n AND (?7 IS NULL OR task_type = ?7)\n AND (?8 IS NULL OR provider_type = ?8)\n \"\"\",\n (\n status,\n nonesafe_int(unit_id),\n nonesafe_int(worker_id),\n nonesafe_int(task_id),\n nonesafe_int(task_run_id),\n nonesafe_int(assignment_id),\n task_type,\n provider_type,\n ),\n )\n rows = c.fetchall()\n return [\n Agent(self, str(r[\"agent_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def get_all_environments():\n return ENVIRONMENTS", "def sample(self):\n return [agent_observation_space.sample() for agent_observation_space in self._agents_observation_space]", "def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors", "def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors", "def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors", "def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors", "def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body", "def getTargetRobots(self):\n # self.log(\"find targets\")\n robots = self.get_visible_robots()\n enemyRobots = []\n if len(robots) > 0:\n for bot in robots:\n # self.log(\"target bot team \" + str(bot['team']))\n # self.log(\"my team \" + str(self.me['team']))\n if bot['team'] != self.me['team']:\n self.log(\"adding bot to enemy list\")\n enemyRobots.append(bot)\n return enemyRobots", "def selectAgents(self, stimulusID, excludeSet = None, subjectIDList = []):\r\n method = moduleName + '.' + self.className + '.' 
+ 'selectAgents'\r\n global stimulusAPI\r\n \r\n subjectIDSet = set([])\r\n if len(subjectIDList) > 0: subjectIDSet = set(subjectIDList)\r\n if excludeSet is None: excludeSet = set([])\r\n agentSet = set([])\r\n \r\n try:\r\n viewList = stimulusAPI.getAgentsWithViewOfStimulusScope(stimulusID)\r\n \r\n if len(subjectIDList) > 0:\r\n #We made the stimulus request with a specific list of agents in mind. make sure they can see the page\r\n agentViewSet = set(viewList)\r\n agentSet = subjectIDSet.intersection(agentViewSet)\r\n \r\n else:\r\n agentSet = set(viewList)\r\n agentSet.difference_update(excludeSet)\r\n return agentSet\r\n except Exceptions.ScriptError as e:\r\n Graph.logQ.put( [logType , logLevel.WARNING , method , e])\r\n except Exception as e:\r\n errorMsg = \"Unknown error selecting observer agents for stimulus %s. rtParams = %s Traceback = %s\" %(stimulusID, e)\r\n Graph.logQ.put( [logType , logLevel.WARNING , method , errorMsg])\r\n return set([])", "def get_masters():\n\n # note: ListMasters uses master.cfg hardcoded as part of its search path\n def parse_master_name(masterpath):\n \"\"\"Returns a mastername from a pathname to a master.\"\"\"\n _, tail = os.path.split(masterpath)\n sep = '.'\n hdr = 'master'\n chunks = tail.split(sep)\n if not chunks or chunks[0] != hdr or len(chunks) < 2:\n raise ValueError('unable to parse mastername from path! (%s)' % tail)\n return sep.join(chunks[1:])\n\n return [(parse_master_name(m), m) for m in chromium_utils.ListMasters()]", "def RetrieveAllAgent(**argd):\n flag, ret = CGateway.core.RetrieveAllAgent(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n hmBuilder = []\n for hm in ret:\n hmBuilder.append(hm.ToJsonDict())\n return CGateway._SuccessResponse({'return': hmBuilder})", "def get_agent_terms(self):\n return # osid.authentication.AgentQueryInspector", "def getMonitoringHosts(self):\r\n return self.monitoringClients.values()", "def get_agent_keys(logger=None):\n paramiko_agent = paramiko.Agent()\n agent_keys = paramiko_agent.get_keys()\n if logger:\n logger.info('{0} keys loaded from agent'.format(len(agent_keys)))\n return list(agent_keys)", "def get_target_agents(self, source: Tuple[str, str], relation: str) -> List[Agent]:\n targets = self.get_targets(source, relation)\n agents = [self.node_to_agent(target) for target in targets]\n return agents", "def group_members(self) -> list[str] | None:\n\n zone_clients = [\n zone.ZoneID for zone in self.coordinator.data.zones if zone.SharedRoomID\n ]\n\n if self.is_master:\n entities = self._casatunes_entities()\n clients = [\n entity.entity_id\n for entity in entities\n if entity.is_client and entity.zone_id in zone_clients\n ]\n return [self.entity_id] + clients", "def get_agent_locations(self) -> Tuple[Dict[str, float], ...]:\n return tuple(self.get_agent_location(i) for i in range(self.num_agents))", "def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])", "def actuators(self):\n return self._actuators", "def mechanisms(self):\n return list(self)", "def getAgents(issuer=False, dbn='core', env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if 
cursor.first(): # first key in database\n while True:\n key = cursor.key().decode()\n if len(key) == DID_LENGTH and \"/\" not in key:\n value = cursor.value().decode()\n ser, sep, sig = value.partition(SEPARATOR)\n try:\n dat = json.loads(ser, object_pairs_hook=ODict)\n except ValueError as ex:\n if cursor.next():\n continue\n else:\n break\n try:\n did, index = dat[\"signer\"].rsplit(\"#\", maxsplit=1)\n except (AttributeError, ValueError) as ex:\n if cursor.next():\n continue\n else:\n break\n\n if did == key: # self signed so agent\n if issuer:\n if \"issuants\" in dat:\n entries.append(key)\n else:\n entries.append(key)\n if not cursor.next(): # next key in database if any\n break\n return entries", "def clients(self):\n return self._clients", "def clients(self):\n self.update_results()\n return self._clients", "def getAminos(self):\n\t\treturn self.aminos", "async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def get_roles():\r\n global _roles\r\n return _roles", "def get_effective_agent(self):\n raise Unimplemented()", "def get_agent_types_present(self) -> Set[KappaAgent]:\n agent_types = set()\n for key in self._complexes.keys():\n agent_types.update(key.get_agent_types())\n return agent_types", "def get_acs_agent(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def sensors(self):\n url = \"%s/state/teams/%s/sensors\" % (self.url, self.identifier, )\n data = perform_request(url)\n return data.get('sensors', [])", "def actors(self):\n return self._actors", "def get_tools(self):\r\n\t\tlogger.debug(\"Getting the tools\")\r\n\t\t\r\n\t\treturn db.get_items('tools')", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())", "def router_list_on_l3_agent(mgr_or_client, *args, **kwargs):\n raise (\"Not implemented yet!\")", "def endorsements(self) -> t.Sequence[str]:\n return self.shards(\"endorsements\")[\"endorsements\"].split(\",\")", "def get_enemy_list() -> list:\n with open(\"data.json\", \"r\") as file:\n data = json.load(file)\n names = data[\"enemy_data\"][\"names\"]\n\n return names", "def get_hosts(self):\n\n raise NotImplementedError", "def getBottoms(self):\n\t\treturn self.bottoms", "def get_source_agents(self, target: Tuple[str, str], relation: str) -> List[Agent]:\n sources = self.get_sources(target, relation)\n agents = [self.node_to_agent(source) for source in sources]\n return agents", "def my_objects(self):\n\n matches = [_object for _object in self.object_store if str(_object.OwnerID) == str(self.agent.agent_id)]\n\n return matches", "def read_annotations(file):\n\n with open(file) as f:\n lines = f.read().splitlines()\n\n annotations_by_agent = divide_annotations(lines)\n agent_list = []\n for annotation_set in annotations_by_agent:\n agent_list.append(Agent(annotation_set))\n\n return agent_list", "def list_all_agencies():\n return JsonResponse.create(StatusCode.OK, get_all_agencies())" ]
[ "0.77033234", "0.7416487", "0.7300729", "0.7252941", "0.72281355", "0.7117161", "0.70339304", "0.6690089", "0.666582", "0.66602963", "0.6639476", "0.6470405", "0.6329325", "0.62738615", "0.6230014", "0.61977404", "0.61977404", "0.61977404", "0.61977404", "0.6167947", "0.613414", "0.6121148", "0.61186093", "0.60759854", "0.60475534", "0.60275054", "0.6003428", "0.59731674", "0.59704596", "0.5945911", "0.5918371", "0.5899572", "0.5892125", "0.5883758", "0.5875584", "0.58731884", "0.58603644", "0.58380073", "0.57803595", "0.57792586", "0.5754354", "0.5725536", "0.57178485", "0.5715713", "0.5699589", "0.5686597", "0.56838584", "0.5659704", "0.56319326", "0.56054664", "0.5571919", "0.55703753", "0.5553085", "0.5545726", "0.5529194", "0.5526284", "0.5516892", "0.55111367", "0.5508983", "0.5508983", "0.5508983", "0.5508983", "0.5503884", "0.5501996", "0.5500277", "0.54897106", "0.5482085", "0.54776037", "0.5472219", "0.54694855", "0.54566383", "0.54548955", "0.54526395", "0.54525167", "0.54429525", "0.5439302", "0.5435134", "0.54291207", "0.54204005", "0.5406882", "0.5406544", "0.540289", "0.53844965", "0.53826463", "0.5379167", "0.53682566", "0.53652966", "0.5354765", "0.53372437", "0.5323266", "0.5322648", "0.5317253", "0.53019655", "0.5299499", "0.52985483", "0.52960134", "0.52923125", "0.5288025", "0.52801365", "0.5279297" ]
document_score: 0.76709455
document_rank: 1
get the list of arangosync masters managed by this starter
def get_sync_masters(self):
    ret = []
    for i in self.all_instances:
        if i.instance_type == InstanceType.SYNCMASTER:
            ret.append(i)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ListMasters(cue='master.cfg', include_public=True, include_internal=True):\n # Look for \"internal\" masters first.\n path_internal = os.path.join(\n BUILD_DIR, os.pardir, 'build_internal', 'masters/*/' + cue)\n path = os.path.join(BUILD_DIR, 'masters/*/' + cue)\n filenames = []\n if include_public:\n filenames += glob.glob(path)\n if include_internal:\n filenames += glob.glob(path_internal)\n return [os.path.abspath(os.path.dirname(f)) for f in filenames]", "def get_masters():\n\n # note: ListMasters uses master.cfg hardcoded as part of its search path\n def parse_master_name(masterpath):\n \"\"\"Returns a mastername from a pathname to a master.\"\"\"\n _, tail = os.path.split(masterpath)\n sep = '.'\n hdr = 'master'\n chunks = tail.split(sep)\n if not chunks or chunks[0] != hdr or len(chunks) < 2:\n raise ValueError('unable to parse mastername from path! (%s)' % tail)\n return sep.join(chunks[1:])\n\n return [(parse_master_name(m), m) for m in chromium_utils.ListMasters()]", "def masters(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.masters.all())", "def masters(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"masters\")", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def get_all_master_ids(self):\r\n return self._handler.get_all_master_ids()", "async def sync_master(self):\n if not [entity for entity in self._casatunes_entities() if entity.is_client]:\n await self.coordinator.data.zone_master(self.zone_master, False)\n await self.coordinator.async_refresh()\n _LOGGER.debug(\"%s zone is no longer master.\", self.zone_master)", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_global_submasters():\n global _submasters\n if _submasters is None:\n _submasters = Submasters()\n return _submasters", "def get_master_nodes(self):\n default = 3\n master_nodes_count = input('enter number of master nodes\\n'\n 'default [3]: ')\n master_nodes_count = set_values(master_nodes_count, default, check='integer')\n master_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['master_nodes'] = []\n for num in range(master_nodes_count):\n master_values = []\n default = 'etcd-{}'.format(num)\n master_name = input('enter the master {} node name \\n'\n 'default [{}]: '.format(num, default))\n master_name = set_values(master_name, default)\n master_ip = get_ip(node_name=master_name, ip_type='os')\n master_mac = get_network_device_mac(node_name=master_name, ip_type='idrac')\n master_values.append(master_name)\n master_values.append(master_ip)\n master_values.append(master_mac)\n master_node_dict_pairs = dict(zip(master_keys, master_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(master_name, master_name,\n master_ip, master_mac)) \n self.inventory_dict['csah']['vars']['master_nodes'].append(master_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_masters'] = master_nodes_count", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def addMasters(self,masterNames):\n #--Load Masters\n #--Master FileRefs\n proItems = []\n totSize = 0\n for masterName in masterNames:\n #--Don't have fileRef? 
FileRef out of date?\n masterInfo = modInfos[masterName]\n fileRefs = masterInfo.extras.get('FileRefs')\n if not fileRefs:\n fileRefs = masterInfo.extras['FileRefs'] = FileRefs(masterInfo,True,True)\n fileRefs.setDebrisIds()\n refreshSize = fileRefs.refreshSize()\n if refreshSize:\n proItems.append((fileRefs,refreshSize))\n totSize += refreshSize\n #--Refresh masters\n cumSize = 0\n for (fileRefs,size) in proItems:\n self.progress.setBaseScale(1.0*cumSize/totSize, 1.0*size/totSize)\n fileRefs.progress = self.progress\n fileRefs.refresh()\n cumSize += size\n #--Do Mapping\n del proItems[:]\n totSize = 0\n for masterName in masterNames:\n size = len(modInfos[masterName].extras['FileRefs'].cells)\n proItems.append((masterName,size))\n totSize += size\n cumSize = 0\n for (masterName,size) in proItems:\n if size: self.progress.setBaseScale(1.0*cumSize/totSize, 1.0*size/totSize)\n self.addMaster(masterName)\n cumSize += size", "def getAminos(self):\n\t\treturn self.aminos", "def getMasterMap(self,masterInfo):\n masterMap = [0]\n #--Map'em\n for mmName in masterInfo.masterNames:\n if mmName not in self.masterNames: \n raise MoshError(_(\"Misordered esm: %s should load before %s\") % (mmName, masterInfo.name))\n masterMap.append(self.masterNames.index(mmName)+1)\n #--Done\n return masterMap", "def get_master(self):\n\n def watcher(watched_event):\n if watched_event.type and watched_event.path:\n msg = \"child changed, try to get master again.type %s, state %s, path %s.\" % (\n watched_event.type, watched_event.state, watched_event.path)\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", msg))\n self.workers = self.get_workers()\n logger.debug(\"watcher call get_master start\")\n self.get_master()\n logger.debug(\"watcher call get_master end\")\n\n try:\n children = self.zk.get_children(self.LEADERSHIP_PATH, watcher)\n except:\n logger.error(traceback.format_exc())\n return\n\n # self register\n infos = []\n for child in children:\n data, stat = self.zk.get(self.LEADERSHIP_PATH + \"/\" + child)\n infos.append(data)\n\n # make sure leadship and services exists\n if self.info not in infos or \\\n not self.zk.exists(self.SERVICE_PATH + \"/\" + self.info):\n logger.debug(\"get_master call register start\")\n self.register_leadership()\n self.register_service()\n logger.debug(\"get_master call register end\")\n\n children.sort()\n logger.debug(\"%s's children: %s\" % (self.LEADERSHIP_PATH, children))\n # check if I'm master\n self.master = children[:self.MASTER_NUM]\n if self.path in self.master:\n self.is_master = True\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", \"I am master!\"))\n # get slave status and assign undone task to them\n online_workers = self.get_workers()\n self.assign_task(online_workers)\n self.workers = online_workers", "def get(self):\n return get_all_fuelmaster()", "def group_members(self) -> list[str] | None:\n\n zone_clients = [\n zone.ZoneID for zone in self.coordinator.data.zones if zone.SharedRoomID\n ]\n\n if self.is_master:\n entities = self._casatunes_entities()\n clients = [\n entity.entity_id\n for entity in entities\n if entity.is_client and entity.zone_id in zone_clients\n ]\n return [self.entity_id] + clients", "def master(self):\n return self.remappers[self._master_name]", "def get_mos_from_localhost(self):\n rewards = dict() # saves the reward of each client from each ap\n _, data = self.command_ap('localhost', 8080, '', \"/get_mos_client\") # the interface (3rd param) does not 
matter\n self.log.debug(\"data for MOS @ {} => {}\".format('all', data))\n stations = {'gnu-nb3': ['cloud'],\n 'fenrir': ['storm'],\n }\n for ap in self.aps:\n d = []\n for sta in stations[ap.name]:\n entries = [x[:4] for x in data if x[4] == sta]\n d.extend(entries)\n rs = self.get_rs(d)\n rewards[ap.id] = rs\n return rewards", "def get_objects(self):\n has_more = True\n marker = None\n while has_more:\n servers = openstack_clients.get_novaclient().servers.list(\n limit=LIST_LIMIT,\n search_opts={'all_tenants': True},\n marker=marker\n )\n\n if not servers:\n # Definitely no more; break straight away\n break\n\n # servers.list always returns a list so we can grab the last id\n has_more = len(servers) == LIST_LIMIT\n marker = servers[-1].id\n\n for server in servers:\n yield server", "def get_master_contracts(self, exchange):\n self._master_contracts = self.api_call(\n endpoint=ApiEndpoint.MASTER_CONTRACT,\n method=\"GET\",\n query_params={\"exchange\": exchange}\n )", "def get_quizmasters() -> list[int]:\n try:\n return [\n int(user_id[0]) for user_id in SESSION.query(QuizMaster.user_id)\n ]\n finally:\n SESSION.close()", "def get_servers(self):\n\t\treturn self.__servers", "def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)", "def FindMasterUsingChubby(ver):\n return core_utils.GetGSAMaster(ver, install_utilities.is_test(ver))", "def master_id(self):\r\n return self._arm.master_id", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def populateMasteredAssets(*args):\n #clear the lists first\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, ra=True)\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, ra=True)\n\n chars, props, sets = cFuncs.getProjectAssetList(pi.assetFolder)\n\n #check for rig masters\n for char in chars:\n cMstr = cFuncs.getAssetMaster(char, cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", char)), \"rig\")\n if cMstr:\n cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], e=True, a=char, dcc=showAssetImage)\n for prop in props:\n pMstr = cFuncs.getAssetMaster(prop, cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", prop)), \"rig\") \n if pMstr:\n cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], e=True, a=prop, dcc=showAssetImage)\n for sett in sets:\n sMstr = cFuncs.getAssetMaster(sett, cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", sett)), \"rig\") \n if sMstr:\n cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], e=True, a=sett, dcc=showAssetImage)\n\n #check for anim variants and masters\n varAnm = []\n shots = cFuncs.getProjectShotList(pi.currentProject)\n # print \"shotWin.populateMasteredAssets (line 937): shots =\", shots\n if shots:\n for shot in shots:\n shotVars = cFuncs.getShotVariantDict(os.path.join(pi.currentProject, \"shots\", shot))\n if shotVars[\"anm\"]:\n for anm in shotVars[\"anm\"]:\n aMstr = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm)))\n #print cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot, \"anm\", anm))\n if aMstr: \n varAnm.append(\"{0}.{1}\".format(anm, shot))\n\n for av in varAnm:\n cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], e=True, a=av)\n\n populateSceneRefs()", "def api_get(self):\n sdc = DataCenter(location=self.joyent_uri, key_id=self.joyent_key_id, secret=self.joyent_secret,\n allow_agent=False, verbose=self.debug)\n servers = sdc.machines()\n return servers", "def list_service(request):\n builder = http.ResponseBuilder()\n master_addr = request.GET.get('master',None)\n if not master_addr:\n return builder.error('master is required').build_json()\n\n client = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)\n status,jobs = client.list_jobs()\n LOG.info(status)\n if not status:\n return builder.error('fail to list jobs').build_json()\n ret = []\n for job in jobs:\n ret.append(job.__dict__)\n return builder.ok(data=ret).build_json()", "def get_all_servers(self) -> List[Server]:\n pass", "def arc_clients(self):\n return self.__get_option('arc_client_tools')", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def getMaster(self, base_path, filename='picloud.json'):\n\t\tmaster = None\n\t\tfor l in listdir(base_path) :\n\t\t\tpath = base_path + \"/\" + l\n\t\t\tmaster = self.checkIs('master', path, '', filename)\n\t\t\tif master != None :\n\t\t\t\treturn master\n\n\t\treturn None", "def test_master(self):\n m = self.d.master(4242)\n self.assertEqual(len(m.tracklist), 4)", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def slide_masters(self):\n return _SlideMasters(self)", "def _active_arms(self):\n return [self.arms[idx] for idx in self.arm_idx]", "def get_radius_servers(self):\n\n cmd = 'show aaa servers'\n output = self.iosapi.bcp_send_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) get_radius_servers() : Attempting to retrieve RADIUS servers\" %(__name__))\n\n return(self.iosapi.textfsm_extractor('cisco_ios_show_aaa_servers.template', output))", "def _get_cluster_list(self):\n return self.__cluster_list", "def is_master(self):\n return MPControl.is_master", "def list():\n rino.remote.list()", "def getmasternode_command(chat, message, args):\n get_masternodes = os.popen(path_to_bin + \"/bitcanna-cli masternode list\").read()\n loaded_json = json.loads(get_masternodes)\n msg = \"\"\n count = 0\n chat.send (\"List of online MASTERNODES\") \n print (\"List of online MASTERNODES\")\n print (\"==========================\")\n for tx in loaded_json:\n msg = msg + \"IP: \" + tx + \"\\n\"\n count = count + 1\n print (msg + \"\\nTotal: \" + str(count))\n chat.send(msg + \"\\nTotal: \" + str(count))", "def currentAntennaNames(carmaOnly=False) :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n cname = i.carmaAntennaName\n tname = i.typedAntennaName\n if (carmaOnly) :\n names = i.carmaAntennaName\n else :\n names = \"%s(%s)\" %(cname,tname)\n namelist.append(names)\n return namelist", "def clientlist(self) -> None:\n from bacommon.servermanager import ClientListCommand\n self._enqueue_server_command(ClientListCommand())\n self._block_for_command_completion()", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def get_campus_list(self, conn, offset=0, limit=100):\n path = urls.FLOOR_PLAN[\"GET_CAMPUS_LIST\"]\n params = {\n \"offset\": offset,\n \"limit\": limit\n }\n resp = conn.command(apiMethod=\"GET\", apiPath=path, apiParams=params)\n return resp", "def get_ammos(self):\n return self.__ammos", "def is_master(self) -> bool:\n return self.zone.SharedRoomID and self.zone.MasterMode", "def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body", "def stations():\n\n return station_list", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def list(args, config):\n\n api = config['API']\n headers = {}\n if args.stack_name:\n headers = {'stack-name': args.stack_name} # put stack name in headers\n r = requests.get(api['list'], headers=headers) # send the GET request\n print('\\nThe following clusters exist:\\n{}\\n'.format(r.json()))\n return", "def findMasterPods(directory):\n\tfiles=findFiles(directory,\".mp\")\n\t#Clear listbox\n\topenListbox.secureClear()\n\tif len(files) > 0:\n\t\t#Iterate\n\t\tfor file in files:\n\t\t\t#Get master pod name\n\t\t\tdisplayName=getRootName(file)\n\t\t\t#Create instance\n\t\t\tmasterPodInstance=loadMasterPodFromFile(file)\n\t\t\t#First check if the master pod is valid\n\t\t\tif masterPodInstance:\n\t\t\t\taddMasterPodToScreen(masterPodInstance)\n\t\t\telse:\n\t\t\t\tlog.report(\"Invalid Master Pod found\",tag=\"Important\")\n\n\telse:\n\t\tlog.report(\"No mp files found in directory\")\n\t\treturn None", "def get_tacacs_servers(self):\n\n cmd = 'show tacacs'\n output = self.iosapi.bcp_send_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) get_tacacs_servers() : Attempting to retrieve TACACS+ servers\" %(__name__))\n\n return(self.iosapi.textfsm_extractor('cisco_ios_show_tacacs.template', output))", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "async def _get_drone_list(self, apc_token: str) -> str:\n url = f\"{Cellular._ACADEMY_BASE_URL}/apiv1/drone/list\"\n headers = {\n HeaderField.authorization: f\"Bearer {apc_token}\",\n HeaderField.contentType: HeaderValue.appJson,\n HeaderField.xApiKey: Cellular._ACADEMY_SECRECT_KEY,\n HeaderField.userAgent: Cellular._USER_AGENT,\n }\n\n self.logger.info(\"get paired drone list\")\n response = await self._session.get(\n url,\n headers=headers,\n timeout=Cellular._TIMEOUT,\n )\n response.raise_for_status()\n drone_list = await response.text()\n\n return drone_list", "def atlas_projects():\n pass", "def get_master_menu_item_list():\n global _master_menu_item_list\n if _master_menu_item_list is None:\n _master_menu_item_list = [fn() for fn in hooks.get_hooks('register_admin_menu_item')]\n\n return _master_menu_item_list", "def test_return_to_assigned_master(\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n run_salt_cmds,\n):\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns", "async def async_set_master(self, master):\n self._master = master", "async def get_data(self) -> list[dict]:\n tasks = [self.miners[miner].get_api_data() for miner in self.miners]\n results = await asyncio.gather(*tasks)\n return results", "def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def list_servers(self, all_tenants=False):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers/detail\"\n if all_tenants:\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + self.project_info[\n \"project_id\"] + \"/servers/detail?all_tenants=1\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while listing servers.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List servers Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Servers List :%s \" % output)\n return output[\"servers\"]", "def listClusters():\n return [c['name'] for c in pymongo.Connection().clovr.clusters.find()]", "def getMyArmies(self):\n r = []\n for army in self.__armies:\n if (army.getOwner() == 1):\n r.append(army)\n return r", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def atlas_organizations():\n pass", "def start(self):\n\n dburl = dbconn.DbURL()\n gparray = GpArray.initFromCatalog(dburl, utility=True)\n numcontent = gparray.getNumSegmentContents()\n standby = gparray.standbyMaster\n master = gp.MasterStart(\"Starting Master Standby\",\n self.datadir, self.port, standby.dbid,\n 0, numcontent, None, None, None)\n # -w option would wait forever.\n master.cmdStr = master.cmdStr.replace(' -w', '')\n master.run(validateAfter=True)\n\n return master.get_results()", "def master_authorized_networks_config(self) -> 'outputs.MasterAuthorizedNetworksConfigResponse':\n return pulumi.get(self, \"master_authorized_networks_config\")", "async def async_join(self, master):\n master_entity = next(\n entity\n for entity in self.hass.data[DOMAIN][self._entry_id].clients\n if entity.entity_id == master\n )\n if not isinstance(master_entity, SnapcastClientDevice):\n raise TypeError(\"Master is not a client device. Can only join clients.\")\n\n master_group = next(\n group\n for group in self._client.groups_available()\n if master_entity.identifier in group.clients\n )\n await master_group.add_client(self._client.identifier)\n self.async_write_ha_state()", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "def _get_samba_clients(self):\n # TODO preferably use a library, don't assume localhost.\n logging.debug(\"inspecting samba...\")\n command = 'sudo smbstatus -p | sed -n 5p | tr -s \" \" | cut -d\" \" -f4'\n output = utils.run_os_command(command)\n return output.strip().split(\"\\n\")", "def get_possible_absentees(self) -> List[QualifiedAgent]:\n wum: WorklistUpdateManagerApi = self._service_provider.get_service(WorklistUpdateManagerApi)\n return self._rem_iter_handler.consume(\n wum.get_possible_absentees(),\n \"agents\",\n PossAbsRemoteIteratorApi,\n PossAbsRemoteIteratorApi.poss_abs_get_next,\n )", "def discover_servers():\n servers = set()\n for p in glob.glob1(SPDK_SERVER_APP_DIR, \"*\"):\n m = SERVERS_PATTERN.match(p)\n if m:\n servers.add(m.group())\n return list(servers)", "def get_cluster_mates(self):\n\n print \"Getting all cluster mates associated with cluster \", self.study_cluster\n cluster_mates = Study.query.filter(Study.study_cluster == self.study_cluster).all()\n\n return [cluster_mate.pmid for cluster_mate in cluster_mates]", "def is_master(self): \n\n master_access = (PermissionGroups.query\n .filter_by(group_name=\"Master\")\n .first())\n if self.has_auth_access(master_access):\n return True\n else:\n return False", "def get_owners_list(self):\n final_list = []\n for entry in self.bot_data_file[\"owners_data\"][\"owners_list\"]:\n final_list.append(str(entry[\"name\"]))\n if len(final_list) == 0:\n print(\"ERROR GETTING THE OWNERS LIST (i need at least 1 owner) - BOT ABORTING\")\n quit(1)\n else:\n return final_list", "def servers(self):\n return self._servers", "def Warmup(): # pylint: disable=unused-variable\n configs = luci_config.ListAllConfigs(\n datastore_client, _cache_timestamp=time.time() + 10)\n for _, revision, subscription in configs:\n luci_config.GetMatcher(revision, subscription)\n return jsonify({})", "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes", "def remotes():", "def master(self):\n return self._master", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def connect_to_master():", "def get_artist_list():\n return list(dmla.list_artists())", "def zoneadm():\n\n ret = run_cmd('/usr/sbin/zoneadm list -pc')\n\n if isinstance(ret, basestring):\n return [ret]\n else:\n return ret", "async def discover(self):\n\n def get_discovered_servers(discovery):\n servers = discovery.all()\n discovery.stop()\n return servers\n\n discovery = RoonDiscovery(None)\n servers = await self._hass.async_add_executor_job(\n get_discovered_servers, discovery\n )\n _LOGGER.debug(\"Servers = %s\", servers)\n return servers", "def GetActiveMaster(slavename=None, default=None):\n master_class_name = os.getenv('TESTING_MASTER')\n if master_class_name:\n return master_class_name\n\n master_class_name = os.getenv('INFRA_BUILDBOT_MASTER_CLASS_NAME')\n if master_class_name:\n return master_class_name\n\n slavename = slavename or GetActiveSlavename()\n for slave in GetAllSlaves():\n if slavename == EntryToSlaveName(slave):\n return slave['master']\n return default", "def test_controlplane_builder(os_info): # pylint disable=redefined-outer-name\n NOVA.servers.find = mock.MagicMock(\n return_value=DummyServer(\"master-1-test\",\n \"10.32.192.102\",\n Flavor('ECS.C1.4-8')))\n cpb = ControlPlaneBuilder(CONFIG, os_info)\n masters = cpb.get_masters()\n assert isinstance(masters[0], koris.cloud.openstack.Instance)\n assert masters[0].name == 'master-1-test'", "def _query(self):\n # When search matches no minions, salt prints to stdout.\n # Suppress stdout.\n _stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n\n self.local.cmd('*', 'saltutil.pillar_refresh')\n minions = self.local.cmd('*', 'pillar.get', ['minion_nodes'],\n tgt_type=\"compound\")\n sys.stdout = _stdout\n for minion in minions:\n if minions[minion]:\n return minions[minion]\n\n return []", "def master(self):\n\n return self._master", "def list_mc_servers(self, by_name=False, all_data=False):\n status, data, errors, messages = self._make_get_request(MCAPIRoutes.LIST)\n \n if status == 200:\n if by_name:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n return returnData\n if all_data:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n y += 1\n returnData[y] = items.get(\"running\", 0)\n y = y + 1\n returnData[y] = items.get(\"auto_start\", 0)\n return returnData\n del returnData\n else:\n return data['servers']\n elif status == 500:\n self._check_errors(errors, messages)", "def clients(self, server):\n servers = coordinator.get_job_servers(self.job)\n for sid, name in servers.iteritems():\n if name == server:\n # assuming sid is zero-based and all sid < number of servers:\n clpsv = (self.numclients + len(servers) - 1) / len(servers)\n clients = range(self.numclients)[sid * clpsv:(sid + 1) * clpsv]\n return [str(n) for n in clients]\n return []", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def getMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 1):\n r.append(p)\n return r", "def discover_master(self, service_name):\n for sentinel_no, sentinel in enumerate(self.sentinels):\n try:\n masters = sentinel.sentinel_masters()\n except (ConnectionError, TimeoutError):\n continue\n state = masters.get(service_name)\n if state and self.check_master_state(state, service_name):\n # Put this sentinel at the top of the list\n self.sentinels[0], self.sentinels[sentinel_no] = (\n sentinel, self.sentinels[0])\n return state['ip'], state['port']\n raise MasterNotFoundError(\"No master found for %r\" % (service_name,))" ]
[ "0.7359204", "0.7229313", "0.72076976", "0.67820305", "0.63927853", "0.63927853", "0.6004608", "0.5842549", "0.5835515", "0.5802098", "0.5770662", "0.57044643", "0.56874114", "0.564752", "0.5564717", "0.5553358", "0.55246377", "0.55142653", "0.5474071", "0.52970237", "0.52841556", "0.5279004", "0.52701896", "0.5219899", "0.51992273", "0.519834", "0.51799685", "0.5163755", "0.5155392", "0.510683", "0.51053786", "0.508178", "0.5063591", "0.50445443", "0.5025634", "0.5023599", "0.5019401", "0.50193334", "0.49959904", "0.49900404", "0.49838054", "0.49744368", "0.49737406", "0.49712473", "0.4969979", "0.49660802", "0.4937941", "0.49368918", "0.49272674", "0.49210274", "0.49145573", "0.49136958", "0.49091414", "0.490723", "0.48963004", "0.48917443", "0.48912492", "0.4888058", "0.4881354", "0.4876403", "0.4869053", "0.48534024", "0.48487228", "0.48481274", "0.48435667", "0.48428956", "0.48367372", "0.48328942", "0.4827956", "0.482407", "0.48144633", "0.48130238", "0.48125607", "0.4803403", "0.48018485", "0.4793487", "0.4792433", "0.47920573", "0.4787458", "0.47863162", "0.47839996", "0.47776702", "0.4771707", "0.47686243", "0.47595793", "0.47581127", "0.47511858", "0.47452396", "0.47438115", "0.47298938", "0.4727082", "0.47178003", "0.47169536", "0.4713791", "0.4701664", "0.46855924", "0.46853024", "0.4670567", "0.46665367", "0.4665924" ]
0.6722324
4
get the first frontend host of this starter
def get_frontend(self): servers = self.get_frontends() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrontend(self):\n return self.header['FRONTEND']", "def head_host(self) -> str:\n return self.head_args.host if self.head_args else None", "def get_host(self):\r\n return self.host", "def getHost():", "def getHost():", "def get_host(self):\n return self.host", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def nscaweb_host(self):\n return self.__get_option('nscaweb_host')", "def host():\n return platform.node()", "def get_homepage(resource):\n return resource.playlist.consumer_site.domain", "def get_host(request):\n return request.META[\"HTTP_HOST\"].split(\":\")[0]", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def getHost(self):\n host = self.url[self.host_head:self.host_tail]\n return host", "def host(self) -> str:\n return self.first_pod_args.host", "def getHostHead(self):\n return self.host_head", "def getHost(self):\n\n\t\treturn HOST", "def home(environ, start_response):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n http_host = 'frontpage.' + http_host\n return serve_space(environ, start_response, http_host)", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def getHost(self): #$NON-NLS-1$\r", "def host(self) -> \"IStageHost\":\n return self._values.get(\"host\")", "def host(self) :\n\t\ttry :\n\t\t\treturn self._host\n\t\texcept Exception as e:\n\t\t\traise e", "def getHost(self):\n return self._host", "def master_host(self) -> str:\n raise NotImplementedError", "def getBackend(self):\n return self.header['BACKEND']", "def host(self):\r\n return self._environ.get('HTTP_HOST', '')", "def get_host(req):\n return req.META[\"HTTP_HOST\"].split(\":\")[0]", "def wagtail_site():\n return Site.objects.get(is_default_site=True)", "def get_backend():\n global _ACTIVE_BACKEND\n if not _ACTIVE_BACKEND:\n _ACTIVE_BACKEND = locate(settings.SITE_BACKEND)()\n return _ACTIVE_BACKEND", "def host(self):\n return self._host[CONF_HOST]", "def host(self):\n return self._environ.get('HTTP_HOST', '')", "def getControllingHost(self):\r\n if len(self.controllingClient) > 0:\r\n return self.controllingClient.values()[0]\r\n else:\r\n return None", "def getHost(self):\n return self._host", "def fallback_host(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fallback_host\")", "def host(self):\n return self._host", "def host(self):\n return self._host", "def host(self):\n return self._host", "def host(self):\n return self._host", "def host(self):\n return self._host", "def host(self):\n return self._host", "def host(self):\n return self._host", "def __get_host(self) -> str:\n\t\treturn os.getenv('FLASK_DRIVER_HOST', '0.0.0.0')", "def url(self):\n return self.hs.hostname if self.active else None", "def horizon_server(horizon_servers):\n return horizon_servers[0]", "def 
host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"host\")", "def host(self) -> str:\n return pulumi.get(self, \"host\")", "def GetServerHost():\n return GetHostName(True)", "def get_compss_home(self):\n return self.compss_home", "def get_host_master_id(self):\r\n return self._handler.get_host_master_id()", "def host(self) -> str:\n return self._builder.host", "def host(self):\n return '127.0.0.1'", "def get_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def get_server_host(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetServerHost', self.handle)", "def get_server():\n pass", "def __get_host(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_HOST', 'localhost')", "def host(self):\n\n return self._host", "def getHost(self):\n return _libsbml.SBMLUri_getHost(self)", "def get_mail_host():\n portal = getSite()\n if portal is None:\n return None\n request = portal.REQUEST\n ctrlOverview = getMultiAdapter((portal, request), name='overview-controlpanel')\n mail_settings_correct = not ctrlOverview.mailhost_warning()\n if mail_settings_correct:\n mail_host = getToolByName(portal, 'MailHost', None)\n return mail_host", "def getHome(self):\n # host = getHostWithPort(url)\n # ui = UrlInfo(url)\n # host = url[ui.getHostHead():ui.getHostTail()]\n host = self.url[self.host_head:self.host_tail]\n return \"http://\" + host + \"/\"", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def primary_name_server(self) -> str:\n return pulumi.get(self, \"primary_name_server\")", "def get_hostname():\n global HOST\n if '.' in HOST:\n HOST = HOST.split('.')[0]\n return HOST", "def get(self, host):\n return self.__locusts__[host]", "def halo_host(self, index):\n halo = self.get_halo(index)\n return (\n halo\n if halo.name == halo[\"hostIndex\"]\n else self.halo_host(self.get_halo(halo[\"hostIndex\"]).name)\n )", "def halo_host(self, index):\n halo = self.get_halo(index)\n return (\n halo\n if halo.name == halo[\"hostIndex\"]\n else self.halo_host(self.get_halo(halo[\"hostIndex\"]).name)\n )", "def get_toolforge_hostname() -> Optional[str]:\n if socket.getfqdn().endswith('.tools.eqiad1.wikimedia.cloud'):\n return socket.gethostname()\n return None", "def __get_host(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_HOST', 'localhost')", "def host(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"host\")", "def get_hostname(self):\n # We set a default in install.py in case it isn't preseeded but when we\n # preseed, we are looking for None anyhow.\n return ''", "def fusion_api_get_hypervisor_host(self, uri=None, param='', api=None, headers=None): # pylint: disable=unused-argument\n return self.hypervisor_host.get(uri, api, headers, param='')", "def get_host_name():\n return socket.gethostname()", "def get_backend():\n return __SETTINGS__._BACKEND", "def get_frontends(self):\n ret = []\n for i in self.all_instances:\n if i.is_frontend():\n ret.append(i)\n return ret", "def get_host(name):\n raise NotImplementedError('derived class should overload me')", "def get_frontend_port(self):\n if self.frontend_port:\n return self.frontend_port\n return self.get_frontend().port", "def gethost(self):\n return self.__host", "def get_site(self):\n raise NotImplementedError", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def get_run_host(self):\n comp = self.get_run_from()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n 
host.fix_arguments()\n host = host.get_ip()\n return host", "def get_run_host(self):\n comp = self.get_run_from()\n host = (comp.host_ref\n if isinstance(comp.host_ref, basestring)\n else comp.host_ref.value())\n if isinstance(host, IPAddressable):\n host.fix_arguments()\n host = host.get_ip()\n return host", "def host(self) -> str:\n return self._host", "def host(self) -> str:\n return self._host", "def get_host(self) -> str:\n return self.socket.getsockname()[0]", "def app_url(self):\n return self.request.host_url", "def get_server(self):\n\n pass", "def localhost(self):\n return self.__get_option('localhost')", "def getCloudhost(self,systemObject):\n\n prefix = \"https://\"+systemObject[\"name\"]+systemObject[\"hcpAccount\"]+\".\"\n return self.removePrefix(systemObject[\"rootUrl\"],prefix)", "def get_from_host(cls, host, silent=False):\n if cls.search([], count=True) == 1:\n return cls.search([])[0]\n try:\n website, = cls.search([('name', '=', host)])\n except ValueError:\n if not silent:\n raise WebsiteNotFound()\n else:\n return website", "def host(self) -> str:\n return self.proto.host", "def get_home(self):\n return self.home", "def getHost(uniq):\n return Host(Cuebot.getStub('host').GetHost(\n host_pb2.HostGetHostRequest(id=uniq), timeout=Cuebot.Timeout).host)", "def get_time_server_host(self):\n if self.am_leader:\n return self.host\n if not self.time_server_set:\n return False\n try:\n return self.global_time_server.get_host()\n except socket.error:\n return False", "def host(self) -> str:\n return self.user.host", "def getSite(self, url):\n hostname = urlparse(urlparser).hostname\n site = sites.getSite(hostname)\n return site", "def hostname(self) -> Optional[str]:\n return pulumi.get(self, \"hostname\")" ]
[ "0.6992638", "0.6583565", "0.65169436", "0.6433285", "0.6433285", "0.6352371", "0.63019204", "0.63019204", "0.63019204", "0.62645936", "0.6246942", "0.62244636", "0.61690867", "0.6142307", "0.6142307", "0.61053056", "0.61049336", "0.6103162", "0.60861087", "0.60844123", "0.608103", "0.6038698", "0.6038698", "0.6038698", "0.60303277", "0.5974593", "0.596966", "0.5962287", "0.59497374", "0.5947828", "0.594498", "0.59398144", "0.5937128", "0.5931249", "0.59250146", "0.59152627", "0.5914271", "0.5897444", "0.5884887", "0.58832973", "0.58832973", "0.58832973", "0.58832973", "0.58832973", "0.58832973", "0.58832973", "0.58609825", "0.58580637", "0.58520293", "0.58213234", "0.5802797", "0.57961386", "0.5775358", "0.5734687", "0.57315713", "0.5729638", "0.57131606", "0.57043445", "0.5679449", "0.56465787", "0.56266826", "0.561557", "0.56015605", "0.55934966", "0.5586367", "0.5577473", "0.55722344", "0.5568957", "0.5567816", "0.5567816", "0.55490065", "0.5536024", "0.55342215", "0.5527158", "0.551946", "0.5494013", "0.54928404", "0.548189", "0.5476695", "0.5467477", "0.5464024", "0.5457891", "0.54507095", "0.54468256", "0.54468256", "0.54379886", "0.54379886", "0.54239005", "0.54197496", "0.54166603", "0.5409873", "0.54093397", "0.54068", "0.5402697", "0.53894705", "0.53876555", "0.5383029", "0.53814393", "0.5379885", "0.537952" ]
0.7496468
0
get the first dbserver of this starter
def get_dbserver(self): servers = self.get_dbservers() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stored_primary_server_name(db):\n if \"last_primary_server\" in db.collection_names():\n stored_primary_server = db.last_primary_server.find_one()[\"server\"]\n else:\n stored_primary_server = None\n\n return stored_primary_server", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_db(server_id):\n DATABASE = \"DATABASE\" + str(server_id)\n print(DATABASE)\n tracktop = _app_ctx_stack.top\n if not hasattr(tracktop, 'track_db0') and server_id == 0:\n tracktop.track_db0 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db0.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db1') and server_id == 1:\n tracktop.track_db1 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db1.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db2') and server_id == 2:\n tracktop.track_db2 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db2.row_factory = sqlite3.Row\n\n if server_id == 0:\n return tracktop.track_db0\n elif server_id == 1:\n return tracktop.track_db1\n else:\n return tracktop.track_db2", "def current_server():\n if not _current_server:\n create_server()\n return _current_server", "def get_server():\n pass", "def get_server(self):\n\n pass", "def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db", "def get_server(self):\n return self.__server", "def horizon_server(horizon_servers):\n return horizon_servers[0]", "def get_server(name):\n if name in SERVERZ:\n return SERVERZ[name]\n\n server = server_from_config(name)\n return start_server_thread(server)", "def get_server(index=-1):\n #returns a random server\n keys=__servers.keys()\n if index<0 or index>len(keys):key=random.choice(keys)\n else:key=keys[index]\n return (key,__servers[key])", "def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))", "def get_primary_db(force_new=False):\n defaults = get_defaults()\n if 'primary' in defaults.keys():\n primary_host = defaults['primary']\n else:\n raise IndraDatabaseError(\"No primary host available in defaults file.\")\n\n global __PRIMARY_DB\n if __PRIMARY_DB is None or force_new:\n __PRIMARY_DB = DatabaseManager(primary_host, label='primary')\n __PRIMARY_DB.grab_session()\n return __PRIMARY_DB", "def get_db():\n top = flask._app_ctx_stack.top\n if not hasattr(top, 'shelve'):\n top.shelve = MODEL\n\n return top.shelve", "def get_server(self, server):\n return self._get(_server.Server, server)", "def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()", "def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_server(self, id):\n\t\treturn 
self.__servers.get_server(id)", "def server(self):\n return self.the_server", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_single_db_name():\n expected_db_name = os.environ.get(\"MONGO_DB\")\n if not expected_db_name and is_testing():\n expected_db_name = f\"Test-{time.time_ns() // 1000000}\"\n\n return expected_db_name", "def get_dbs_obj(self):\n dbs_xml = self.get_DatabaseAndServer_XML()\n return self.get_DatabaseAndServer_obj(dbs_xml)", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def db_host(self) -> Optional[str]:\n return pulumi.get(self, \"db_host\")", "def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_db(_config):\n if not hasattr(flask.g, 'client'):\n flask.g.client, flask.g.db = connect_to_db(_config)\n return flask.g.client, flask.g.db", "def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def init(dbname=\"shiftspace\"):\n server = core.server()\n if not server.__contains__(dbname):\n print \"Creating database %s.\" % dbname\n server.create(dbname)\n else:\n print \"%s database already exists.\" % dbname\n db = server[dbname]\n loadDocs(db)", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def server(self):\n return self._server", "def server(self):\n return self._server", "def get_server():\n\n instance = Ceic._get_instance()\n\n return instance._ceic_configuration.server", "def _get_db(self):\n return DB(\n ClientStorage.ClientStorage((self.server, self.port))\n )", "def getDBSApi():\n if 'testbed' in dbs3_url:\n dbs3_url_reader = dbs3_url + '/dbs/int/global/DBSReader'\n else:\n dbs3_url_reader = dbs3_url + '/dbs/prod/global/DBSReader'\n\n from dbs.apis.dbsClient import DbsApi\n\n\n #this needs to come after /data/srv/wmagent/current/apps/wmagent/etc/profile.d/init.sh is sourced \n dbsApi = DbsApi(url = dbs3_url_reader)\n return dbsApi", "def get_database_dsn():\n return getattr(config, f\"POSTGRES_DSN_{config.SERVER_MODE}\")", "def server(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server\")", "def snitun_server(self) -> Optional[str]:\n return self._snitun_server", "def get_database(self):\n return self.database", "def select_server(self):\n pass", "def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:Saran1!@ds113736.mlab.com:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db", "def getDb(self):\n return self.db", "def get_db():\n if not hasattr(g, \"site_db\"):\n connection = pg.connect(\n dbname=\"dollsite\",\n user=\"dollsite\",\n password=app.config[\"DS_DB_PASSW\"]\n )\n g.site_db = connection\n return 
g.site_db", "def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(str(current_app.config['DATABASE']))\n return g.db", "def get_db():\n if not hasattr(g, 'mysql_db'):\n g.mysql_db = connect_db()\n return g.mysql_db", "def get_db(db=None):\n if db is None:\n db = ideagenstest\n return get_mongodb(db['url'],\n db['port'],\n db['dbName'],\n db['user'],\n db['pswd'])", "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db", "def get_db():\n if not hasattr(g, 'postgres_db'):\n g.postgres_db = connect_db()\n return g.postgres_db", "def _get_db(self):\n gt_db = ...\n return gt_db", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db", "def get_datasource_of():\n global datasource_of\n\n if not datasource_of:\n datasource_of = stixhelpers.datasource_of()\n \n return datasource_of", "def get_tgis_database():\n global tgis_database\n return tgis_database", "def get_db_conn(server, database, version='sde.DEFAULT'):\n scratch_work = arcpy.env.scratchFolder\n conn_name = 'temp__{}_{}'.format(server, database)\n conn_path = '{}//{}.sde'.format(scratch_work, conn_name)\n\n with TempOverwrite():\n arcpy.CreateDatabaseConnection_management(\n scratch_work,\n conn_name,\n database_platform='SQL_SERVER',\n instance=server,\n account_authentication='OPERATING_SYSTEM_AUTH',\n database=database,\n version=version\n )\n\n return conn_path", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db", "def primary_name_server(self) -> str:\n return pulumi.get(self, \"primary_name_server\")", "def current_db(self):\n return self._current_db", "def get_db():\n if not hasattr(g, \"sqlite_db\"):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n 
g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n conn = g.get('sqlite_db', None)\n if conn is None:\n conn = g.sqlite_db = connect_db()\n return conn", "def find_best_server(filename):\n servers = [si for si in notebookapp.list_running_servers()\n if filename.startswith(si['notebook_dir'])]\n try:\n return max(servers, key=lambda si: len(si['notebook_dir']))\n except ValueError:\n return None", "def server():\n\n server = client.Server(host=host, auth=auth)\n try:\n server.delete_db(test_db_name)\n except excepts.DBNotExists:\n pass\n return server", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def getDbHostName():\n\n if \"DB_HOST\" in controller.CONF.keys():\n return controller.CONF[\"DB_HOST\"]\n\n return basedefs.DB_HOST", "def get_db():\r\n if not hasattr(g, 'sqlite_db'):\r\n g.sqlite_db = connect_db()\r\n return g.sqlite_db", "def get_server(self, request, server_id):\n raise NotImplementedError", "def get_db():\n global _cached\n if not _cached:\n _cached = MongoClient(config.DB_URI).get_database()\n return _cached", "def get_time_server_host(self):\n if self.am_leader:\n return self.host\n if not self.time_server_set:\n return False\n try:\n return self.global_time_server.get_host()\n except socket.error:\n return False", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(current_app.config['DB_NAME'])\n return db", "def isolate_server(self):\n assert self._server\n return self._server", "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def get_default_database(self):\n attr_name = mangle_delegate_name(self.__class__, '__default_database_name')\n default_db_name = getattr(self.delegate, attr_name)\n if default_db_name is None:\n raise ConfigurationError('No default database defined')\n\n return self[default_db_name]", "def getserver(self, id):\n try:\n server = XbmcServers.selectBy(id=id).getOne()\n return {\n 'id': server.id,\n 'name': server.name,\n 'host': server.host,\n 'port': server.port,\n 'username': server.username,\n 'password': server.password,\n 'mac': server.mac\n }\n 
except SQLObjectNotFound:\n return None", "def database():\n return conf().database" ]
[ "0.7175073", "0.68898433", "0.6632575", "0.6597116", "0.65586126", "0.6534296", "0.6407233", "0.63766783", "0.63378835", "0.6261345", "0.6257781", "0.61878526", "0.6167295", "0.6121482", "0.61021817", "0.61021364", "0.6087449", "0.6070265", "0.6051888", "0.599604", "0.59832543", "0.59824157", "0.5965016", "0.59570634", "0.5956557", "0.5927891", "0.5926108", "0.5922536", "0.59124905", "0.59088445", "0.59036994", "0.5898408", "0.58968306", "0.58630055", "0.58576536", "0.5829562", "0.5829562", "0.5814553", "0.58102715", "0.5805612", "0.5793248", "0.5793108", "0.57897514", "0.578638", "0.5777807", "0.57757676", "0.5769913", "0.5763525", "0.5755215", "0.5748638", "0.5732535", "0.57322085", "0.57309675", "0.57280344", "0.57068735", "0.5675356", "0.5670649", "0.56705946", "0.5651922", "0.5646559", "0.5635445", "0.56349707", "0.562959", "0.56277543", "0.5623728", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.5606548", "0.56044257", "0.560295", "0.5595893", "0.5595391", "0.5595391", "0.5588754", "0.5585445", "0.55735826", "0.5573167", "0.55611914", "0.55472755", "0.5546713", "0.55456626", "0.5539873", "0.55337936", "0.5530717", "0.551405" ]
0.8454486
0
get the first agent of this starter
def get_agent(self): servers = self.get_agents() assert servers, "starter: have no instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent(self):\n return self.__agent", "def agent(self) -> Entity:\n return self.__agent", "def getfirstbot(self):\n\n return self.bots[0]", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def get_effective_agent(self):\n raise Unimplemented()", "def _get_solver_agent(self):\n # Determine selectable agent(s)\n sctx = self.context.solver\n\n alist = sctx.agent\n if alist is None:\n # Return empty solver agent\n return CpoSolverAgent(self, sctx.params, sctx)\n elif not (is_string(alist) or is_array(alist)):\n raise CpoException(\"Agent identifier in config.context.solver.agent should be a string or a list of strings.\")\n\n # Create agent\n if is_string(alist):\n aname = alist\n agent = self._create_solver_agent(alist)\n else:\n # Search first available agent in the list\n agent = None\n aname = None\n errors = []\n for aname in alist:\n try:\n agent = self._create_solver_agent(aname)\n break\n except Exception as e:\n errors.append((aname, str(e)))\n # Agent not found\n errstr = ', '.join(a + \": \" + str(e) for (a, e) in errors)\n raise CpoException(\"Agent creation error: \" + errstr)\n\n # Log solver agent\n sctx.log(1, \"Solve model '\", self.model.get_name(), \"' with agent '\", aname, \"'\")\n agent.process_infos[CpoProcessInfos.SOLVER_AGENT] = aname\n return agent", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def _deads_step_first(self) -> AgentID:\n _deads_order = [\n agent\n for agent in self.agents\n if (self.terminations[agent] or self.truncations[agent])\n ]\n if _deads_order:\n self._skip_agent_selection = self.agent_selection\n self.agent_selection = _deads_order[0]\n return self.agent_selection", "def agent_class(self):\r\n return self._agent_class", "def getFirstWorker(self):\n return self.entries[0]", "def get_first_incident_node(self):\n return self.first_incident_node # return the first incident node", "def whoGoesFirst(self):\n\t\treturn random.randint(1, 2)", "def getAgentID(self):\n\t\treturn self.agentID", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def start(self):\n return self._args[0]", "def retrieve(cls: Type[T], agent_id: int, datastore: Datastore) -> T:\n agent = cls.optionally_retrieve(agent_id, datastore)\n if agent is None:\n raise NotFound\n return agent", "def take_min(self):\n return self.get_first()", "def agent_init(self):\n pass", "def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)", "def get_first_step(self):\n return self.get_step_by_index(0)", "def first(self) -> Task:\n return self._tasks[0]", "def agent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_id\")", "def first(self):", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def reserve_next_agent_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('agents', 'agent_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return 
cur.fetchone()[0]", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def first(self, trace):\n return trace[0]", "def systems_manager_agent(self) -> Optional['outputs.ImageRecipeSystemsManagerAgent']:\n return pulumi.get(self, \"systems_manager_agent\")", "def __init__(self, agent):\n self.agent = agent", "def first_attempt(self) -> 'outputs.AttemptResponse':\n return pulumi.get(self, \"first_attempt\")", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]", "def get_first(self):\n return self.A[1][0] if self.n > 0 else None", "def get_first_lesson(module):\n try:\n return get_all_lessons(module)[0]\n except IndexError:\n return None", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def agent_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"agent_id\")", "def get_first_item(cls):\n return cls.query.first()", "def get_agent_of_model(self, model):\n if model.id in self.agents:\n return self.agents[model.id]\n elif model.id in self.dead_agent_store:\n return self.dead_agent_store[model.id]\n raise ValueError('agent of given model does not exist')", "def get(self):\n payload = {\n 'started': agentstart.isStarted,\n 'running': agentstart._connected, # TODO: Private variable exposed here (soft)\n 'modules': ['discovery', 'identification', 'cau_client', 'categorization', 'policies'],\n 'discovery': not agentstart.discovery_failed if agentstart.discovery_failed is not None else False,\n 'identification': not agentstart.identification_failed if agentstart.identification_failed is not None else False,\n 'cau_client': not agentstart.cauclient_failed if agentstart.cauclient_failed is not None else False,\n 'categorization': not agentstart.categorization_failed if agentstart.categorization_failed is not None else False,\n 'policies': not agentstart.policies_failed if agentstart.policies_failed is not None else False\n }\n payload.update({'discovery_description': 'detectedLeaderID: \\\"{}\\\", MACaddr: \\\"{}\\\"'.format(\n agentstart.detectedLeaderID, agentstart.MACaddr) if payload.get(\n 'discovery') else 'Discovery not started or error on trigger.'})\n payload.update({'identification_description': 'IDKey: \\\"{}\\\", deviceID: \\\"{}\\\"'.format(agentstart.IDkey,\n agentstart.deviceID) if payload.get(\n 'identification') else 'Identification not started or error on trigger.'})\n payload.update(\n {'categorization_description': 'Started: {}'.format(agentstart.categorization_started) if payload.get(\n 'categorization') else 'RCategorization not started or error on trigger.'})\n payload.update({'policies_description': 'LPP: {}'.format(agentstart.arearesilience_started) if payload.get(\n 'policies') else 'Policies (LPP) not started or error on trigger.'})\n payload.update(\n {'cau_client_description': 'authenticated: {}, secureConnection: {}'.format(agentstart.isAuthenticated,\n 
agentstart.secureConnection) if payload.get(\n 'cau_client') else 'CAU_client not started or error on trigger.'})\n if agentstart.discovery_switched is not None:\n payload.update({'leader_discovery_description' : '{}'.format(agentstart.discovery_switched)})\n return payload, 200", "def get_center_agent_traj(argo_loader_obj: \"ArgoverseForecastingLoader\") -> np.ndarray:\n return argo_loader_obj.agent_traj", "def agent_id(self):\n return self._agent_id", "def agent_start(self, state):\n self.sum_rewards = 0\n self.episode_steps = 0\n self.last_state = np.array(state)\n self.last_action = self.policy(self.last_state)\n return self.last_action", "def min(self):\n return self.get_first()", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def get_target(self, agent, collective, topciprefix=None):", "def first(self):\n self._ll_tree.first()", "def _oneInteraction(self):\n self.stepid += 1\n self.agent.integrateObservation(self.task.getObservation())\n self.task.performAction(self.agent.getJointAction())\n reward = self.task.getReward()\n self.agent.giveJointReward(reward)\n return reward", "def who_goes_first():\r\n\r\n coin = random.choice([0, 1]) # flip a coin to decide who goes first\r\n return coin", "def first(self, **opts):\n return self.parser.first(search_inside=self, **opts)", "def take_first(info):\n return info[0]", "def get_initiator(self):\n out, err = self.execute('/usr/sbin/iscsiadm', 'list', 'initiator-node')\n\n # Sample first line of command output:\n # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217\n initiator_name_line = out.splitlines()[0]\n return initiator_name_line.rsplit(' ', 1)[1]", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global", "def agent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"agent_id\")", "def agent_start(self, state):\n\n # This agent doesn't care what state it's in, it always chooses\n # to move left or right randomly according to self.probLeft\n self.prevAction = self._choose_action()\n\n return self.prevAction", "def get_start_point(self):\n return self.first_point", "def who_plays_first():\n random = randrange(0, 2)\n if random 
== 0:\n return globals()['computer']\n else:\n return globals()['player']", "def getFirstInstruction(self) -> ghidra.program.model.listing.Instruction:\n ...", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def first_claim(self, key: str, default=None):\n if key not in self._itempage.claims:\n return default\n if not self._itempage.claims[key]:\n return default\n return self._itempage.claims[key][0].getTarget()", "def primary_step(self) -> 'outputs.PrimaryStepResponse':\n return pulumi.get(self, \"primary_step\")", "def get_first_seg(*args):\n return _ida_segment.get_first_seg(*args)", "def _get_actor_from_user(user: AbstractUser) -> Optional[Agent]:\n if user.lti_remote_user_id and user.lti_consumer.url:\n return Agent(\n account=AgentAccount(\n name=user.lti_remote_user_id, home_page=user.lti_consumer.url\n )\n )\n return None", "def who_goes_first(self):\n if random.randint(0, 1) == 0:\n return 'computer'\n return 'player'", "def agent_session(self):\n return self._agent_session", "def first(self):\n return self.__head", "def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)", "def get_agent_class(alg):\n if alg == \"PPORND\":\n # TODO: testing\n return rnd.PPORNDAgent\n else:\n return rllibagent.get_agent_class(alg)", "def _get_vehicle(self, vehicle):\n vehicle_bp = self.blueprint_lib.find(vehicle)\n spawn_point = random.choice(self.world.get_map().get_spawn_points())\n self.last_location = spawn_point.location\n vehicle = None\n for _ in range(10):\n vehicle = self.world.try_spawn_actor(vehicle_bp, spawn_point) # this will return None if spawning failed\n if vehicle:\n break\n if not vehicle:\n raise(\"Spawning failed, perhaps there are too many actors already in world\")\n \n return vehicle", "def arc_agent_profile(self) -> Optional[pulumi.Input['ArcAgentProfileArgs']]:\n return pulumi.get(self, \"arc_agent_profile\")", "def choose_first():\n rand = random.randint(1, 2)\n print(f\"The first is Player-{rand}\")\n return rand", "def do_get_target_for(self, args):\n if (len(args.split()) < 1):\n self.__bad_arguments(\"get_target_for\")\n else:\n AssassinsManager.get_target(self.assassins_manager, args.split()[0])", "def _get_start(self):\n return self._start", "def get(self, name):\n validate_inputs({'name': name})\n return get_storage_manager().get(models.Agent, name)", "def first_execution_from(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_execution_from\")", "def first(self, energy_type: Type[Energy]) -> Optional[TypeEnergy]:\n return self._next(self, energy_type=energy_type)", "def main():\n settings = Settings()\n agent = Agent(settings)\n agent.start()\n\n # The agent is executing\n try:\n agent.join()\n except KeyboardInterrupt:\n agent.stop()", "def _single_agent_step(self, action):\n reward = 0.0\n done = False\n self.timestep += 1\n state, player_id = 
self.game.step(action)\n while not self.game.is_over() and not player_id == self.active_player:\n self.timestep += 1\n action, _ = self.model.agents[player_id].eval_step(\n self._extract_state(state)\n )\n if not self.model.agents[player_id].use_raw:\n action = self._decode_action(action)\n state, player_id = self.game.step(action)\n\n if self.game.is_over():\n reward = self.get_payoffs()[self.active_player]\n done = True\n state = self.reset()\n return state, reward, done\n\n return self._extract_state(state), reward, done", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def server_agent():", "def start_episode(self):\n self.last_sensation = self.env()\n self.next_action = self.agent(self.last_sensation)", "def first(self):\r\n return self.__head", "def show_agent(self, agent, **_params):\r\n return self.get(self.agent_path % (agent), params=_params)", "def get_standard_agent(agents, id = None):\n _id = id\n if not _id:\n _id = Agent.DEFAULT_ID\n if len(agents) <= 0:\n raise ValueError('Debe ingresar almenos un agente')\n elif len(agents) == 1:\n return Agent(_id, agents[0].skills)\n\n id_skills = agents[0].skills.keys()\n cantidad_agentes = len(agents)\n skills_media = {id_habilidad: None for id_habilidad in id_skills}\n for id_habilidad in id_skills:\n skills_media[id_habilidad] = reduce((lambda x, y: x + y),\n [agent.skills[id_habilidad] for agent in agents]) / float(\n cantidad_agentes)\n return Agent(_id, skills_media)", "def first_execution_from(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"first_execution_from\")", "def __init__(self, agent: AEA) -> None:\n self._agent = agent\n super().__init__()", "def get_supervisor(self):\n return self.supervisor", "def first_potential(self) -> Optional[PotentialEnergy]:\n return self.first(energy_type=PotentialEnergy)", "def first_value(self):\n return self.samples[0]", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def self(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/self')", "def trainer(self):\n return self._trainer", "def male_first():\r\n cursor.execute('SELECT name FROM male order by RANDOM() limit 1')\r\n return cursor.fetchone()[0]", "def get_first_element(dataset):\n return dataset.first()", "def _hit_start_get(self):\n return self._hit_start", "def getScene():\n #print \"servers direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def start(self):\n return self._get('start')", "def get_acs_agent(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)" ]
[ "0.66546506", "0.6309816", "0.61794835", "0.5938089", "0.5912412", "0.5894221", "0.58619547", "0.58138776", "0.57892704", "0.57603174", "0.5742176", "0.57214665", "0.5696438", "0.568106", "0.5669796", "0.5664195", "0.5583705", "0.55532354", "0.55385774", "0.5535078", "0.5532041", "0.5520238", "0.5484903", "0.5462891", "0.5456885", "0.5450928", "0.5431759", "0.54299176", "0.54169655", "0.53836733", "0.5365843", "0.5350818", "0.5316329", "0.53100103", "0.53082937", "0.5305096", "0.53049093", "0.53040695", "0.5301934", "0.5290734", "0.52859086", "0.52853537", "0.5276837", "0.5275766", "0.526981", "0.5266044", "0.5253071", "0.52470976", "0.5183495", "0.51794195", "0.51699626", "0.51668036", "0.516078", "0.516078", "0.51594883", "0.5152194", "0.51507306", "0.5135693", "0.51280266", "0.5126293", "0.5123966", "0.5118116", "0.5096514", "0.50927204", "0.50913215", "0.50868165", "0.5083615", "0.5078274", "0.5069969", "0.50556123", "0.5049033", "0.5044121", "0.5043173", "0.5033086", "0.50266755", "0.5022337", "0.5018636", "0.50168544", "0.50125587", "0.50053954", "0.49980697", "0.4991086", "0.49892768", "0.49879098", "0.4987221", "0.49816716", "0.49723622", "0.49693313", "0.49683365", "0.49559483", "0.49556485", "0.49506137", "0.49416175", "0.4935449", "0.49217355", "0.49187988", "0.49174228", "0.4916801", "0.49145895", "0.49137518" ]
0.76360667
0
get the first arangosync master of this starter
def get_sync_master(self): servers = self.get_sync_masters() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"master\")", "def getMaster(self, base_path, filename='picloud.json'):\n\t\tmaster = None\n\t\tfor l in listdir(base_path) :\n\t\t\tpath = base_path + \"/\" + l\n\t\t\tmaster = self.checkIs('master', path, '', filename)\n\t\t\tif master != None :\n\t\t\t\treturn master\n\n\t\treturn None", "def FindMasterUsingChubby(ver):\n return core_utils.GetGSAMaster(ver, install_utilities.is_test(ver))", "def master_id(self):\r\n return self._arm.master_id", "def master(self):\n return self._master", "def master(self):\n\n return self._master", "def master(self):\n return self.remappers[self._master_name]", "def getMain(self):\n\n if self.__projects:\n return self.__projects[0]\n else:\n return None", "def GetActiveMaster(slavename=None, default=None):\n master_class_name = os.getenv('TESTING_MASTER')\n if master_class_name:\n return master_class_name\n\n master_class_name = os.getenv('INFRA_BUILDBOT_MASTER_CLASS_NAME')\n if master_class_name:\n return master_class_name\n\n slavename = slavename or GetActiveSlavename()\n for slave in GetAllSlaves():\n if slavename == EntryToSlaveName(slave):\n return slave['master']\n return default", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'", "def current_master_version(self) -> str:\n return pulumi.get(self, \"current_master_version\")", "def stallone_master(machine: Machine) -> StalloneMaster:\n return StalloneMaster(machine, name=\"stallone\", add_to_default_env=True)", "def get_master(self):\n\n def watcher(watched_event):\n if watched_event.type and watched_event.path:\n msg = \"child changed, try to get master again.type %s, state %s, path %s.\" % (\n watched_event.type, watched_event.state, watched_event.path)\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", msg))\n self.workers = self.get_workers()\n logger.debug(\"watcher call get_master start\")\n self.get_master()\n logger.debug(\"watcher call get_master end\")\n\n try:\n children = self.zk.get_children(self.LEADERSHIP_PATH, watcher)\n except:\n logger.error(traceback.format_exc())\n return\n\n # self register\n infos = []\n for child in children:\n data, stat = self.zk.get(self.LEADERSHIP_PATH + \"/\" + child)\n infos.append(data)\n\n # make sure leadship and services exists\n if self.info not in infos or \\\n not self.zk.exists(self.SERVICE_PATH + \"/\" + self.info):\n logger.debug(\"get_master call register start\")\n self.register_leadership()\n self.register_service()\n logger.debug(\"get_master call register end\")\n\n children.sort()\n logger.debug(\"%s's children: %s\" % (self.LEADERSHIP_PATH, children))\n # check if I'm master\n self.master = children[:self.MASTER_NUM]\n if self.path in self.master:\n self.is_master = True\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", \"I am master!\"))\n # get slave status and assign undone task to them\n online_workers = self.get_workers()\n self.assign_task(online_workers)\n self.workers = online_workers", "def is_master(self):\n return self._is_master", "def organization_master_id(self) -> int:\n return pulumi.get(self, \"organization_master_id\")", "def master_mix(self):\n return composition_module.ReagentComposition(\n self._get_attr('master_mix_id'))", "def 
SyncClockMaster(self):\n if self.force_auto_sync:\n self.get('SyncClockMaster')\n return self._SyncClockMaster", "def get_host_master_id(self):\r\n return self._handler.get_host_master_id()", "def master_account(self):\n return self._master_account", "def get_master_url(self, identifier) -> None:\n # TODO(victorhc): Implement the following method to fetch the cluster\n # master_url from Dataproc.\n return '.'.join([\n self.cluster_metadata.project_id,\n self.cluster_metadata.region,\n self.cluster_metadata.cluster_name\n ])", "def is_master(self):\n return MPControl.is_master", "def choose_master(searchname):\n masters = get_masters()\n masternames = []\n master_lookup = {}\n for mn, path in masters:\n master = {}\n master['mastername'] = mn\n master_lookup[mn] = path\n masternames.append(master)\n\n candidates = [mn for mn in masternames if mn['mastername'] == searchname]\n\n errstring = 'string \\'%s\\' matches' % searchname\n master = only_get_one(candidates, 'mastername', errstring)\n if not master:\n return None\n\n return master_lookup[master]", "async def sync_master(self):\n if not [entity for entity in self._casatunes_entities() if entity.is_client]:\n await self.coordinator.data.zone_master(self.zone_master, False)\n await self.coordinator.async_refresh()\n _LOGGER.debug(\"%s zone is no longer master.\", self.zone_master)", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_master_key():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><masterkey-properties></masterkey-properties></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_sync_master_port(self):\n self.sync_master_port = None\n pos = None\n sm_port_text = \"Starting syncmaster on port\"\n sw_text = \"syncworker up and running\"\n worker_count = 0\n logging.info(\"detecting sync master port\")\n while worker_count < 3 and self.is_instance_running():\n progress(\"%\")\n lfs = self.get_log_file()\n npos = lfs.find(sw_text, pos)\n if npos >= 0:\n worker_count += 1\n pos = npos + len(sw_text)\n else:\n time.sleep(1)\n lfs = self.get_log_file()\n pos = lfs.find(sm_port_text)\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n if pos >= 0:\n pos = pos + len(sm_port_text) + 1\n self.sync_master_port = int(lfs[pos : pos + 4])\n return self.sync_master_port", "def get_master_offer(self):\n return Offer.objects.get(is_master=True)", "def master_screen(self):\n return self.screen_manager.master_screen", "async def async_set_master(self, master):\n self._master = master", "def series_master_id(self):\n if \"seriesMasterId\" in self._prop_dict:\n return self._prop_dict[\"seriesMasterId\"]\n else:\n return None", "def is_master(self):\n return self.itunesAttributes.get('Master', False)", "def getMain(self):\n\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n\n if appName.startswith(\"app:\"):\n appParent = appPath.rsplit(\"/\", 1)[0]\n relative = f\"{appParent}{relative}\"\n elif org is None or repo is None:\n appPathRep = f\"{appPath}/\" if appPath else \"\"\n relative = f\"{appPathRep}{appName}\"\n self.checkout = \"local\"\n\n if not self.getModule(org, repo, prefixSlash(relative), checkout, isBase=True):\n self.good = False", "def 
master_name(self):\n return self._LAUNCHPAD_NAME", "def first_rsync_uri(xia):\n\n return first_uri_matching_prefix(xia, \"rsync://\")", "def parse_master_name(masterpath):\n _, tail = os.path.split(masterpath)\n sep = '.'\n hdr = 'master'\n chunks = tail.split(sep)\n if not chunks or chunks[0] != hdr or len(chunks) < 2:\n raise ValueError('unable to parse mastername from path! (%s)' % tail)\n return sep.join(chunks[1:])", "def masterPath(self):\n\t\treturn fl.File( self._path + '/master.data' )", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def is_master(self) -> bool:\n return self.zone.SharedRoomID and self.zone.MasterMode", "def master_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master_instance_name\")", "def get_main(self):\n return self.main", "def get_master_blinding_key(self):\n return self._jadeRpc('get_master_blinding_key')", "def get_master_widget(self, top: Widget=None) -> Widget:\n if top is None:\n top = self.c.frame.top\n master = top.leo_master or top\n return master", "def connectMasterScene():\n try:\n nuke.toNode('Viewer1').setInput(0, nuke.toNode('MASTER_SCENE'))\n except:\n print 'no master scene found!'", "def compute_master(context):\n\n # combine the transformed key with the header master seed to find the master_key\n master_key = hashlib.sha256(\n context.header.value.dynamic_header.master_seed.data +\n context.transformed_key).digest()\n return master_key", "def get_master_token(user, repo, name, config):\n url = \"{}/repos/{}/{}/master_tokens\".format(config['url_base'], user, repo)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n tokens = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n for token in tokens:\n if token['name'] == name:\n return token\n\n return None", "def get_master_gui(use_bcrypt):\n try:\n pw = pinentry.get_pin(description=\"Enter hashpass master password:\",\n prompt=\"Password:\")\n while pw is None or not hashpasslib.is_correct_master(pw):\n if pw == None:\n logging.warn(\"User canceled password entry.\")\n return None\n pw = pinentry.get_pin(description=\"Enter hashpass master password:\",\n prompt=\"Password:\",\n errormsg=\"That doesn't match the stored master.\")\n hashpasslib.use_master(pw, use_bcrypt)\n except pinentry.PinEntryException:\n logging.critical(\"Cannot use pinentry.\")\n sys.exit(-1)\n return None", "def master_instance_name(self) -> str:\n return pulumi.get(self, \"master_instance_name\")", "def get_main(self):\n if self.main:\n try:\n # Using `filter()` and indexing to get the first item is more robust that\n # using `get()`. 
There should only be one item with path that is current\n # but this avoids a `MultipleObjectsReturned` in cases when there is not.\n return self.files.filter(path=self.main, current=True).order_by(\n \"-created\"\n )[0]\n except IndexError:\n pass\n\n candidates = self.files.filter(\n Q(path__startswith=\"main.\") | Q(path__startswith=\"README.\"), current=True\n ).order_by(\"-modified\")\n if len(candidates):\n return candidates[0]\n\n return None", "def get_primary(genesis_file: str,\n wallet_name: str = DEFAULT_CHAOS_WALLET_NAME,\n wallet_key: str = DEFAULT_CHAOS_WALLET_KEY,\n pool: str = DEFAULT_CHAOS_POOL,\n timeout: Union[str,int] = DEFAULT_CHAOS_LEDGER_TRANSACTION_TIMEOUT,\n ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE,\n compile_stats: bool = True) -> str:\n primary = None\n if compile_stats:\n detect_primary(genesis_file,\n wallet_name=wallet_name,\n wallet_key=wallet_key,\n pool=pool,\n timeout=timeout,\n ssh_config_file=ssh_config_file)\n\n output_dir = get_chaos_temp_dir()\n with open(\"{}/primaries\".format(output_dir), 'r') as primaries:\n primary_dict = json.load(primaries)\n primary = primary_dict.get(\"current_primary\", None)\n\n return primary", "def master_instance_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"master_instance_name\")", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def getnode():\n try:\n configfile = os.environ['GET-UNREPORTED-RC']\n except KeyError:\n configfile = 'puppet-reissue-certs.conf'\n config = ConfigParser.SafeConfigParser()\n config.read(configfile)\n puppetmaster_connection = config.get('main','puppetmaster')\n if '@' in puppetmaster_connection:\n puppetmaster = puppetmaster_connection.split('@')[1]\n else:\n puppetmaster = puppetmaster_connection\n return puppetmaster", "def ForceMaster(node, is_testver):\n gsaport = core_utils.GSAMasterPort(is_testver)\n # ignore the result of forcemaster\n port_talker.TCPTalk(node, gsaport, 30, command='GET /forcemaster\\n')", "def get_master_id_from_feed(self, feed_id):\r\n return self._handler.get_master_id_from_feed(feed_id)", "def connect_to_master():", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def select_master_template(self):\n # Get path\n path, file_type = QtWidgets.QFileDialog.getOpenFileName(self, 'Open bestand', '', \"SWN bestand (*.swn)\")\n if not path:\n return None\n\n # Save path to project structure\n self.input_elements['mastertemplate'].set_value(path)", "def getfirstbot(self):\n\n return self.bots[0]", "def main():\n setup()\n master = Master()\n master.start()", "def getRoot(self):\r\n if self.isFrame: return self.frame\r\n else: return self.master", "def getMasterScript(self,town):\n c3 = iff(self.sleep[town][3],'',';')\n c4 = iff(self.sleep[town][4],'',';')\n return self.tsMaster.substitute(town=town,c3=c3,c4=c4)", "def get_masters():\n\n # note: ListMasters uses master.cfg hardcoded as part of its search path\n def parse_master_name(masterpath):\n \"\"\"Returns a mastername from a pathname to a master.\"\"\"\n _, tail = os.path.split(masterpath)\n sep = '.'\n hdr = 'master'\n chunks = tail.split(sep)\n if not chunks or chunks[0] != hdr or len(chunks) < 2:\n raise ValueError('unable to parse mastername from path! 
(%s)' % tail)\n return sep.join(chunks[1:])\n\n return [(parse_master_name(m), m) for m in chromium_utils.ListMasters()]", "def currenthead(self):\n return self.repo.head.object", "async def async_set_is_master(self, is_master):\n self._is_master = is_master", "def _createMaster(self):\n port = self.from_config['port']\n baudrate = self.from_config['baudrate']\n master_port = Serial(port, baudrate=baudrate)\n master = modbus_rtu.RtuMaster(master_port)\n master.set_verbose(True)\n master.set_timeout(10)#BRFIX\n assert master._serial.timeout == 10\n return master", "def starter_node_func(starter):\n return starter", "def getFirstWorker(self):\n return self.entries[0]", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def get_master_state (master_uri):\n uri = \"http://\" + master_uri + \"/master/state.json\"\n\n try:\n response = urlopen(uri)\n return loads(response.read())\n except URLError as e:\n logging.critical(\"could not reach REST endpoint %s error: %s\", uri, str(e.reason))\n raise", "def get_arm_build(self):\n return self.parent._build[1]", "def getSelectMasteredAsset(*args):\n path = \"\"\n #check that we have the correct tab open\n topTab = cmds.tabLayout(widgets[\"shotInfoAssListTLO\"], q=True, st=True)\n if topTab == \"ShotInfo\":\n cmds.warning(\"You must have the 'ProjAssets' tab open and select an asset or animation file\")\n return()\n if topTab == \"ProjAssets\":\n # determine which tab is open and get selected from that\n selTab = cmds.tabLayout(widgets[\"shotAssRigListTLO\"], q=True, st=True)\n assetRaw = \"\"\n asset = \"\"\n if selTab == \"Chars\":\n assetRaw = cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], q=True, si=True)\n if assetRaw:\n asset = assetRaw[0]\n assetPath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", asset))\n path = cFuncs.getAssetMaster(asset, assetPath, \"rig\")\n\n if selTab == \"Props\":\n assetRaw = cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], q=True, si=True)\n if assetRaw:\n asset = assetRaw[0]\n assetPath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", asset))\n path = cFuncs.getAssetMaster(asset, assetPath, \"rig\")\n \n if selTab == \"Sets\":\n assetRaw = cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], q=True, si=True)\n if assetRaw:\n asset = assetRaw[0]\n assetPath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", asset))\n path = cFuncs.getAssetMaster(asset, assetPath, \"rig\")\n \n if selTab == \"Anm\":\n #need to split this up\n var_shot = cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], q=True, si=True)\n if var_shot:\n asset, buf, shot = var_shot[0].partition(\".\")\n path = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.shotsFolder, shot, \"anm\", asset)))\n\n return(path)", "def primary(self) -> str:\n return pulumi.get(self, \"primary\")", "def get_master_address(self):\n if self.master_address:\n return self.master_address\n return super(CelerySentinelConnectionPool, self).get_master_address()", "def master_host(self) -> str:\n raise NotImplementedError", "def _master_evr(self):\n if not self._evr_alias:\n self._set_master_evr()\n\n return getattr(self, self._evr_alias)", "def get_initial_resource(client, api_id):\n response = client.get_resources(\n restApiId=api_id\n )\n return response['items'][0]", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n 
self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def get_main_headline(self, default=''):\n for segment in self.segments:\n if segment.headlines:\n return segment.headlines[0]\n return default", "def _determine_next_ott_id(self):\n if self._doc_counter_lock is None:\n self._doc_counter_lock = Lock()\n with self._doc_counter_lock:\n _LOG.debug('Reading \"{}\"'.format(self._id_minting_file))\n noi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True)\n if noi_contents:\n self._next_ott_id = noi_contents['next_ott_id']\n else:\n raise RuntimeError('Stored ottid minting file not found (or invalid)!')", "def root(tree):\n\n return tree[0]", "def GetPrefix():\n m = BRANCH_REGEX.match(RCS_FILE)\n if m:\n return m.group(1)\n return DEFAULT_DEPOT", "def GetLatestMilestone():\n # Use CQ Master target to get latest milestone.\n latest_url = LATEST_URL % {'target': constants.CQ_MASTER}\n gs_ctx = gs.GSContext()\n\n logging.info('Getting latest milestone from %s', latest_url)\n try:\n content = gs_ctx.Cat(latest_url).strip()\n\n # Expected syntax is like the following: \"R35-1234.5.6-rc7\".\n assert content.startswith('R')\n milestone = content.split('-')[0][1:]\n logging.info('Latest milestone determined to be: %s', milestone)\n return int(milestone)\n\n except gs.GSNoSuchKey:\n raise GetMilestoneError('LATEST file missing: %s' % latest_url)", "def get_master_contracts(self, exchange):\n self._master_contracts = self.api_call(\n endpoint=ApiEndpoint.MASTER_CONTRACT,\n method=\"GET\",\n query_params={\"exchange\": exchange}\n )", "def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)", "def get_global_submasters():\n global _submasters\n if _submasters is None:\n _submasters = Submasters()\n return _submasters", "def swap_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n grp.layout.cmd_swap_main()\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_shuffle_down()\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)", "def onMaster(self):", "def take_min(self):\n return self.get_first()", "def SyncRoot(self) -> object:", "def take_first(info):\n return info[0]", "def _claim_master_member_sequence_number(self, community, meta):\n assert isinstance(meta.distribution, FullSyncDistribution), \"currently only FullSyncDistribution allows sequence numbers\"\n sequence_number, = self._database.execute(u\"SELECT COUNT(*) FROM sync WHERE member = ? AND sync.meta_message = ?\",\n (community.master_member.database_id, meta.database_id)).next()\n return sequence_number + 1", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def masters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"masters\")", "def is_master(self): \n\n master_access = (PermissionGroups.query\n .filter_by(group_name=\"Master\")\n .first())\n if self.has_auth_access(master_access):\n return True\n else:\n return False" ]
[ "0.6829638", "0.6829638", "0.6791485", "0.6693696", "0.6673793", "0.6628041", "0.657018", "0.63935375", "0.62487674", "0.61431384", "0.6016861", "0.5940178", "0.5940178", "0.58904886", "0.5833857", "0.5826365", "0.57743245", "0.5721033", "0.57085377", "0.5699552", "0.56985486", "0.5694681", "0.5671085", "0.5630063", "0.56257045", "0.5607661", "0.5592494", "0.55874914", "0.5582253", "0.5556315", "0.5546762", "0.5528712", "0.5516121", "0.5499092", "0.5493481", "0.5489252", "0.54879314", "0.54865795", "0.54652625", "0.5463506", "0.5463506", "0.5463506", "0.54601055", "0.5449439", "0.5434881", "0.54328763", "0.5419518", "0.5416452", "0.53767973", "0.53529096", "0.5348852", "0.53391474", "0.53356874", "0.53319883", "0.5324083", "0.5320858", "0.53155845", "0.5310475", "0.5284128", "0.5268346", "0.52621627", "0.52604765", "0.5226032", "0.5224494", "0.52236557", "0.52219075", "0.52209383", "0.5193354", "0.5178486", "0.5171506", "0.516518", "0.51650923", "0.5155676", "0.5143285", "0.5141089", "0.51342046", "0.5116392", "0.51135975", "0.51126695", "0.51048005", "0.5104428", "0.5085632", "0.5085361", "0.50751203", "0.5066892", "0.50516564", "0.5048526", "0.50449026", "0.50173336", "0.50134724", "0.5012492", "0.5010641", "0.4999994", "0.49900293", "0.49879003", "0.49789107", "0.49752483", "0.49736091", "0.49736091", "0.49724495" ]
0.72852707
0
detect whether this manager manages this instance
def have_this_instance(self, instance): for i in self.all_instances: if i == instance: print("YES ITS ME!") return True print("NO S.B. ELSE") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active(self):\n return self in manager.handler", "def private_instance(self) -> bool:\n return pulumi.get(self, \"private_instance\")", "def is_running_manager(self) -> bool:\n return self.get_value(self._manager_running_attribute) == '1'", "def HasPerInstancePropertyProviders(self) -> bool:", "def __contains__(self, instance: object) -> bool:\n try:\n state = attributes.instance_state(instance)\n except exc.NO_STATE as err:\n raise exc.UnmappedInstanceError(instance) from err\n return self._contains_state(state)", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def is_alive(self):\n pass", "def is_alive(self):", "def isAlive(self):\n raise NotImplementedError", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def is_alive(self):\n return True", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def exists(self):\n\n if self:\n pass", "def needs_unique_instance(type_):\n return type_ in unique_instance_types", "def is_management(self):\n\n return self._is_management", "def is_running(self):\n\t\treturn self in _running", "def get_owner_object(self):\n return False", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. 
Base directory: {str(self.basedir)}\")", "def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False", "def testSingleton(self):\r\n self.assertEqual(id(self.res_mgr), id(ReservationManager()))", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_mgr():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.is_sac()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.is_xcat_mgr()\n\n return False", "def is_alive(self):\n return self.alive", "def is_alive(self):\n return self.alive", "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def get_is_active(self, instance):\n return instance.check_finish()", "def isAlive(self):\n return self.is_alive()", "def alive(self):\n return True", "def is_instantiated(self):\n if Session().checkSession(self.isp_id):\n session_id = Session().get_active_user_session(self.isp_id).id\n self.current_domain_id = Graph().get_last_graph(session_id).domain_id\n\n # TODO substitute this with a controller.get when the get status will be implemented on the UN\n # ask to orchestrator if the isp graph is instantiated\n user_nffg_file = User().getServiceGraph(self.isp_name)\n nffg = NFFG_Manager.getNF_FGFromFile(user_nffg_file)\n user_data = UserData(self.isp_name, self.isp_password, self.isp_tenant)\n orchestrator = GlobalOrchestrator(user_data, self.orchestrator_ip, self.orchestrator_port)\n return UserSession(self.isp_id, None).checkSession(nffg.id, orchestrator)", "def alive(self):\n return self._thread is not None", "def check(cls, control_instance):\n pass", "def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def isActive(self, handler):\n if self.plugin_manager:\n enable_manager = self.plugin_manager.EnableManager()\n enable_manager.initFrom(self.c,self.handler_path) \n return handler.__module__ in enable_manager.actives\n else:\n return True", "def can_start(cls, context):\n ob = context.active_object\n return ob is not None and ob.type == \"MESH\"", "def is_manager(self) -> bool:\n return self.role in EmployeeRole.manager_roles()", "def instance(self):\n return self.__instance", "def instance(self):\n return self._instance", "def is_actor(self):\n return True", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def is_usermanager(self):\n return False", "def instances_used(self):\n return None", "def get_instance(cls):\n if cls.__instance is None:\n cls.__guard = False\n cls.__instance = PvMonitors()\n cls.__guard = True\n return PvMonitors.__instance", "def isAlive(self):\n\n import wx\n\n if not fwidgets.isalive(self.parent):\n return False\n\n if isinstance(self.widget, wx.MenuItem):\n return fwidgets.isalive(self.menu)\n\n else:\n return fwidgets.isalive(self.widget)", "def isAlive(self):\n return self.state", "def isAlive(self):\n return self.state", "def GetManager(self):\r\n\r\n return self.manager", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False", "def IsAlive(self, *args, **kwargs):\n pass", "def is_swarm_manager(client: DockerClient) -> bool:\n info = client.info()\n swarm = 
info['Swarm']\n return swarm['LocalNodeState'] == 'active' and swarm['ControlAvailable']", "def test_manager(self):\n manager = ISubscriptionManager(self.root.document, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n manager = ISubscriptionManager(self.root, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n # They are not available on asset\n manager = ISubscriptionManager(self.root.file, None)\n self.assertEqual(manager, None)", "def is_alive(self):\n return self._is_alive", "def isManaged(self, QWidget): # real signature unknown; restored from __doc__\n return False", "def __instancecheck__(cls, instance):\r\n # Inline the cache checking when it's simple.\r\n subclass = getattr(instance, '__class__', None)\r\n if subclass in cls._abc_cache:\r\n return True\r\n subtype = type(instance)\r\n if subtype is subclass or subclass is None:\r\n if (cls._abc_negative_cache_version ==\r\n ABCMeta._abc_invalidation_counter and\r\n subtype in cls._abc_negative_cache):\r\n return False\r\n # Fall back to the subclass check.\r\n return cls.__subclasscheck__(subtype)\r\n return (cls.__subclasscheck__(subclass) or\r\n cls.__subclasscheck__(subtype))", "def is_instance(self, thing: Any) -> bool:\n return isinstance(thing, self.underlying)", "def is_valid(self) -> bool:\n from redun.scheduler import get_current_scheduler\n\n if self.type_name != self.__handle__.class_name:\n # Handle class_name might be out of date from deserialization.\n return False\n\n scheduler = get_current_scheduler()\n assert scheduler\n return scheduler.backend.is_valid_handle(self)", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.registration.child.family:\n return True\n return False", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)", "def is_active(self):\r\n return True", "def GetAuiManager(self):\r\n\r\n try:\r\n return self._auiManager\r\n except AttributeError:\r\n return False", "def _get_instance(self):", "def _get_instance(self):", "def check_destroy(self) -> bool:\r\n raise NotImplementedError", "def running(self):\n\t\treturn self._start is not None", "def getManager(self):\n return self._manager", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def exists(self):\n return self.obj is not None", "def is_managed(self):\n return getattr(self.local, 'managed', False)", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE", "def is_owner(self):\n return self._is_owner", "def test_manager_instances_success(self):\n spider_name = 'valid_metadata'\n path = 'tests/sample_spiders/' + spider_name\n os.environ['SPIDER_PATH'] = path\n\n m = SpiderManager()\n inst = m.instance(spider_name)\n self.assertEqual(inst.__name__, ValidMetadataSpider.__name__)", "def is_existing(self):\n return self.backend.is_existing", "def is_active(self) -> bool:", "def is_alive(self):\r\n return self.visible", "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager 
class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def use_instance_table(self, name, typename):\n if typename in ['VkInstance', 'VkPhysicalDevice']:\n return True\n # vkSetDebugUtilsObjectNameEXT and vkSetDebugUtilsObjectTagEXT\n # need to be probed from GetInstanceProcAddress due to a loader issue.\n # https://github.com/KhronosGroup/Vulkan-Loader/issues/1109\n # TODO : When loader with fix for issue is widely available, remove this\n # special case.\n if name in ['vkSetDebugUtilsObjectNameEXT', 'vkSetDebugUtilsObjectTagEXT']:\n return True\n return False", "def is_still_owner(self):\n raise tooz.NotImplemented", "def metrics_manager(self):\n return_var = False\n # check if default metrics already exist\n if not self.__check_default_metrics_exist():\n # default metrics must be created\n self.__create_default_metrics()\n return_var = True\n\n return return_var", "def is_Singleton(self):\n return self.size == 1", "def check_instance(self, class_name, inst_id, stored_objects):\n '''get '<class_name>.id' to FileStorage.__objects key format'''\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n \"\"\"given id does not exist\"\"\"\n print(\"** no instance found **\")\n instance = False\n return instance", "def running(self):\n # search for a PID file for the manager\n candidate_files = os.listdir(self.cache_dir_)\n for file_name in candidate_files:\n if file_name == MGR_PID_FILE:\n file_name = os.path.join(self.cache_dir_, file_name)\n\n try:\n f = open(file_name, 'r')\n self.server_pid_ = int(f.readline())\n except ValueError:\n logging.warning('Server PID file accessed while in creation')\n return False\n return True\n\n return False", "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def can_run(self):\n\t\treturn self._start is None", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", 
"def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True" ]
[ "0.678732", "0.6508173", "0.6324279", "0.6299621", "0.6208846", "0.62065923", "0.6194654", "0.6182245", "0.6136288", "0.6049219", "0.6000728", "0.5990074", "0.59753895", "0.59658927", "0.59321904", "0.59208906", "0.59013605", "0.5900331", "0.58949554", "0.5874805", "0.58741146", "0.58741146", "0.5842474", "0.5802455", "0.5802455", "0.5774061", "0.5773662", "0.574797", "0.5740506", "0.5731717", "0.5728709", "0.57283694", "0.5720846", "0.57112837", "0.5706347", "0.56924224", "0.56735784", "0.5664308", "0.56612206", "0.56503475", "0.56489503", "0.5642968", "0.5619748", "0.5608604", "0.5607637", "0.5607637", "0.56001437", "0.5582548", "0.558132", "0.5572643", "0.55719113", "0.5562404", "0.5562277", "0.5556215", "0.5549623", "0.5545654", "0.5542363", "0.5540422", "0.55396307", "0.55372083", "0.5534373", "0.5534373", "0.5528275", "0.55170757", "0.55164415", "0.55153936", "0.5504071", "0.5503663", "0.550313", "0.54987913", "0.54987913", "0.54987913", "0.54987556", "0.5476588", "0.54678637", "0.5467028", "0.5466322", "0.5457932", "0.5457812", "0.54544514", "0.5451658", "0.5437064", "0.5428123", "0.5422672", "0.54226124", "0.54124624", "0.541045", "0.54088867", "0.54023075", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589", "0.5392589" ]
0.58744586
20
get the essentials of all instances controlled by this starter
def get_instance_essentials(self): ret = [] for instance in self.all_instances: ret.append(instance.get_essentials()) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEssentialList(self):\n return self.essentials", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get_instances(cls):\n raise NotImplementedError", "def get_instance_classes():\n return Base_Instance.instance_classes", "def get_dev_examples(self):\n raise NotImplementedError()", "def get_sweeps(self):\n return self.master.get_sweeps()", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def show_instances():\n return get_instances()", "def all_experiments():\n elo_explain_experiments()\n alpha_beta_experiments()\n mtcs_experiments()", "def get_techniques_used_by_tools():\n global techniques_used_by_tools\n\n if not techniques_used_by_tools:\n techniques_used_by_tools = rsh.techniques_used_by_tools(get_srcs())\n \n return techniques_used_by_tools", "def instances(self):\n return self.get('instances')", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(cls):\n # clean garbage collected pkgs out of __instances\n cls.__instances[:] = [wkref for wkref in cls.__instances\n if wkref() is not None]\n # return instance references in a tuple\n pkgs = [wkref() for wkref in cls.__instances]\n return tuple(pkgs)", "def all_present_experiments(self):\n return _yield_subdir_names(self.exp_configs)", "def examples(self):\n return self._examples", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def experiments_init(self):\n pass", "def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': 
instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def get_examples(self, env):\n return self.fam.c_get_examples(self, env)", "def get_used_instances(self, instance):\n\n instances = list()\n\n for el in self.net_root.iter('block'):\n inst = el.attrib['instance']\n if instance in inst:\n if len(el.getchildren()) != 0:\n instances.append(get_root_cluster(el).attrib['name'])\n\n return instances", "def instances_used(self):\n return None", "def get_all_exclusives(self):\r\n if self.exclusives is None:\r\n self._propagate_exclusives()\r\n return self.exclusives", "def get_all_instance(self):\n\t\tself.batch_h = Variable(torch.from_numpy(self.config.batch_h)).cuda()\n\t\tself.batch_t = Variable(torch.from_numpy(self.config.batch_t)).cuda()\n\t\tself.batch_r = Variable(torch.from_numpy(self.config.batch_r)).cuda()\n\t\treturn self.batch_h, self.batch_t, self.batch_r", "def requires(self):\n\n return [\n SpecimenLevelExperimentCleaner(),\n MouseSpecimenCrossRef(),\n EmbryoSpecimenCrossRef(),\n ImpressExtractor(),\n ]", "def _entrypoint_iterator(self):\n return self._entry_points", "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more instances we expect to spawn the more patient:\n tries = 10 * self.expect_instance_count\n\n # Wait for forntend to become alive.\n all_instances_up = False\n while not all_instances_up and tries:\n self.all_instances = []\n detected_instances = []\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n for root, dirs, files in os.walk(self.basedir):\n for onefile in files:\n # logging.debug(\"f: \" + root + os.path.sep + onefile)\n if onefile.endswith(\"log\"):\n logfiles.add(str(Path(root) / onefile))\n\n for name in dirs:\n # logging.debug(\"d: \" + root + os.path.sep + name)\n match = None\n instance_class = None\n if name.startswith(\"sync\"):\n match = re.match(r\"(syncmaster|syncworker)(\\d*)\", name)\n instance_class = SyncInstance\n else:\n match = re.match(\n r\"(agent|coordinator|dbserver|resilientsingle|single)(\\d*)\",\n name,\n )\n instance_class = ArangodInstance\n # directory = self.basedir / name\n if match and len(match.group(2)) > 0:\n # we may see a `local-slave-*` directory inbetween,\n # hence we need to choose the current directory not\n # the starter toplevel dir for this:\n instance = instance_class(\n match.group(1),\n match.group(2),\n self.cfg.localhost,\n self.cfg.publicip,\n Path(root) / name,\n self.passvoid,\n self.cfg.ssl,\n self.cfg.version,\n self.enterprise,\n jwt=jwt,\n )\n instance.wait_for_logfile(tries)\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n detected_instances.append(instance.instance_type)\n 
self.all_instances.append(instance)\n\n print(self.expect_instances)\n detected_instances.sort()\n print(detected_instances)\n attach(str(self.expect_instances), \"Expected instances\")\n attach(str(detected_instances), \"Detected instances\")\n if (self.expect_instances != detected_instances) or (not self.get_frontends()):\n tries -= 1\n time.sleep(5)\n else:\n all_instances_up = True\n\n if not self.get_frontends():\n print()\n logging.error(\"STARTER FAILED TO SPAWN ARANGOD\")\n self.show_all_instances()\n logging.error(\"can not continue without frontend instance\")\n logging.error(\"please check logs in\" + str(self.basedir))\n for logf in logfiles:\n logging.debug(logf)\n message = \"if that does not help try to delete: \" + str(self.basedir)\n logging.error(message)\n raise Exception(message)\n self.show_all_instances()", "def get_all_cur_site_insts():\n return models.Curation_SiteInstance.objects.all()", "def noise_application_instances(self):\n # Add some \"noise\" application instances to the DB for every test, to\n # make the tests more realistic.\n factories.ApplicationInstance.create_batch(size=3)", "def configured_instances(hass):\n return set(\n \"{0}, {1}\".format(entry.data[CONF_LATITUDE], entry.data[CONF_LONGITUDE])\n for entry in hass.config_entries.async_entries(DOMAIN)\n )", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_instances(self) -> List[Instance]:\n big_bench_task: Dict = BIGBenchScenario.download_and_get_task(self.output_path, self.task, self.subtask)\n\n # From https://github.com/google/BIG-bench/blob/main/docs/doc.md#json-schema,\n # \"keywords\", \"description\" and \"examples\" are all required fields for a BIG-bench task.\n # keywords: \"A list of strings, where each string contains a separate keyword describing the task\"\n self.tags = big_bench_task[\"keywords\"]\n\n # description: \"A plaintext description of the task, suitable for a non-expert to perform the task and\n # potentially generate new examples.\"\n # Append the task, subtask and task-specific description from BIG-bench to `description`.\n self.description = (\n f\"{self.description} Task: {self.task} \"\n f\"{f'Subtask: {self.subtask} ' if self.subtask else ''} \"\n f\"Description: {big_bench_task['description']}\"\n )\n\n # examples: \"A list of dicts\"\n examples: List[Dict] = big_bench_task[\"examples\"]\n # Before splitting the data, shuffle the examples with a fixed seed for reproducibility.\n random.seed(0)\n random.shuffle(examples)\n\n # BIG-bench split the data according to\n # https://github.com/google/BIG-bench/blob/main/bigbench/bbseqio/README.md#splits:\n # all: This contains all the examples.\n # validation: This contains 20% of the examples or at least 16 examples.\n # train: All examples that are not in the validation split (generally 80% of the examples)\n # For few-shot eval, use the all split.\n #\n # TODO: I'm not sure what they mean by \"for few-shot eval, use the all split.\"\n # Does that mean they don't draw in-context examples from a separate train split?\n #\n # We split the data as follows:\n # test: This contains 20% of the examples or at least 16 examples.\n # validation: Same size as the test split.\n # train: Remaining examples, not in the test and validation splits.\n total_examples: int = len(examples)\n num_test_examples: int = max(int(0.2 * total_examples), BIGBenchScenario.MIN_TEST_EXAMPLES)\n num_train_examples: int = total_examples - num_test_examples * 2\n\n # Build `Instance`s from `examples`.\n instances: List[Instance] = 
[]\n for i, example in enumerate(examples):\n # Build references.\n references: List[Reference]\n\n # Each example has \"input\" and either \"target_scores\" or \"target\".\n if \"target_scores\" in example:\n # For \"target_scores\", BIG-bench compares target scores against the model's predicted probabilities:\n # \"The example score is then the target score (as specified in the target_scores dict) of the target\n # that received the highest probability. Scores are averaged across examples. Conventional\n # multiple-choice accuracy can be achieved by assigning the correct target a score of 1, and\n # all incorrect targets a score of 0.\"\n # It seems all BIG-bench Lite tasks with target scores either have a target score\n # of 0 (incorrect answer) or 1 (correct answer).\n # So, for now, `Reference`s with the highest target score are correct.\n highest_score = max(example[\"target_scores\"].values())\n references = [\n Reference(Output(text=target), tags=[CORRECT_TAG] if score == highest_score else [])\n for target, score in example[\"target_scores\"].items()\n ]\n elif \"target\" in example:\n # All the outputs in \"target\" are correct e.g., {\"input\": \"1 + 1 = \", \"target\": [\"two\",\"2\"]}.\n # \"target\" can either be a list of correct values or a single correct value.\n targets: List[str] = example[\"target\"] if type(example[\"target\"]) == list else [example[\"target\"]]\n references = [Reference(Output(text=target), tags=[CORRECT_TAG]) for target in targets]\n else:\n raise ValueError(f\"Invalid example that doesn't have `target` or `target_scores` field: {example}\")\n\n # Get split based on current index `i`.\n split: str\n if i < num_train_examples:\n split = TRAIN_SPLIT\n elif num_train_examples <= i < num_train_examples + num_test_examples:\n split = TEST_SPLIT\n else:\n split = VALID_SPLIT\n\n instances.append(Instance(Input(text=example[\"input\"]), references, split=split))\n\n return instances", "def get_scenarios(experiments):\n return {exp.scenario for exp in experiments}", "def get_shelves(self):\n return self.shelves.copy()", "def __iter__(self):\n return self.cli.essids.essids().__iter__()", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def knobs(self):\n return self.Knobs(self)", "def instance_classes(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_classes\")", "def generate_test_instances(self):\n for testproblem in self.testproblems:\n for solverconfig in self.solverconfigs:\n self.instances.append(TestInstance(testproblem, solverconfig))", "def get_frontends(self):\n ret = []\n for i in self.all_instances:\n if i.is_frontend():\n ret.append(i)\n return ret", "def get_instances(self) -> List[Instance]:\n\n def download_and_read_lines(file_name: str) -> List[str]:\n file_path: str = os.path.join(data_path, file_name)\n ensure_file_downloaded(\n source_url=MeQSumScenario.SOURCE_URL_TEMPLATE.format(file_name=file_name),\n target_path=file_path,\n unpack=False,\n )\n\n with open(file_path) as f:\n return f.read().splitlines()\n\n data_path: str = os.path.join(self.output_path, \"data\")\n ensure_directory_exists(data_path)\n\n instances: List[Instance] = []\n for split in ALL_SPLITS:\n dataset_split: str = \"val\" if split == VALID_SPLIT else split\n\n # The files with the questions end with \".source\"\n questions: List[str] = 
download_and_read_lines(f\"{dataset_split}.source\")\n\n # The files with the summaries end with \".target\"\n summaries: List[str] = download_and_read_lines(f\"{dataset_split}.target\")\n\n for question, summary in zip(questions, summaries):\n instances.append(\n Instance(\n input=Input(text=question),\n references=[Reference(output=Output(text=summary), tags=[CORRECT_TAG])],\n split=split,\n )\n )\n\n return instances", "def __call__(self):\n return self._main._items()", "def get_resources(self):\n return []", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def populate_instances(self):\n print \"Populating instances info...\"\n instances = self.get_all_instances()\n for i in instances:\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n PROD_tag=self.is_production(i), instance_type=i.instance_type,\n state=i.state, launched=i.launch_time, region=i.region.name)", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def _get_services(self):\n from googleapiclient.discovery import build as discovery_build\n from oauth2client.client import (\n GoogleCredentials,\n ApplicationDefaultCredentialsError,\n )\n from google.cloud import storage\n\n # Credentials must be exported to environment\n try:\n creds = GoogleCredentials.get_application_default()\n except ApplicationDefaultCredentialsError as ex:\n log_verbose_traceback(ex)\n raise ex\n\n # Discovery clients for Google Cloud Storage and Life Sciences API\n self._storage_cli = discovery_build(\"storage\", \"v1\", credentials=creds)\n self._compute_cli = discovery_build(\"compute\", \"v1\", credentials=creds)\n self._api = discovery_build(\"lifesciences\", \"v2beta\", credentials=creds)\n self._bucket_service = storage.Client()", "def update_instances():\n results = {}\n for ebs_support in {\"supported\", \"default\", \"unsupported\"}:\n results.update(get_instances(ebs_support=ebs_support))\n\n with open(\"main.tf\", \"w\") as wfile:\n wfile.writelines(\"locals {\\n ebs_optimized = {\\n\")\n for k, v in sorted(results.items()):\n wfile.writelines(f' \"{k}\" = {v}\\n')\n wfile.writelines(\" }\\n}\")", "def print_seeds(self):\n for key in self.CONFIG.keys():\n if \"EMI\" in key:\n print key", "def get_all_HEA_measurements(self):\n pass", "def get_techniques_used_by_groups():\n global techniques_used_by_groups\n\n if not techniques_used_by_groups:\n techniques_used_by_groups = rsh.techniques_used_by_groups(get_srcs())\n\n return techniques_used_by_groups", "def techniques(self):\n return self._get_child_page_of_type(LearningTechniquesPage)", "def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def find(self):\n self._get_ids()\n G.debug_(F'Experiments found with matching cross-experiment key and algorithm: {len(self.experiment_ids)}')\n self._get_scored_params()\n self._filter_by_space()\n G.debug_(F'Experiments whose hyperparameters fit in the currently defined space: {len(self.hyperparameters_and_scores)}')\n\n if self.module_name == 'keras':\n if ('model_init_params', 'compile_params', 'optimizer') in self.hyperparameter_space.get_names():\n self._filter_by_guidelines_multi(('model_init_params', 'compile_params', 'optimizer'))\n else:\n self._filter_by_guidelines()\n else:\n 
self._filter_by_guidelines()\n G.debug_(F'Experiments whose hyperparameters match the current guidelines: {len(self.similar_experiments)}')", "def getServices(self):\n pass", "def get_common_food(cls):\n objs = cls.objects\n return objs", "def get_ser_examples_supp(self):\n return np.broadcast_to(\n self.get_t_vec(),\n self.ser_examples[self.ser_examples.keys()[0]].shape,\n )", "def get_healthy_instances(self):\n return [instance for instance in self.instances.itervalues()\n if instance.health in _HEALTHY_STATES and\n instance.state < InstanceState.RUNNING_FAILED]", "def ListInstances(self, hvparams=None):\n return [name for name in os.listdir(self._ROOT_DIR)\n if self._IsDirLive(utils.PathJoin(self._ROOT_DIR, name))]", "def readInstances(\n self,\n makeGlyphs=True,\n makeKerning=True,\n makeInfo=True,\n bendLocations=False,\n ):\n for instanceElement in self.root.findall('.instances/instance'):\n self._readSingleInstanceElement(\n instanceElement,\n makeGlyphs=makeGlyphs,\n makeKerning=makeKerning,\n makeInfo=makeInfo,\n bendLocations=bendLocations,\n )", "def __init__(self):\n self._predefined_cluster_topics()\n self._gatherSEs()", "def _build_experiment_tsaseq_embedded_list():\n antibody_embeds = DependencyEmbedder.embed_defaults_for_type(\n base_path='antibody',\n t='antibody')\n secondary_antibody_embeds = DependencyEmbedder.embed_defaults_for_type(\n base_path='secondary_antibody',\n t='antibody')\n return (\n Experiment.embedded_list + antibody_embeds + secondary_antibody_embeds\n )", "def _get_all(cls):\r\n # BaseProvider does so have __subclassess__. pylint: disable-msg=no-member\r\n return {klass.NAME: klass for klass in BaseProvider.__subclasses__()}", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def get_engines_details(self):\n if not \"engines\" in self.data:\n raise ValueError(\"This recipe doesn't have engines\")\n return self.data[\"engines\"]", "def _generate_instances(self, single_traj):\n return [single_traj[:2].values]", "def get_techniques_used_by_malware():\n global techniques_used_by_malware\n \n if not techniques_used_by_malware:\n techniques_used_by_malware = rsh.techniques_used_by_malware(get_srcs())\n \n return techniques_used_by_malware", "def get_all_hep(self):\n return dict(\n before_request_hook=[plugin[\"plugin_hep\"][\"before_request_hook\"] for plugin in self.get_enabled_plugins if plugin[\"plugin_hep\"].get(\"before_request_hook\")],\n after_request_hook=[plugin[\"plugin_hep\"][\"after_request_hook\"] for plugin in self.get_enabled_plugins if plugin[\"plugin_hep\"].get(\"after_request_hook\")],\n teardown_request_hook=[plugin[\"plugin_hep\"][\"teardown_request_hook\"] for plugin in self.get_enabled_plugins if plugin[\"plugin_hep\"].get(\"teardown_request_hook\")],\n )", "def get_providers(self):\n return [\"Temperature\", \"Average Temperature Sea\", \"Average Temperature Land\"]", "def existing_analysis_sweeps(self):\n setup_list = self.existing_analysis_setups\n sweep_list = []\n s_type = self.solution_type\n for el in setup_list:\n sweep_list.append(el + \" : \" + s_type)\n return sweep_list", "def get_subtechniques_of():\n global subtechniques_of\n\n if not subtechniques_of:\n subtechniques_of = rsh.subtechniques_of(get_srcs())\n \n return subtechniques_of", "def components(self):\n return self._components", "def get_resources(self, **extra_args):\n return [lrms for lrms in self.resources.itervalues()]", "def get(self):\n return 
get_all_provider()", "def get_all(self):\n return [self.get(name) for name in self.factories.iterkeys()]", "def resources(self):\n return [self]", "def extracts(self):\n return self._extracts", "def _get_all_servers(self, key):\n hints = {}\n hosts = []\n for vm in NovaScheduler.vms:\n if vm['state'] == 'active':\n hosts.append(vm['id'])\n if len(hosts) > 0:\n hints[key] = hosts\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, key, hints)\n return hints", "def all_model_instances(self) -> Iterator['panda_core_data.model.Model']:\n for current_type in self.all_models:\n for current_instance in current_type.all_instances:\n yield current_instance", "def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())", "def get_all(self):\n # s = torch.FloatTensor(self._states).to(device)\n # a = torch.FloatTensor(self._actions).to(device)\n # r = torch.FloatTensor(self._rewards).to(device)\n return self._episodes", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def create_instances(self):\n disk_d = \"//\"+self.host+\"/d$\"\n mask = r\"^IBM$|^WebSphere.*\"\n root_flag = 0\n # print(os.listdir(disk_d)) #checkpoint\n for item in os.listdir(disk_d):\n searchObj = re.search(mask, item, re.M|re.I)\n if searchObj:\n root_flag = 1\n rootdir=disk_d+\"/\"+searchObj.group()\n # print(rootdir) #checkpoint\n\n if os.path.isdir(rootdir):\n candidates=os.listdir(rootdir)\n # print(candidates) #checkpoint\n for candidate in candidates:\n if os.path.isdir(rootdir+'/'+candidate+'/profiles'):\n user_install_root=rootdir+'/'+candidate\n candidate_instance=Instance(user_install_root)\n candidate_instance.get_profiles()\n if candidate_instance.profiles:\n self.instances.append(candidate_instance)\n # print(candidate_instance.uir+\": \"+str(candidate_instance.profiles)) #checkpoint\n\n if root_flag == 0: print(self.host+\" does not have IBM or WebSphere directory on disk D\")", "def _init_ext_info(self):\n IterativeCooperation._init_ext_info(self)\n\n for node in self._global_scenario.network.nodes:\n system = self._cluster_systems[node.id][0]\n for time_step in range(self.control_sequence_length):\n ctrl_limit = self._cluster_ctrl_limits[node.id][time_step]\n env_input = self._cluster_env_inputs[node.id][time_step]\n\n for app in system.apps:\n for ext_node in self._global_scenario.network.nodes:\n if node == ext_node:\n continue\n\n max_dispatch_load = 0.0\n if ext_node.is_cloud():\n max_dispatch_load = math.inf\n ctrl_limit.max_dispatch_load[app.id][ext_node.id] = max_dispatch_load\n\n env_input.generated_load[app.id][ext_node.id] = 0.0\n env_input.additional_received_load[app.id][ext_node.id] = 0.0\n 
env_input.nb_instances[app.id][ext_node.id] = 0\n if self._global_control_input is not None:\n nb_instances = self._global_control_input.get_max_app_placement(app.id, ext_node.id)\n env_input.nb_instances[app.id][ext_node.id] = nb_instances", "def get_all_environments():\n return ENVIRONMENTS", "def get_tools(cls):\n pass", "def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes", "def alldemos():\n rundemo(24,fig=True)\n rundemo(30,fig=True)\n rundemo(31,fig=True)\n rundemo(33)\n rundemo(34)", "def get(self) -> list:\n return self.__expedition", "def active_experiments():\n\n for instr in ('AMO', 'CXI', 'MEC', 'SXR', 'XCS', 'XPP'):\n for station_id in stations_instr(instr): \n info = expinfo.active_experiment(instr, station_id)\n yield info[1], instr.lower()", "def get_escalators(self, active=False, **endpoint_kwargs):\n return self._facilities('ESCALATOR', active, **endpoint_kwargs)", "def get(self):\n return get_all_fuelmaster()", "def configurables(cls):\n return find_class_instances(cls, Configurable)", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def get_trials(self):\n return self._trials", "def get_sensors(self):\n sensors = set()\n for er in self.exercise_recordings:\n for sensor in er.sensors:\n if sensor not in sensors:\n sensors.add(sensor)\n return list(sensors)" ]
[ "0.64952594", "0.61609066", "0.6024103", "0.57624537", "0.5747617", "0.5703971", "0.56971", "0.5676145", "0.56370413", "0.5501353", "0.547217", "0.54002124", "0.53892356", "0.53773415", "0.5376807", "0.53593886", "0.5353485", "0.5344247", "0.5336788", "0.5326446", "0.53020716", "0.5293048", "0.5292328", "0.52779114", "0.52238935", "0.5206132", "0.52019435", "0.52008635", "0.51954514", "0.5182392", "0.51676935", "0.5158658", "0.5155903", "0.5148513", "0.5148289", "0.51333815", "0.512897", "0.5116718", "0.5104309", "0.509651", "0.50880486", "0.5077147", "0.5064661", "0.50639397", "0.5062826", "0.50600237", "0.505731", "0.5055937", "0.5030312", "0.5022234", "0.50213814", "0.5002168", "0.49874213", "0.498003", "0.49758026", "0.49736726", "0.49733415", "0.49715877", "0.49693796", "0.496553", "0.49517596", "0.49514183", "0.49398988", "0.49378878", "0.49365672", "0.49191847", "0.49188316", "0.4914816", "0.49119577", "0.49097624", "0.49077404", "0.4904242", "0.49013335", "0.49009916", "0.48974946", "0.4896658", "0.48957962", "0.48954314", "0.48934716", "0.4888134", "0.48806232", "0.48784527", "0.48753992", "0.48751658", "0.48683733", "0.48669332", "0.48636046", "0.4863025", "0.48591504", "0.48580807", "0.48579", "0.48551092", "0.48529482", "0.48526382", "0.48514637", "0.48453748", "0.48451066", "0.48451066", "0.48411348", "0.4839563" ]
0.8199698
0
print all instances of this starter to the user
def show_all_instances(self):
    if not self.all_instances:
        logging.error("%s: no instances detected", self.name)
        return
    instances = ""
    for instance in self.all_instances:
        instances += " - {0.name} (pid: {0.pid})".format(instance)
    logging.info("arangod instances for starter: %s - %s", self.name, instances)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out():\n pass", "def print_results(self):\n pass", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def printResults(self):\n for tweet in self.tweets:\n print(tweet)\n print(\"---------------------\\n\")", "def printOutput(self):\n pass", "def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())", "def printStations(self):\n print(\"Bus numero \" + str(self._num) + \" :\")\n for i in range(len(self._stations)) :\n print(self._stations[i])\n print('\\n')", "def do_show(self, args):\n temp = args.split()\n\n if len(temp) == 0:\n print(\"** class name missing **\")\n return\n elif temp[0] not in self.myclasses:\n print(\"** class doesn't exist **\")\n return\n elif len(temp) < 2:\n print('** instance id missing **')\n return\n else:\n all_objs = storage.all()\n for i in all_objs.keys():\n if i == \"{}.{}\".format(temp[0], temp[1]):\n print(all_objs[i])\n return\n print('** no instance found **')", "def out(self):\n print(self.__class__.__name__)\n for prime in self.primes:\n print(prime)", "def out(self) -> None:\n print(self.__class__.__name__)\n for prime in self._primes:\n print(prime)", "def _display_examples(self):\n\n print(self._usage)\n print(self._examples)", "def display(self):\n print(self)", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def printall():\n print listAll()", "def __str__(self):\n #{{{ Nicely print of elements in class.\n\n if config.verbose: print \"Stations():\"\n\n for st in self.stachan_cache.keys():\n chans = self.stachan_cache[st].keys()\n print \"\\t%s: %s\" % (st,chans)", "def print_list(self):\r\n pass", "def out(self):\r\n print(self.__class__.__name__)\r\n for prime in self.primes:\r\n print(prime)", "def printStories(self):\n\t\tself.printHeader()\n\t\tfor i in range(self.firstStoryToShow, self.lastStoryToShow):\n\t\t\tself.outputStory(self.stories[i], self.showDomains, self.showFullTitles, self.collapseOldStories)\n\t\t\n\t\tif self.karmaChange:\n\t\t\tprint self.hnUserName + \"'s karma has changed since the last refresh.\"", "def print(self):\r\n self.print_avec_separateur()", "def printIns(self, stream):\n print(' ', str(self), file=stream)", "def print(self):\n for word in self.words:\n print(word)", "def printCars(self):\n for car in self.cars:\n self.logger.debug(car)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Test Case ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-JIRA URL:\", self.JIRA_URL, sep='')", 
"def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()", "def show(self):\n\n print(self._walk(self, depth=1))", "def print(self) -> None:\n\n print(\"Name: {}\".format(self.name))\n print(\"Input Queue: {}\".format(self.input_queue))\n print(\"Output Queue: {}\".format(self.output_queue))\n print(\"Restart Required: {}\".format(str(self.restart_required)))\n print(\"Number of Processes: {}\".format(str(self.num_processes)))\n print(\"Process Job: {}\".format(self.process_job.__name__))\n print(\"Timeout Duration: {}\".format(str(self.timeout_duration)))\n self.print_process_list()", "def print(self):\n\n print(self)", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)", "def print_experiments(self):\n first = True\n for env, agents in self._experiment_structure.items():\n if not first:\n self.logger.weak_line()\n first = False\n self.logger.info(f'Environment: {env}')\n for agent, _ in agents.items():\n self.logger.info('- ' + agent)", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def show(self):\n self._logger.debug(\"show\")", "def start(self):\n for circuit in self.circuits:\n self.modes[self.print_mode](circuit)", "def instances(args, config):\n print('Does something? More to come.')", "def print_instrumented(self) -> None:\n\n print(\n \"\\n\".join(\n f\"{t[1].__class__.__name__} of {t[1].__class__.__module__} component: \"\n f\"{str(t[0])}\" for t in self.instrumented()\n )\n )", "def do_show(self, argv):\n argument_split = argv.split()\n aux = 0\n if len(argument_split) == 0:\n print(\"** class name missing **\")\n elif not argument_split[0] in self.__names:\n print(\"** class doesn't exist **\")\n elif len(argument_split) < 2:\n print(\"** instance id missing **\")\n elif argument_split[0] in self.__names:\n for key, obj in models.storage.all().items():\n if key == argument_split[0]+\".\"+argument_split[1]:\n aux = 1\n print(obj)\n if aux == 0:\n print(\"** no instance found **\")", "def show_bench_player(self):\n if (len(self.bench_players) == 0):\n print(\"The bench is empty.\")\n else:\n for i in range(len(self.bench_players)):\n print(self.bench_players[i].name)", "def print(self):\n print(\"-----\", self.name, \"-----\")\n print(\"Enable index\", self.enable_index)\n print(\"End index:\", self.stop_index)\n print(\"Measurement length (from the very beginning to the end index):\", self.time_vec[self.stop_index])\n print()\n print(\"Start temperature (hot):\", self.temp_hot_start)\n print(\"Start temperature (cold):\", self.temp_cold_start)\n print(\"Start temperature (mean):\", self.temp_start)\n print(\"End temperature (hot):\", self.temp_hot[self.stop_index])\n print(\"End temperature (cold)\", self.temp_cold[self.stop_index])\n print()\n print(\"Max temperature\", self.temp_max)\n print(\"Min temperature\", self.temp_min)\n print()\n print(\"Heat pump\")\n print(\"Energy input:\", self.work_inp)\n print(\"Q_hot\", self.qhot_pump)\n print(\"Q_cold\", self.qcold_pump)\n print(\"Q_cold + W\", self.qcold_pump + self.work_inp)\n print(\"E_lost\", self.qcold_pump + self.work_inp - self.qhot_pump)\n print(\"Coefficient of performance COP_hot\", self.qhot_pump / self.work_inp)\n 
print(\"Coefficient of performance COP_cold\", self.qcold_pump / self.work_inp)\n print(\"Ideal COP_hot with the setup\", self.qhot_pump/(self.qhot_pump-self.qcold_pump))\n print(\"Ideal COP_cold with the setup\", self.qcold_pump / (self.qhot_pump - self.qcold_pump))\n print(\"Ideal Carnot COP_hot\", (self.temp_max+273.15)/(self.temp_max-self.temp_min))\n print(\"Ideal Carnot COP_cold\", (self.temp_min+273.15)/(self.temp_max-self.temp_min))\n print(\"Efficiency fraction out of ideal Carnot cooler\", (self.qcold_pump / self.work_inp)/((self.temp_min+273.15)/(self.temp_max-self.temp_min)))\n\n if self.not_air:\n print(\"Heat transfer through insulator, hot side\", self.heat_loss_pump_hot)\n print(\"Heat transfer through insulator, cold side\", self.heat_loss_pump_cold)\n print(\"Estimated Q_hot with resistor\", self.qhot_resistor)\n else:\n print(\"Estimated Q_hot with resistor (=energy input)\", self.qhot_resistor)\n #\n # I think it should be defined for Q_hot too. Yep, TODO that\n # Also calculate heatloss due to conduction TODO remove these comments when ready\n # Todo implement resistive heater calculations\n print()\n print(\"Heat engine\")\n print(\"Energy generated:\", self.work_gen)\n print(\"Q_hot\", self.qhot_engine)\n print(\"Q_cold\", self.qcold_engine)\n print(\"Q_hot - Q_cold\", self.qhot_engine - self.qcold_engine)\n print(\"E_lost\", -self.qcold_engine - self.work_gen + self.qhot_engine)\n print(\"\\\"Heat transfer efficiency\\\" (%)\", self.work_gen / (self.qhot_engine - self.qcold_engine) * 100)\n print(\"Efficiency e\", self.work_gen / self.qhot_engine)\n print(\"Ideal efficiency with the setup\", 1 - (self.qcold_engine / self.qhot_engine))\n print(\"Ideal Carnot efficiency\", (self.temp_max-self.temp_min)/(self.temp_max+273.15))\n if self.not_air:\n print(\"Heat transfer through insulator, hot side\", self.heat_loss_gen_hot)\n print(\"Heat transfer through insulator, cold side\", self.heat_loss_gen_cold)\n print()\n print(\"Total efficiency of cycle\", self.work_gen/self.work_inp)\n # About the efficiency of peltier elements (#telok@IRCnet, 2016-07-27)\n # 19:10 < AgenttiX> Oletteko kokeilleet TECin ohjaamista Arduinolla? Toimisiko tämä kytkentä? http://garagelab.com/profiles/blogs/how-to-use-a-peltier-with-arduino\n # --\n # 20:21 <@hrst> Ei toimi. Peltieriä ei voi ohjata PWM:llä.\n # 20:22 <@hrst> Hyötysuhde on PWM:llä paska, mikä on ongelma koska se on muutenkin liian paska, ja sen lisäksi se hajoaa mekaaniseen värähtelyyn ennemmin tai myöhemmin.\n print(\"-----\\n\")", "def print_entries(self):\n self.print_selected_entries(self.entries)", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def show_instances():\n return get_instances()", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. 
{1}'.format(i, s.print_info()))", "def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)", "def print(self):\n\n for domino in self.hand:\n print(domino)", "def __show_all(self):\n print(\"\\nEvents:\\n\")\n self.__show_all_events()\n print(\"\\nMetrics:\\n\")\n self.__show_all_metrics()", "def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))", "def show(self):\n pass", "def display(self):\n print(str(self))", "def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))", "def show(self):\n\n pass", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def print_examples(self):\n print((\"Examples are not defined for command \" + self.command))", "def show(self, class_name, inst_id, stored_objects):\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n print(\"** no instance found **\")\n else:\n print(stored_objects[instance])", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "def display(self):\r\n os.system('cls')\r\n index = 0\r\n for i in self.list:\r\n print(str(index) + \" \" + i.showRule())\r\n index += 1", "def print(self):\n print(self.pretty_str())", "def print(self):\n self.print_avec_separateur(\" \")", "def print_users(self):\n for i, item in enumerate(self.users):\n print(\"{}. 
{}\".format(i, item.name))", "def print_the_contents_of_all_entries(self):\n\n if len(self.student_list):\n self.print_dataframe(self.student_list)\n else:\n print('There is no contents to show')", "def __str__(self):\n print(\"Welcome to our house\")\n for room in self.rooms:\n print(room.name, room.sqr_ft)", "def print(self):\n # Your implementation here", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def print_seeds(self):\n for key in self.CONFIG.keys():\n if \"EMI\" in key:\n print key", "def show(self):\n print(\"APKs in Session: {}\".format(len(self.analyzed_apk)))\n for d, a in self.analyzed_apk.items():\n print(\"\\t{}: {}\".format(d, a))\n print(\"DEXs in Session: {}\".format(len(self.analyzed_dex)))\n for d, dex in self.analyzed_dex.items():\n print(\"\\t{}: {}\".format(d, dex))\n print(\"Analysis in Session: {}\".format(len(self.analyzed_vms)))\n for d, a in self.analyzed_vms.items():\n print(\"\\t{}: {}\".format(d, a))", "def print_everything(self):\n def print_service(service):\n print\n print '====[ %s ]==== ' % service.__repr__(path_only=True)\n print\n\n print 'Actions:'\n for name, action in service.get_actions():\n print ' - ', name, action\n print\n\n for name, subservice in service.get_subservices():\n print_service(subservice)\n\n print_service(self.root)", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)", "def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")", "def stdout(self):\n pass", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def print_scores(self):\n ### FILL IN ###", "def tell(self):\n print('Name {}, Age {}'. 
format(self.name, self.age), end=\" \")", "def display(self):\r\n print(self.title, 'written by', self.author)", "def display(self):\n print(\"----Player----\")\n print(\"Player {} is using {} as their mark\".format(self.name, self.mark))", "def console(self):\n fricas_console()", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Physical Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-MAC address:\", self.MAC_address, sep='')", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def printSummary(self):\n pass", "def main():\n student_info = prompt_student()\n display_student(student_info)", "def print_food(self):\n for dish in self.food:\n print(dish.get_name())", "def show_priveleges(self):\n print(\"This user:\")\n for privelege in self.priveleges:\n print(privelege)", "def DumpStudies():\n for name in myStudyManager.GetOpenStudies():\n s=myStudyManager.GetStudyByName(name)\n print \"study:\",name, s._get_StudyId()\n DumpStudy(s)", "def print(self) -> None:\n\n print('')\n print(f\"{self.get_name()}, {self.get_description()}\")\n print('-------------')\n for child in self._children:\n child.print()", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def print(self):\n self.__print_local(self.dataset, 0)", "def show_current(self):\n for packet in self.station.genLoopPackets():\n print(packet)\n break", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def show_classes():\n for obj in Classes.get_all_obj_list():\n print('\\033[33;1m[%s] [%s]校区 [%s]班级 学费[%s]\\033[0m'.center(60, '-') \\\n % (obj.school_nid.get_obj_by_uuid().name, obj.school_nid.get_obj_by_uuid().addr, \\\n obj.name, obj.tuition))", "def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))", "def print_list(self):\n self.print_avec_separateur(\" \")", "def printAllPion(self):\n idx = 0\n for pion in self.arrayPion:\n print(\"ID = \", idx, end=\" --> \")\n pion.printPion()\n idx += 1", "def print_drinks(self):\n for beverage in self.drinks:\n print(beverage.get_name())", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())" ]
[ "0.67142516", "0.6560021", "0.636873", "0.63599074", "0.6321213", "0.62887406", "0.6230574", "0.62254745", "0.6211867", "0.621094", "0.6190211", "0.6186281", "0.61838835", "0.61825544", "0.61808974", "0.6177476", "0.6169501", "0.6163866", "0.61381304", "0.61379516", "0.6135463", "0.6132603", "0.6111109", "0.6088436", "0.6058275", "0.60241723", "0.60188633", "0.6014221", "0.60008454", "0.6000054", "0.5996067", "0.5994845", "0.59886444", "0.5986782", "0.5979666", "0.5979662", "0.59786886", "0.59732956", "0.5972064", "0.5944779", "0.594468", "0.5918085", "0.59144694", "0.5905134", "0.5897206", "0.5896216", "0.5891858", "0.58712703", "0.5862167", "0.58581436", "0.58561254", "0.5855963", "0.5854107", "0.584989", "0.5848203", "0.58458835", "0.584351", "0.5836696", "0.5835023", "0.58347565", "0.5834619", "0.5834489", "0.5825753", "0.5824081", "0.5819278", "0.58191776", "0.5818338", "0.58181685", "0.5813587", "0.58072287", "0.5799089", "0.57977605", "0.5787693", "0.5771955", "0.5771642", "0.57672805", "0.57615507", "0.5761454", "0.5758163", "0.57546496", "0.5750358", "0.5744917", "0.5742552", "0.57396144", "0.57389086", "0.5730466", "0.57261646", "0.57233715", "0.57224417", "0.5720216", "0.57193726", "0.5716549", "0.5713796", "0.5713743", "0.57119983", "0.57095766", "0.57030874", "0.5699091", "0.5694302", "0.56940067" ]
0.7568982
0
launch the starter for this instance
def run_starter(self, expect_to_fail=False):
    logging.info("running starter " + self.name)
    args = [self.cfg.bin_dir / "arangodb"] + self.hotbackup_args + self.default_starter_args + self.arguments
    lh.log_cmd(args)
    self.instance = psutil.Popen(args)
    logging.info("my starter has PID:" + str(self.instance.pid))
    if not expect_to_fail:
        self.wait_for_logfile()
        self.wait_for_port_bind()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def launch(self):", "def start_sml():\n launchfile = basepath + '/launch/teststarter.launch'\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n #print roslaunch.rlutil.check_roslaunch(launchfile)\n #roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [launchfile])\n launch.start()", "def start():\n trio.run(_main)", "def run(self):\n self.started()", "def start(self):\n return self.setup.start", "def start(self):\n\n self.app.go()", "def run_starter(self, expect_to_fail=False):", "def startapp():", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def start (self):\n pass", "def start (self):\n pass", "def main():\n driver = Driver()\n driver.start()", "def launch(config):\n \n launch_with_configs([config])", "def run(self):\n self.__power_on()\n\n self.__main()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def run(self):\n self.tp.launch_list()\n self.tp = None", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def start():", "def start():", "def start():", "def start():", "def _start(self):\n pass", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def start(self):\r\n pass", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def main():\n tng.api.runner()", "def launch_test():\n import sys\n from kothrak.envs.KothrakEnv import KothrakEnv\n from kothrak.envs.game.MyApp import style\n from PyQt5.QtWidgets import QApplication, QWidget\n\n qapp = QApplication(sys.argv)\n qapp.setStyleSheet(style)\n window = QWidget()\n window.setWindowTitle('Kothrak training')\n\n env = KothrakEnv(qapp, window)\n window.show()\n\n trainer = Trainer(env)\n # trainer.load('saves/031421-1523.zip')\n trainer.run()\n\n qapp.exec_()", "def activate(self):\n self.start()", "def start(self):\n ...", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def run(self):\n self.run()", "def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()", "def platform_start(self):\n self.platform.start()", "def start(self) -> None:", "def start(self) -> None:", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def run():\n main()", "def run():\n \n logger = logging.getLogger(\"galah.sheep.producer\")\n 
\n\t# Initialize the correct producer based on the selected virtual suite.\n virtual_suite = get_virtual_suite(config[\"VIRTUAL_SUITE\"])\n producer = virtual_suite.Producer(logger)\n\n logger.info(\"Producer is starting\")\n \n # Loop until the program is shutting down\n while not universal.exiting:\n producer.produce_vm()", "def startup(self) -> None:", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start( *args, **kwargs ):", "def main():\n setup()\n master = Master()\n master.start()", "def _start(self):", "def run(self):\n self.process.start()", "def launch_app(self):\n os.system (\"adb shell am start -n com.tencent.mm/com.tencent.mm.ui.LauncherUI/\")\n time.sleep (5)", "def launch(self):\n \n # Get local loggers from launchlogger decorator\n out_log = getattr(self, 'out_log', None)\n err_log = getattr(self, 'err_log', None)\n\n # Check the properties\n fu.check_properties(self, self.properties)\n\n if self.restart:\n output_file_list = [self.output_path]\n if fu.check_complete_files(output_file_list):\n fu.log('Restart is enabled, this step: %s will the skipped' % self.step, out_log, self.global_log)\n return 0\n\n # create command line instruction\n cmd = self.create_cmd(out_log, err_log)\n\n returncode = cmd_wrapper.CmdWrapper(cmd, out_log, err_log, self.global_log).launch()\n return returncode", "def start():\r\n\r\n userName = userLogin.login()\r\n runApp(userName)", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)", "def run():\r\n autostartup()", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def train_entry_point():", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def run(self):\n\n self.steer()\n self.drive()", "def run_profile(self):\n self.create_shelf_dir(self.profile.tc_temp_path)\n\n # run the App\n return self.run(self.profile.args)", "def startup(self):\n pass", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)", "def run(self):\n self.arbiter.start()", "def launch(self):\n return self._launch", "def 
main():\n app = RunSnakeRunApp(0)\n app.MainLoop()", "def start_import_tool(self):\n apps.albumsmatcher.MainFrame(app=self.app)", "def launch(self, launch):\n\n self._launch = launch", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []", "def launch(config_list):\n p = PyRosLaunch(config_list)\n p.start()\n p.spin()", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def start_launch(self, _, **kwargs):\n self._handle_lifecycle = False if self._rp.launch_id else True\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerun_of=self._cfg.rerun_of,\n **kwargs,\n )", "def start(self):\n self.p.start()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n \n self._dts = rift.tasklets.DTS(self.tasklet_info,\n UtCompositeYang.get_schema(),\n self._loop,\n self.on_dts_state_change) \n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.log.debug(\"Starting TestDriverTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING", "def launch (self):\n path = \"\"\n os.system(path + 'kidlogger_user.exe')", "def main(self, **kwargs) -> None:\n ...", "def dev():\n trio.run(_dev_main)", "def start():\n logging.info(\"Execution Started\")", "def main(self) -> None:\n pass", "def show(self) -> Any: # noqa: ANN401\n self.start()\n return self.join()", "def start_run(self):\n return mlflow.start_run(\n run_id=self.run_id,\n experiment_id=self.experiment_id,\n run_name=self.run_name,\n nested=self.nested)", "def DistEntry():\n flags.StartMain(main)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--workflow_path\", help='Path to workflow file')\n parser.add_argument(\"-g\", \"--galaxy\",\n dest=\"galaxy_url\",\n help=\"Target Galaxy instance URL/IP address (required 
\"\n \"if not defined in the tools list file)\",)\n parser.add_argument(\"-a\", \"--apikey\",\n dest=\"api_key\",\n help=\"Galaxy admin user API key (required if not \"\n \"defined in the tools list file)\",)\n args = parser.parse_args()\n\n gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=args.api_key)\n\n with open(args.workflow_path, 'r') as wf_file:\n import_uuid = json.load(wf_file).get('uuid')\n existing_uuids = [d.get('latest_workflow_uuid') for d in gi.workflows.get_workflows()]\n if import_uuid not in existing_uuids:\n gi.workflows.import_workflow_from_local_path(args.workflow_path)", "def run(self):\n \n pass" ]
[ "0.7489495", "0.7253172", "0.692011", "0.6630553", "0.65272653", "0.6522638", "0.6460976", "0.64253443", "0.6424377", "0.6397502", "0.6396763", "0.6396763", "0.63844043", "0.63730854", "0.6352418", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6313772", "0.63104457", "0.63104457", "0.6304974", "0.6304974", "0.6304974", "0.6304974", "0.63039505", "0.6283363", "0.62573063", "0.6255987", "0.6245705", "0.62416667", "0.622898", "0.62216055", "0.6221328", "0.61983895", "0.61953145", "0.616667", "0.6141501", "0.6141501", "0.6140031", "0.61240995", "0.6117783", "0.6113165", "0.61129934", "0.61129934", "0.6103722", "0.6096136", "0.6090023", "0.608504", "0.6072735", "0.6063263", "0.60545754", "0.60404706", "0.60323244", "0.6030204", "0.6020377", "0.5992984", "0.5991583", "0.59787035", "0.59779966", "0.5971345", "0.5966625", "0.596453", "0.59501374", "0.59475154", "0.593827", "0.59370816", "0.59305304", "0.5926969", "0.5926969", "0.5922935", "0.5922935", "0.5922935", "0.5921141", "0.59027314", "0.5901941", "0.589653", "0.58937037", "0.5887646", "0.58864427", "0.5880594", "0.5880594", "0.5880594", "0.5880594", "0.58782136", "0.58667284", "0.5854051", "0.58504194", "0.5832427", "0.5819819", "0.5818816", "0.58148026", "0.58131987", "0.580568", "0.5804618" ]
0.6387958
12
somebody else is running the party, but we also want to have a look
def attach_running_starter(self):
    # pylint: disable=broad-except
    match_str = "--starter.data-dir={0.basedir}".format(self)
    if self.passvoidfile.exists():
        self.passvoid = self.passvoidfile.read_text(errors="backslashreplace", encoding="utf-8")
    for process in psutil.process_iter(["pid", "name"]):
        try:
            name = process.name()
            if name.startswith("arangodb"):
                process = psutil.Process(process.pid)
                if any(match_str in s for s in process.cmdline()):
                    print(process.cmdline())
                    print("attaching " + str(process.pid))
                    self.instance = process
                    return
        except psutil.NoSuchProcess as ex:
            logging.error(ex)
    raise Exception("didn't find a starter for " + match_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def party(ctx):\n if ctx.invoked_subcommand is None:\n return await show_party(ctx)", "def like_to_party(msg):\n if message.rate_limit(msg.settings, 'like_to_party'):\n return\n return \"%s, I know for a fact you don't party. You do *not* party.\" \\\n % msg.name", "def think(self):\n pass", "def stink(self):\r\n print(\"Dear lord!\\n\")", "def hire(name):\r\n print(\"A CEO cannot be hired outright\")", "async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950", "def brain_status(self):\r\n return 'thinking...'", "def healthcare():", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "def party_id(self):\n pass", "def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False", "def surrender(self) -> Optional[str]:\n\n error_message: Optional[str] = None\n if len(self.hands[0].cards) > 2:\n error_message = \"Cannot surrender because you have already hit!\"\n\n elif len(self.hands) == 2:\n error_message = \"Cannot surrender because you have already splitted!\"\n\n else:\n self.bet //= 2\n self.hands[0]._points = 0\n\n return error_message", "async def devox(self, ctx):\n member = discord.utils.find(lambda m: m.id == 250865328194715658, ctx.channel.guild.members)\n await ctx.send(\"{} The great man who created this bot some people say he has too much power, but the truth is he doesnt have enough\".format(member.mention))", "def announce(outcome, who):\r\n print(who, 'rolled a', outcome)\r\n print(draw_number(outcome))", "def PartyCheckIn(sc, event):\n now = datetime.datetime.now()\n vice = event['text'][:5].lower()\n user = sc.api_call('users.info', user=event['user'])\n user = user['user']['name']\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n cursor.execute('DELETE FROM party WHERE date < '\n '(NOW() - INTERVAL 120 MINUTE) AND who=\"%s\"' % user)\n cursor.execute('INSERT INTO party (who, vice) VALUES (\"%s\", \"%s\")' % (user, vice))\n db.commit()\n cursor.execute('SELECT who FROM party WHERE who=\"%s\" AND vice=\"%s\"' % (user, vice))\n status = cursor.fetchall()\n party_message = PartyMessage(user, vice, len(status))\n sc.api_call('chat.postMessage', as_user='true', channel=event['channel'],\n text=party_message)\n db.close()", "def say_meow(self):\n\n pylog.info('My master calls me {} and meow!'.format(self.name))", "def user_story_4(self):\n for family in self.families.values():\n if family.married != 'NA':\n if family.wife_id != 'NA' and family.husb_id != 'NA' and family.divorced != 'NA':\n if family.divorced < family.married:\n print(\n f'US04 - {self.individuals[family.wife_id].name} and {self.individuals[family.husb_id].name} married after divorce on line {family._married_line}')", "def find_new_people(self):\n #greets people, only greets once while they're in the camera's view and are 
center of attention\n\n\n if (self.person is not None) and (self.person.acknowledged == False):\n self.person.acknowledged = True\n print \"I see you!\"\n self.idle_pub.publish(\"idle:stop\")\n time.sleep(2)\n\n greeting = [\"R_nudge\",\"R_look\"]\n for msg in greeting:\n self.behavior_pub.publish(msg)\n self.check_completion()\n\n\n self.detection_pub.publish('found')\n\n elif self.person is None:\n print \"I don't see you\"\n self.detection_pub.publish('nothing')", "def demote(name):\r\n print('Office Worker is the lowest position')", "def Peacekeaper(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def mystery_solved():\n print(\"\\nThe butler: The mystery is solved! I knew it was someone in the family. Well done!\")", "def _involves_this_party(self, d: Dag):\n for r in d.roots:\n for ps in r.out_rel.stored_with:\n if self.config.system_configs['CODEGEN'].pid in ps:\n return True\n return False", "def battle_resting(self):\n pass", "def first_entrance(self):\n print(\"A medium-sized room with a track in the center surrounded by benches. \"\n \"A tablet rests on a stand at the end of the track. \"\n \"A horrendous, rotten smell seems to be coming \"\n \"from the Terra Communications Room, which is, oddly, locked by a \"\n \"bulky, old-school mechanical lock...\")", "def shouting(self):\n return 'Whoa, chill out!'", "async def pending(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n description = \"\"\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n for member in lst:\r\n userobj = ctx.guild.get_member(int(member))\r\n description += (str(userobj.mention) + '\\n')\r\n embed = discord.Embed(color=0xFFFF00, title='Coaching Needed by following people', description=description)\r\n embed.set_footer(text=credit)\r\n await ctx.send(embed=embed)\r\n await ctx.send('Type \"{0}coaching done @<player name>\" if the player has been coached or type \"{0}coaching info <@playername>\" to view the details submitted by the user'.format(ctx.prefix))\r\n \r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def event11515373():\n header(11515373, 0)\n skip_if_this_event_on(3)\n if_event_flag_off(1, EVENT.JareelDead)\n if_player_inside_region(1, 1512956)\n if_condition_true(0, 1)\n skip_if_client(1)\n network.notify_boss_room_entry()\n chr.activate_npc_buffs(CHR.AbyssalPrinceJareel)", "def get_info(self):\n if self.own_home:\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. Currently I have {self.own_home} house')\n return print(f'Hi! My name is {self.name}, I\\'m {self.age}. 
I don\\'t have any home now!')", "def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)", "def won(self):\r\n return None", "def CatchPhase(self):\n print(\"{} said: Ain't that just the way!\".format(self.__name)) # str formatting format()\n # print(\"%s said: Ain't that just the way!\" % (self.__name)", "def take_leader(self):", "def iam(self):\n print(\"I am company\", self.ticker)", "def checkSelfReply(body):\n return 'WHAT IS MY PURPOSE' in body", "def inside(self):\n return self.throwaway", "def PartyMessage(user, vice, status_count):\n statuses = {'!weed': {1: 'smoking', 2: 'buzzed',\n 3: 'stoned', 4: 'stoned as fuck'},\n '!booz': {1: 'drinking', 2: 'tipsy',\n 3: 'drunk', 4: 'drunk as fuck'}}\n if user == 'christian':\n return '%s is now blotto!' % user\n for count, message in statuses[vice].iteritems():\n if count == status_count or (count == 4 and status_count > 4):\n return '%s is now %s.' % (user, message)", "def present(self):", "async def tod_status(self, ctx, *args):\n n = len(self.players)\n if n > 0:\n if n == 1:\n s = \"person\"\n else:\n s = \"people\"\n message = f\"A Truth or Dare game is currently taking place with {n} {s}!\"\n else:\n message = \"No Truth or Dare game is currently taking place.\"\n await ctx.send(message)", "def wife(backpack):\n print(\"\\nYour wife says: \")\n if \"corn\" in backpack:\n if backpack['corn'][0] < 20:\n print(\"-You need to gather 20 corn cob so get back to work! \")\n enter()\n else:\n print(\"-Ahh you are a bastard but I know your dream...\\nNow go to city and buy your ticket my love :* \")\n enter()\n return True # because of this we can change lvl\n if \"corn\" not in backpack:\n print(\"-Where have u been u f...... drunkard, \\nget back to work and collect 20 corn cobs! \")\n enter()", "def verse_1():\n print(\"Old MACDONALD had a farm\")\n print(\"E-I-E-I-O\")", "def mechanism(self):", "def goodmorning(self,guest):\n print(\"good morning,%s\"% guest)", "def get_in_act(self):\n in_act = self.act.get_in_act()\n in_act += ' This backfires and negatively affects {performer}.'\n return in_act", "def notify_of_death(dead_guy):\n if SMSing:\n send_death_announcement(dead_guy)", "def look_at(self):\n if self.visible == True:\n print('You look at the %s:' % self.alias)\n print(self.desc)", "def sit(self):\n print(f\"{self.name} is now sitting\")", "def player_tie(self):\r\n\r\n self.summary = (\" \"* 78) + \"TIE. 
TRY AGAIN\"\r\n print(\"Match ends in a draw.\\n\")", "def Death_Blossom(self):\t\t\n\t\tprint(self.name.Title() + \"Die Die Die!\")", "def is_actor():\n return False", "def print_status(self):\n print \"Zombie has\" + super(Zombie, self).print_status()", "def sing(self):\r\n print(\"I'm not your toy...\\n\")", "def is_summon(self):\n return False", "def fire(name):\r\n print('A CEO cannot be fired')", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def reckon(self):", "def substantiate():", "def should_keep_running(self):\n return len(self.party.active_users())", "def user_story_17(self):\n for f1 in self.families.values():\n for f2 in self.families.values():\n if f1.fid != f2.fid:\n if f1.husb_id == f2.husb_id and f2.wife_id in f1.children:\n print(f\"US17 - {self.individuals[f2.wife_id].name} and {self.individuals[f1.husb_id].name} are married on line {f1._married_line}\")\n if f1.wife_id == f2.wife_id and f2.husb_id in f1.children:\n print(f\"US17 - {self.individuals[f2.husb_id].name} and {self.individuals[f1.wife_id].name} are married on line {f1._married_line}\")", "def talk(self):\r\n super().talk()\r\n print(\"Good day, darling\")", "def DoMaliciousThings():\r\n\tprint(\"You are infected\")", "def display_people_in_space() -> bool:\n\n res = requests.get(\"http://api.open-notify.org/astros.json\").json()\n number: int = res.get(\"number\")\n craft: str = res[\"people\"][0][\"craft\"]\n names: list = [\n person.get(\"name\") for person in res.get(\"people\")\n if person.get(\"craft\") == craft\n ]\n names_str: str = \", \".join(names)\n print(f\"There are {number} people aboard the {craft}. They are {names_str}\")\n\n return True", "def inspect_melons(self,passed):\n if passed == True:\n self.passed_inspection = True\n # return True", "def user_appears(self, user):\n pass", "def event1923():\n header(1923)\n end_if_this_event_on()\n if_player_has_special_effect(0, SPEFFECT.ExileSoulEffect)\n item.award_item_to_host_only(ITEMLOT.ExileSoulReward)", "def bark(self):\n print(f\"{self.name} is now barking\")", "def do_hire(self):\n return f\"{self} is hiring employees\"", "def is_alive(self):", "def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())", "def goodmorning(host):", "def sit(self):\r\n print(self.name.title() + \" is now sitting.\")", "def is_onhold(self) -> bool:", "def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def useful():\n\n print('I do something.')", "def accuse(self, npc, index):\n\n solved = False\n dialogue = []\n dialogue.append(f\"You: I think the murderer is {npc} and they committed their crime at {self.time.index_to_string(index)}!\")\n if npc == self.murderer and index == self.murder_committed_index:\n dialogue.append(f\"{npc}: Not possible. I am sure I was in the {npc.fake_room_at_murder_time} at that time. There were no bodies.\")\n dialogue.append(f\"{self.whistle_blower}: You're lying! I was in the {self.whistle_blower.get_room_at_time(self.murder_committed_index)} at {self.time.index_to_string(index)}! You weren't there!\")\n solved = True\n else:\n dialogue.append(f\"{npc}: Not possible. I am sure I was in the {npc.get_room_at_time(index)} at that time. 
There were no bodies.\")\n return solved, dialogue", "def event1926():\n header(1926)\n\n if_player_has_special_effect(1, SPEFFECT.RuinousHand)\n if_has_tae_event(1, CHR.Player, 675)\n if_event_flag_on(1, 11025405)\n if_condition_true(0, 1)\n\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=1, behavior_id=2000)\n sound.play_sound_effect(CHR.Player, SoundType.s_sfx, 90010) # Bonfire resting sound.\n chr.set_special_effect(CHR.Player, SPEFFECT.RuinousHandPayment) # Lose two humanity.\n flag.disable_chunk(11025401, 11025405)\n if_does_not_have_tae_event(0, CHR.Player, 675)\n\n if_player_has_good(2, GOOD.PaleEyeOrb)\n if_event_flag_off(2, EVENT.BeyondWitness)\n skip_if_condition_false(2, 2)\n flag.enable(EVENT.BeyondWitness)\n message.status_explanation(TEXT.PaleEyeOrbQuivers)\n\n restart()", "def user_story_18(self):\n for f1 in self.families.values():\n for f2 in self.families.values():\n if f2.husb_id in f1.children and f2.wife_id in f1.children:\n try:\n print(f\"US18 - {self.individuals[f2.husb_id].name} and {self.individuals[f2.wife_id].name} are siblings and are married on line {f2._married_line}\")\n except KeyError:\n print(f'US18 - Siblings married each other.')", "def hey(self, msg):\n if issilence(msg):\n return \"Fine. Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"", "async def CelebrityMasterChef(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "async def whyplonked(ctx, user: typing.Union[discord.Member, discord.User]):\n data = await bot.plonk.find(user.id)\n if not data:\n return await r(ctx, f\"{user} is not plonked.\")\n \n await r(ctx, f\"**{user} was plonked for:**\\n{data['reason']}\")", "def is_rainbow(msg: str = 'I guess you are not my little pog champ :3'):\n\n async def check(ctx):\n rainbow = ctx.author.id == ctx.bot.owner_id\n if not rainbow:\n await ctx.send(msg)\n return rainbow\n\n return commands.check(check)", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )", "async def ign_whoami(self, ctx):\n user = ctx.message.author\n igns = self.names.get(user.mention)\n if not igns:\n await self.bot.say(\"You have not yet entered any IGN info. 
:cry:\".format(user.mention))\n else:\n await self.bot.say(self.format_igns(user, igns))", "def is_yummy(self):\n return False", "def display_line(self):\n line = self.line\n hosts = self.hosts\n if not self.check_line():\n return\n self.msg(\"|wThis line is hosted by:|n %s\" % \", \".join(str(ob) for ob in hosts))\n self.msg(\"|wCurrent line order:|n %s\" % \", \".join(str(ob) for ob in line))", "def join_line(self):\n if self.caller in self.line:\n self.msg(\"You are already in the line.\")\n return\n if not self.line and self.loop:\n self.line.append(\"|r*Loop Marker*|n\")\n self.line.append(self.caller)\n self.caller.location.msg_contents(\"%s has joined the line.\" % self.caller)", "def layEgg(self, time):\n\t\tif self.nestInfo.layEgg(time) == 1:\n\t\t\tprint(\"Laid egg at nest in agent \", self.agentID)", "def isalive():\n return 'alive'", "def is_party_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.PARTY", "def CatchPhase(self):\n print(\"{} said: What did you get on Question 3?\".format(self.GetName()))", "def wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n\r\n if hour >= 0 and hour < 12:\r\n speak(\"Good morning\" + MASTER)\r\n\r\n elif hour >= 12 and hour < 18:\r\n speak(\"Good afternoon\" + MASTER)\r\n else:\r\n speak(\"Good Evening\" + MASTER)\r\n # speak(\"I am VA. How may I help you?\")\r", "def leader(self):\n pass", "def leader(self):\n pass", "def game_over(self):\n if self.missed == 5:\n print(\"You Lost! Better Luck Next Time!\")\n else:\n print(\"You Won! Congratulations!\")\n self.print_full_phrase()", "def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")", "def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None", "def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None", "def breath_fire(self):\r\n print(\"$@#$#@$\\n\")", "def isspeech(phone):\n return phone not in OTHERS", "def user_story_6(self):\n for family in self.families.values():\n if family.divorced != 'NA':\n if family.wife_id != 'NA':\n if self.individuals[family.wife_id].death != 'NA':\n if self.individuals[family.wife_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.wife_id].name} divorce after individual death date on line {family._divorced_line}')\n\n if family.husb_id != 'NA':\n if self.individuals[family.husb_id].death != 'NA':\n if self.individuals[family.husb_id].death < family.divorced:\n print(f'US06 - {self.individuals[family.husb_id].name} divorce after individual death date on line {family._divorced_line}')" ]
[ "0.6360574", "0.60688215", "0.6012925", "0.59863085", "0.59315145", "0.589366", "0.5702806", "0.56775403", "0.56753576", "0.5657366", "0.5642449", "0.5641897", "0.5616109", "0.56132597", "0.56030726", "0.5584753", "0.555659", "0.5550408", "0.55344415", "0.5533722", "0.5528718", "0.55164725", "0.5489035", "0.54802823", "0.54800093", "0.5472914", "0.5466058", "0.5457374", "0.5456022", "0.5445301", "0.5439304", "0.54118675", "0.5403643", "0.53958106", "0.5384923", "0.5382346", "0.5361369", "0.5360139", "0.53423584", "0.53380036", "0.5337672", "0.5337473", "0.53296816", "0.5323856", "0.53216225", "0.5299696", "0.52975065", "0.5295357", "0.5293356", "0.52926946", "0.52913445", "0.5283159", "0.52827513", "0.5280633", "0.52735823", "0.5271594", "0.52590835", "0.52568525", "0.52544576", "0.52515465", "0.5237325", "0.52354836", "0.5233349", "0.52309847", "0.52199686", "0.5211211", "0.5208805", "0.52066857", "0.5205182", "0.51966196", "0.51937485", "0.5191254", "0.5187278", "0.51871026", "0.51799625", "0.5173655", "0.51694566", "0.51693213", "0.51638687", "0.5162013", "0.51599574", "0.5159757", "0.5152722", "0.51491547", "0.51464194", "0.514449", "0.5141026", "0.5140827", "0.5130415", "0.5129822", "0.5129318", "0.51278764", "0.51257735", "0.51257735", "0.51240873", "0.5111815", "0.51088893", "0.51088893", "0.51038325", "0.5101064", "0.5099429" ]
0.0
-1
some scenarios don't want to use the built-in JWT generation from the manager
def set_jwt_file(self, filename):
    self.jwtfile = filename
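A minimal usage sketch (hedged: the manager object, the get_jwt_header companion method seen in later snippets, and the secret-file path are illustrative assumptions, not part of this record):

    # Bypass the manager's built-in JWT generation: point it at an
    # externally managed secret file, then let header lookups resolve
    # through that stored path.
    manager.set_jwt_file("/secrets/arangodb.jwt")   # hypothetical path
    auth_header = manager.get_jwt_header()          # reads via the stored jwtfile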
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def create_fake_JWT_token(userEmail):\n pass", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = Token.objects.create(user=user)\n token.save()\n return token", "def get_custom_jwt(user, device):\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_otp_payload(user, device)\n return jwt_encode_handler(payload)", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def test_issue_token_creates_a_jwt_hat_is_key_specific(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = issue_token(self.demo_user)\n\n message = 'Failed to create a key-specific token.'\n with self.assertRaises(jwt.DecodeError, msg=message):\n jwt.decode(token, self.jwt_key_2, algorithms=['HS256'])", "def test_issue_token_creates_a_jwt(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = issue_token(self.demo_user)\n\n self.assertNotEqual(\n jwt.decode(token, self.jwt_key, algorithms=['HS256']),\n None,\n 'Invalid token created.'\n )", "def token_gen_call(username, password, exp=None):\n #pdb.set_trace()\n \n #username_set = params['AUTH']['username_set']\n #password_set = params['AUTH']['password_set']\n username_set = username\n password_set = password\n \"\"\"\n Creates JWT Token\n :return:\n \"\"\"\n if exp is None:\n exp = datetime.utcnow() + timedelta(seconds=3600)\n _token = {\n 'aud': JWT_AUDIENCE,\n 'exp': exp,\n 'iss': JWT_ISSUER,\n 'user': username,\n 'role': 'admin',\n 'time':time.time()\n }\n _token.update(_token)\n \n if password_set == password and username_set == username: # example, don't do this in production\n return {\"token\" : jwt.encode(_token, SECRET_KEY, algorithm=JWT_OPTIONS_ALGORITHM).decode('utf-8') }\n return 'Invalid 
username and/or password for user: {0}'.format(username)", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def get_token(public_key,delta):\n return jwt.encode(\n {\n 'public_key':public_key,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=delta)\n },\n current_app.config['SECRET_KEY'],\n algorithm=\"HS256\"\n )", "def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')", "def create_jwt(user_obj):\n return jwt.encode(\n user_serializer.GetUserInfoSerializer(user_obj).data,\n settings.SECRET_KEY, algorithm='HS256').decode('utf-8')", "def generate_token(self):\n\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)", "def UserToken(self) -> object:", "def get_token(cls, username):\n try:\n payload = {\n # 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n # 'iat': datetime.datetime.utcnow(),\n 'username': username\n }\n return jwt.encode(\n payload,\n 'meow',\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def make_id_jwt(sub, tenant=None):\n payload = {\"sub\": sub}\n if tenant is not None:\n payload[\"mender.tenant\"] = tenant\n payload = json.dumps(payload)\n payloadb64 = b64encode(payload.encode(\"utf-8\"))\n return \"bogus_header.\" + payloadb64.decode() + \".bogus_sign\"", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def get_jwt(self):\n if self.token is None or self.is_expired(self.token):\n self.token = self.create_jwt(self.audience, self.additional_claims)\n return self.token", "def get_token(self, obj):\n\n user = User.objects.get(email=obj.email)\n\n payload = jwt_payload_handler(user)\n\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n 
datetime.utcnow().utctimetuple()\n )\n\n token = jwt_encode_handler(payload)\n\n return token", "def encode_auth_token(self,user_id): \n try: \n exp = datetime.utcnow() + timedelta(days=1)\n \n payload = {\n 'exp': exp, \n 'iat': datetime.utcnow(), \n 'sub': user_id\n }\n \n encoded_auth_token = jwt.encode(\n payload, \n getattr(settings, \"SECRET_KEY\",\"\"),\n algorithm='HS256'\n )\n return encoded_auth_token\n except Exception as e: \n print_exception(e)\n return e", "def generate_token_string(token):\n if JWT_AUTH:\n return 'JWT {}'.format(token)\n else:\n return 'Token {}'.format(token)", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def test_token_generation(self):\n self.assertIsInstance(self.user.generate_auth_token(34), bytes)", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def auth_token(self):", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'uid': userdata['uid'],\n 'pwd':userdata['pwd'],\n 'role': userdata['role']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def test_generate_and_validate_token(self):\n\n audience = 'http://www.service.teletracking.com/'\n roles = {'role': ['admin', 'user'], 'audience': audience}\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n token = AuthenticationHandler.generate_auth_token(roles, secret)\n decoded_token = 
AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'admin')\n self.assertTrue(decoded_token['role'][1] == 'user')", "def __init__(self, app=None, well_known_config=None, well_known_obj_cache=None, algorithms='RS256', jwks_uri=None, issuer=None, audience=None, client_secret=None, cache=None, caching_enabled=False, jwt_oidc_test_mode=False, jwt_oidc_test_keys=None, jwt_role_callback=None, jwt_oidc_test_private_key_pem=None):\n \n self.app = app\n self.well_known_config = well_known_config\n self.well_known_obj_cache = well_known_obj_cache\n self.algorithms = algorithms\n self.jwks_uri = jwks_uri\n self.issuer = issuer\n self.audience = audience\n self.client_secret = client_secret\n self.cache = cache\n self.caching_enabled = caching_enabled\n\n self.jwt_oidc_test_mode = jwt_oidc_test_mode\n self.jwt_oidc_test_keys = jwt_oidc_test_keys\n self.jwt_oidc_test_private_key_pem = jwt_oidc_test_private_key_pem\n self.jwt_role_callback = jwt_role_callback\n\n print(\"Running constructor\")\n if app is not None:\n self.init_app(app)", "def generate_jwt_key(self):\n return ''.join(random.choice(string.ascii_uppercase +\n string.digits +\n string.ascii_lowercase) for _ in range(50))", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def jwt_auth(client):\n return JwtAuthActions(client)", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def test_validate_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key, algorithm='HS256')\n\n self.assertTrue(\n validate_token(token)[0],\n 'Failed to validate token.'\n )", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = jwt_encode_handler(payload)\n\n return token", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token\n }", "def test_decoded_jwt_no_jwt_provided(\n self,\n mixin,\n mrequest,\n mdecode_jwt,\n ):\n with raises(HTTPUnauthorized):\n mixin.decoded_jwt()", "def jwt_manager(app, api=None):\n # https://flask-jwt-extended.readthedocs.io/en/stable/options\n 
app.config['JWT_TOKEN_LOCATION'] = ['headers', 'cookies']\n app.config['JWT_ACCESS_COOKIE_NAME'] = os.environ.get(\n 'JWT_ACCESS_COOKIE_NAME', 'access_token_cookie')\n app.config['JWT_COOKIE_CSRF_PROTECT'] = bool(os.environ.get(\n 'JWT_COOKIE_CSRF_PROTECT', False))\n app.config['JWT_SECRET_KEY'] = os.environ.get(\n 'JWT_SECRET_KEY', os.urandom(24))\n\n jwt = JWTManager(app)\n\n if api:\n # Delegate error handlers to RestPlus because of\n # https://github.com/vimalloc/flask-jwt-extended/issues/86\n jwt._set_error_handler_callbacks(api)\n\n @api.errorhandler\n def restplus_error_handler(error):\n # JWT error handler will be called afterwards\n return {}\n\n @jwt.expired_token_loader\n def handle_expired_token():\n # http://flask-jwt-extended.readthedocs.io/en/latest/changing_default_behavior.html\n # resp = redirect('/auth/login')\n # Automatic re-login does't work with SAML, so we prepare\n # for manual re-login\n resp = make_response()\n unset_jwt_cookies(resp)\n return resp\n\n return jwt", "def generate_jwt_token(private_pem: bytes, app_id: int) -> str:\n private_key = jwcrypto.jwk.JWK.from_pem(private_pem)\n payload = {\"iss\": app_id}\n duration = datetime.timedelta(minutes=10)\n return python_jwt.generate_jwt(payload, private_key, \"RS256\", duration)", "def jwt_required_extended(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n verify_jwt_in_request()\n except IndexError:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.UNAUTHORIZED,\n 'No token provided in the format of \"Bearer <JWT>\"')\n token = get_jwt_identity()\n if token['is_user_token'] is False:\n from api.services.data_source_token import \\\n DataSourceTokenService\n _token_usage_counter_add(token['data_source_token']['id'])\n if not DataSourceTokenService.check_if_token_is_active(\n DataSourceTokenService, token['data_source_token']['id']):\n return ErrorObject.create_response(ErrorObject,\n HTTPStatus.FORBIDDEN,\n 'Token has been revoked')\n return fn(*args, **kwargs)\n\n return wrapper", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def process_jwt_token(response):\n if response.status_code == HTTPStatus.OK and current_identity:\n response.headers['new_jwt'] = '{0}'.format(\n str(__encode_token().decode('utf-8'))\n )\n\n return response", "def generate_token(dictionary: dict, expiration: datetime.timedelta):\n\n dictionary['expiration'] = (datetime.datetime.utcnow() + expiration).timestamp()\n\n return jwt.encode(dictionary, current_app.config['TOKEN_SECRET_KEY'], algorithm='HS256')", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token.decode()", "def create_jwt(self, audience: List[str], additional_claims=None) -> str:\n iat = time.time()\n exp = iat + self.lifetime\n payload = additional_claims or {}\n payload.update({'iss': self.credentials[\"client_email\"],\n 'sub': self.credentials[\"client_email\"],\n 'aud': audience,\n 'iat': iat,\n 'exp': 
exp,\n 'scope': ['email', 'openid', 'offline_access'],\n 'email': self.credentials[\"client_email\"]\n })\n additional_headers = {'kid': self.credentials[\"private_key_id\"]}\n token = jwt.encode(\n payload,\n self.credentials[\"private_key\"],\n headers=additional_headers,\n algorithm='RS256').decode()\n return token", "def create_jwt(key, cert, systeminfo, metadata, requestdata):\n\n claims = {}\n claims[\"iat\"] = int(time.time())\n claims[\"systeminfo\"] = systeminfo\n claims[\"metadata\"] = metadata\n claims[\"requestdata\"] = requestdata\n\n logging.debug(\"Claims:{}\".format(json_encode(claims)))\n\n token = jwt.JWT(header=json_encode(jwt_header([cert])),\n claims=json_encode(claims))\n\n token.make_signed_token(key)\n\n return token.serialize()", "def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated", "def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated", "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n\n \"token\": JWT_AUTH.get('JWT_AUTH_HEADER_PREFIX') + ' ' + token,\n \"expires_in\": 
datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,\n \"user_info\": UsersLoginSerializer(user, context={'request': request}).data\n\n }", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'username': userdata['username'],\n 'password':userdata['password']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def check_token(fn):\n def response(self, *args, **kw):\n if not JWT_DISABLED:\n intoken = get_token_from_header()\n try:\n jwt.decode(intoken, SECRET_KEY)\n except jwt.exceptions.DecodeError:\n raise Error(FORBIDDEN)\n except jwt.ExpiredSignatureError:\n raise Error(UNAUTHORIZED, msg=\"Signature expired.\")\n except jwt.InvalidTokenError:\n raise Error(UNAUTHORIZED, msg=\"Invalid token.\")\n return fn(self, *args, **kw)\n return response", "def validateAgentJWTToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_token(self, user):\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n return token", "def generate_token(exp=None):\n\n secret_key = getenv('JWT_SECRET_KEY')\n user = {\n 'first_name': fake.name(),\n 'last_name': fake.name(),\n 'email': fake.email(),\n 'is_admin': IsAdmin.yes,\n 'password': fake.password()\n }\n\n payload = {'id': str(User.find_or_create(user, email=user['email']).id)}\n payload.__setitem__('exp', exp) if exp is not None else ''\n token = jwt.encode(payload, secret_key, algorithm='HS256').decode(CHARSET)\n return 'Bearer {0}'.format(token)", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def build_jwt(payload: dict) -> str:\n if 'sub' not in payload.keys():\n raise ValueError('sub not in payload keys')\n jwt_fields = {\n 'iss': JWT_DOMAIN,\n 'sub': None,\n 'iat': datetime.datetime.utcnow(),\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=JWT_EXPIRATION_MINUTES),\n **payload\n }\n return jwt.encode(jwt_fields, key=SECRET_KEY, json_encoder=JSONDataEncoder).decode(encoding='UTF-8')", "def test_encode_decode_token(create_user):\n user = create_user\n user_data = {\n \"email\": user.email,\n \"username\": user.username\n }\n jwt = JWTAuthentication()\n # encode token\n encoded_token = jwt.generate_token(user_data)\n assert type(encoded_token) is str # test encoding is 'utf-8'\n # decode token\n user_details = jwt.decode_token(encoded_token)\n assert user_details['userdata'] == user_data # test token details", "def test_api_jwt(self):\n url = reverse('token_obtain_pair')\n url_register = reverse('auth_register')\n resp = self.client.post(url_register, {\n \"username\": \"user\",\n \"password\": \"lol1lol1\",\n \"password2\": \"lol1lol1\",\n \"email\": \"lol@gmail.com\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"bio\": \"\"\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().username, 'user')\n\n resp = self.client.post(url, {'email':'lol@gmail.com', 'password':'lol1lol1'}, format='json')\n 
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n resp = self.client.post(url, {'username':'user', 'password':'lol1lol1'}, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n print(resp.data)\n self.assertTrue('access' in resp.data)\n self.assertTrue('refresh' in resp.data)\n token = resp.data['access']\n\n verification_url = reverse('api:event-list')\n \n\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + 'abc')\n resp = client.post(verification_url, {\n \"title\": \"event24\",\n \"description\": \"jfgjgfjfg\",\n \"event_type\": \"PRI\",\n \"city\": \"Kyiv\",\n \"address\": \"dfsfsdfsdf\",\n \"date_and_time_of_event\": \"2021-07-30T15:09:00Z\"\n })\n self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)\n resp = client.post(verification_url, {\n \"title\": \"event24\",\n \"description\": \"jfgjgfjfg\",\n \"event_type\": \"PRI\",\n \"city\": \"Kyiv\",\n \"address\": \"dfsfsdfsdf\",\n \"date_and_time_of_event\": \"2021-07-30T15:09:00Z\"\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Event.objects.count(), 1)\n self.assertEqual(Event.objects.get().title, 'event24')", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "def generate_key():\n return get_token_generator().generate_token()", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def generate_token(user):\n try:\n # generate the auth token\n auth_token = User.encode_auth_token(user.id)\n response_object = {\n \"status\": \"success\",\n \"message\": \"Successfully registered.\",\n \"Authorization\": auth_token.decode(),\n }\n return response_object, 201\n except Exception as e:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"Some error occurred. Please try again.\",\n }\n return response_object, 401", "def jwt_permission(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n auth_token = request.headers.environ.get('HTTP_AUTHORIZATION', '').split(' ')\n if len(auth_token) < 2:\n abort(403, \"Authentication fails\")\n\n JwtAuth.decode_auth_token(auth_token[1])\n return func(*args, **kwargs)\n return wrapper", "def get_jwt_token_from_secret_file(self, filename):\n # pylint: disable=consider-iterating-dictionary\n if filename in self.jwt_tokens.keys():\n # token for that file was checked already.\n return self.jwt_tokens[filename]\n\n cmd = [\n self.cfg.bin_dir / \"arangodb\",\n \"auth\",\n \"header\",\n \"--auth.jwt-secret\",\n str(filename),\n ]\n print(cmd)\n jwt_proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n logging.info(\"JWT starter has PID:\" + str(jwt_proc.pid))\n\n (header, err) = jwt_proc.communicate()\n jwt_proc.wait()\n if len(str(err)) > 3:\n raise Exception(\"error invoking the starter \" \"to generate the jwt header token! 
\" + str(err))\n if len(str(header).split(\" \")) != 3:\n raise Exception(\"failed to parse the output\" \" of the header command: \" + str(header))\n\n self.jwt_tokens[filename] = str(header).split(\" \")[2].split(\"\\\\\")[0]\n return self.jwt_tokens[filename]", "def get_jwt_for_user(user: User):\n dt = datetime.datetime.now() + datetime.timedelta(hours=10)\n return jwt.encode({'username': user.username, 'exp': dt}, jwt_key_env).decode('UTF-8')", "async def jwt_secret() -> Optional[str]:\n if not jwt_secret_config:\n raise RuntimeError(\"jwt_secret_config not set in auth\")\n if hasattr(jwt_secret_config, \"get_secret_value\"):\n return jwt_secret_config.get_secret_value()\n else:\n return jwt_secret_config", "def create_object_access_jwt(obj, ttl=None):\n if not ttl:\n ttl = settings.JWT_AUTH['JWT_EXPIRATION_DELTA']\n\n now = datetime.utcnow()\n expiry = now + ttl\n content_type = ContentType.objects.get_for_model(obj).model\n token = jwt.encode({'id': obj.id,\n 'content_type': content_type,\n # The 'permission' field is currently unused, but\n # could be used to determine access type (eg read/write)\n 'permission': 'rw',\n 'exp': expiry,\n 'iat': now,\n 'nbf': now,\n # The 'jti' field is currently a placeholder, unused.\n # It is a unique identifier for the token.\n # We would use it if we wanted to revoke keys prior\n # to expiration (eg upon job completion). This would\n # require storing every JWT (+jti) issued (or\n # associating the jti with the Job model, eg via a\n # GenericForeignKey), and doing a database lookup for\n # revoked keys when we verify for decode incoming JWTs.\n 'jti': uuid.uuid4().hex,\n },\n settings.JWT_AUTH['JWT_SECRET_KEY'],\n settings.JWT_AUTH['JWT_ALGORITHM'])\n return token", "def validate_eve_jwt(self):\n\n\t\tres = self.session.get(self.settings['jwks_url'])\n\t\tres.raise_for_status()\n\n\t\tdata = res.json()\n\n\t\ttry:\n\t\t\tjwk_sets = data[\"keys\"]\n\t\texcept KeyError as e:\n\t\t\tself.p(\"Something went wrong when retrieving the JWK set. The returned \"\n\t\t\t\t\"payload did not have the expected key {}. 
\\nPayload returned \"\n\t\t\t\t\"from the SSO looks like: {}\".format(e, data))\n\t\t\treturn None\n\n\t\tjwk_set = next((item for item in jwk_sets if item[\"alg\"] == \"RS256\"))\n\n\t\ttry:\n\t\t\treturn jwt.decode(\n\t\t\t\tself.access_token,\n\t\t\t\tjwk_set,\n\t\t\t\talgorithms=jwk_set[\"alg\"],\n\t\t\t\tissuer=self.settings['login_host']\n\t\t\t)\n\t\texcept ExpiredSignatureError:\n\t\t\tself.p(\"The JWT token has expired: {}\")\n\t\t\treturn None\n\t\texcept JWTError as e:\n\t\t\tself.p(\"The JWT signature was invalid: {}\").format(str(e))\n\t\t\treturn None\n\t\texcept JWTClaimsError as e:\n\t\t\ttry:\n\t\t\t\treturn jwt.decode(\n\t\t\t\t\t\t\tself.access_token,\n\t\t\t\t\t\t\tjwk_set,\n\t\t\t\t\t\t\talgorithms=jwk_set[\"alg\"],\n\t\t\t\t\t\t\tissuer=urllib.parse.urlunparse([self.settings['esi_proto'],self.settings['login_host'],'','','',''])\n\t\t\t\t\t\t)\n\t\t\texcept JWTClaimsError as e:\n\t\t\t\tself.p(\"The issuer claim was not from login.eveonline.com or \"\n\t\t\t\t\t\"https://login.eveonline.com: {}\".format(str(e)))\n\t\t\t\treturn None", "def _standard_token(self):\n return {\n 'iss': 'https://iam-test.idc.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 200000\n }", "def gen_verification_token(self, user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def jwt_optional(self) -> None:\n if self._TOKEN and self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def _generate_token_value():\n return secrets.token_urlsafe()", "def generate_auth_token(self):\n s = Serializer(app.config['SECRET_KEY'])\n return s.dumps({'email': self.email})", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def test_generate_token_for_invalid_user(self):\n # setup: none\n user = {\"userName\": \"user\", \"password\": \"1234\"}\n\n # test\n resp = self.generate_token(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 200\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"token\"] is None\n assert resp_body[\"expires\"] is None\n assert resp_body[\"status\"] == \"Failed\"\n assert resp_body[\"result\"] == \"User authorization failed.\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # teardown:", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_jwt_header(self):\n if self.jwt_header:\n return self.jwt_header\n self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile))\n return self.jwt_header", "def make_token():\n return secrets.token_urlsafe(36)", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def test_jwt_example(self):\n data = 
r'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'\n expected = json.loads(r'''{\"header\":{\"alg\":\"HS256\",\"typ\":\"JWT\"},\"payload\":{\"sub\":\"1234567890\",\"name\":\"John Doe\",\"iat\":1516239022},\"signature\":\"49:f9:4a:c7:04:49:48:c7:8a:28:5d:90:4f:87:f0:a4:c7:89:7f:7e:8f:3a:4e:b2:25:5f:da:75:0b:2c:c3:97\"}''')\n self.assertEqual(jc.parsers.jwt.parse(data, quiet=True), expected)", "def jwt_response_payload_handler(token, user=None, request=None):\n req = None\n print('jwt_response_payload_handler ', user)\n # Here you can use other serializers or custom logic, it's up to you!\n if isinstance(user, AnonymousUser):\n user = User.objects.get(id=user.id)\n req = {'profile': {'user': user}}\n else:\n req = {'request': request}\n return {\n 'token_decoded': jwt_decode_handler(token),\n 'token': token,\n 'user': UserSerializer(user, context=req).data\n\n }", "def generate_jwt(self, sa_keyfile):\n now = int(time.time())\n expires = now + 3600\n self._access_token_expires = expires\n\n with io.open(sa_keyfile, 'r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n iss = data['client_email']\n\n # build payload\n payload = {\n 'iss': iss,\n 'scope': HOMEGRAPH_SCOPE,\n 'aud': HOMEGRAPH_TOKEN_URL,\n 'iat': now,\n \"exp\": expires,\n }\n\n # sign with keyfile\n signer = google.auth.crypt.RSASigner.from_service_account_file(sa_keyfile)\n jwt = google.auth.jwt.encode(signer, payload)\n\n return jwt", "def test_create_o_auth_authorize_token(self):\n pass" ]
[ "0.736411", "0.70779586", "0.705047", "0.69427395", "0.6859739", "0.676506", "0.6741483", "0.67354095", "0.66105115", "0.65517974", "0.65479994", "0.6544189", "0.6534447", "0.65264636", "0.6466637", "0.6452001", "0.6433748", "0.6410727", "0.6399224", "0.6341143", "0.62983555", "0.6293985", "0.6287772", "0.6285721", "0.6282781", "0.62632847", "0.6251095", "0.6234913", "0.62334996", "0.62165695", "0.61946166", "0.6193931", "0.618168", "0.6150076", "0.61278945", "0.6125906", "0.6125092", "0.61187226", "0.61033857", "0.6102583", "0.6098235", "0.6088271", "0.60834", "0.60713077", "0.6068739", "0.6064125", "0.60603344", "0.6048475", "0.6048475", "0.6048475", "0.6044764", "0.60433847", "0.6043344", "0.6043135", "0.6041025", "0.60210204", "0.6018936", "0.6016778", "0.6005754", "0.6002728", "0.59948957", "0.59896654", "0.5972436", "0.5962614", "0.5962614", "0.5950208", "0.59315354", "0.592979", "0.58972967", "0.5890741", "0.58891684", "0.5885605", "0.58845305", "0.5884429", "0.58671224", "0.5865866", "0.5864507", "0.5862139", "0.5861075", "0.5857849", "0.5856055", "0.58373564", "0.5828902", "0.58240235", "0.5819974", "0.5818542", "0.57912135", "0.5779978", "0.5777168", "0.57683337", "0.57566607", "0.5754822", "0.5754", "0.5746405", "0.5737794", "0.5735704", "0.5732764", "0.57243675", "0.57216847", "0.5721361", "0.5719777" ]
0.0
-1
retrieve the token from the JWT secret file, which is cached for future use
def get_jwt_token_from_secret_file(self, filename):
    # pylint: disable=consider-iterating-dictionary
    if filename in self.jwt_tokens.keys():
        # token for that file was checked already.
        return self.jwt_tokens[filename]

    cmd = [
        self.cfg.bin_dir / "arangodb",
        "auth",
        "header",
        "--auth.jwt-secret",
        str(filename),
    ]
    print(cmd)
    jwt_proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    logging.info("JWT starter has PID:" + str(jwt_proc.pid))

    (header, err) = jwt_proc.communicate()
    jwt_proc.wait()
    if len(str(err)) > 3:
        raise Exception("error invoking the starter "
                        "to generate the jwt header token! " + str(err))
    if len(str(header).split(" ")) != 3:
        raise Exception("failed to parse the output"
                        " of the header command: " + str(header))

    self.jwt_tokens[filename] = str(header).split(" ")[2].split("\\")[0]
    return self.jwt_tokens[filename]
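A minimal usage sketch (hedged: the starter instance, its cfg.bin_dir wiring, and the secret-file path are assumptions for illustration only):

    # First call shells out to `arangodb auth header` and caches the token
    # in self.jwt_tokens; a repeat call for the same file is a cache hit
    # and spawns no subprocess.
    token = starter.get_jwt_token_from_secret_file("/secrets/cluster.jwt")
    token_again = starter.get_jwt_token_from_secret_file("/secrets/cluster.jwt")  # cached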
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None", "def get_token_from_secret_file(secret_file_path):\n try:\n with open(secret_file_path, \"r\") as f:\n return f.readline()\n except FileNotFoundError:\n raise BaseSpaceDownloadError(\"Secret file not found\")\n except PermissionError:\n raise BaseSpaceDownloadError(\"No permissions to read secret file\")", "def getCachedToken( self ):\n if ( os.path.exists( TOKEN_PATH )):\n return open( TOKEN_PATH ).read()\n else :\n return None", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def get_live_token():\n token_file = open(os.path.dirname(__file__) + TOKEN_FILE_PATH, \"r\")\n keyword = \"GITLAB_API_SECRET\"\n for tokens in token_file:\n token = tokens.split(\"\\n\")\n for token_key in token:\n if keyword in token_key:\n gitlab_token = token_key.split(\"\\\"\")[1]\n token_file.close()\n return gitlab_token", "def __current_authentication_token(self):\n if os.path.isfile(self.token_filename):\n with open(self.token_filename, 'r') as f:\n (stored_token, expires) = f.read().split(' ')\n t = time.time()\n if int(expires) > t:\n return stored_token\n return None", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def get_token(filename='config.ini'):\n cp = ConfigParser()\n cp.read(filename)\n token = cp.get('githubapi', 'token')\n return token", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def fetch_token():\n bucket = os.environ[\"SPOTIFY_BUCKET_NAME\"]\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\", \"\")\n logger.info(\"Reading Spotify OAuth token from s3://%s/%s/token.json.\" %\n (bucket, path))\n s3 = boto3.client('s3')\n content_object = s3.get_object(Bucket=bucket, Key=\"%s/token.json\" % path)\n file_content = content_object['Body'].read().decode('utf-8')\n token = json.loads(file_content)\n return token", "def get_token(alias, reg_code, privKey):\n data = json.dumps({\n \"namespace\": alias,\n \"reg_code\": reg_code\n })\n url = endpoint('auth')\n r = requests.post(url,data=data) \n token_str = (r.__dict__['_content']).decode()\n r_token_obj = json.loads(token_str)\n token_cipher = ast.literal_eval( 
r_token_obj[\"token\"] )\n token_obj = dict()\n token_obj = {\n \"authToken\": decrypt_message( privKey, token_cipher),\n \"expiration_minutes\": r_token_obj[\"expiration_minutes\"],\n \"expiration\": str(datetime.datetime.now() + datetime.timedelta(minutes=r_token_obj[\"expiration_minutes\"]))\n }\n expiration = token_obj[\"expiration\"]\n expiration = parser.parse(expiration)\n if datetime.datetime.now() > expiration:\n print(\"Token has expired\")\n else:\n c = expiration - datetime.datetime.now()\n valid_minutes = str(divmod(c.total_seconds(), 60)[0])\n return token_obj[\"authToken\"]", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. \n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "async def jwt_secret() -> Optional[str]:\n if not jwt_secret_config:\n raise RuntimeError(\"jwt_secret_config not set in auth\")\n if hasattr(jwt_secret_config, \"get_secret_value\"):\n return jwt_secret_config.get_secret_value()\n else:\n return jwt_secret_config", "def get_jwt() -> str:\n LOGGER.debug(\"Retrieving JWT...\")\n\n args = {\n \"url\": \"{0}/auth\".format(CONFIG['dojot']['url']),\n \"data\": json.dumps({\n \"username\": CONFIG['dojot']['user'],\n \"passwd\": CONFIG['dojot']['passwd'],\n }),\n \"headers\": {\n \"Content-Type\": \"application/json\"\n },\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\".. 
retrieved JWT\")\n return res[\"jwt\"]", "def getJWTtoken(self):\n\n token = False\n try:\n res = self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token", "def get_stored_token():\n try:\n parser = SafeConfigParser()\n parser.read(OAUTH_FILE)\n user = parser.get('auth', 'user')\n token = parser.get('auth', 'token')\n token_date_str = parser.get('auth', 'token_date')\n except ConfigParser.Error as e:\n return None, None\n\n if user and token and token_date_str:\n date1 = datetime.datetime.strptime(token_date_str, '%Y-%m-%d').date()\n date2 = datetime.date.today()\n if (date2 - date1).days > OAUTH_EXP_DAYS:\n user, token = None, None\n\n return user, token", "def _get_token(self):\n return user.get_token()", "def get_jwt_header(self):\n if self.jwt_header:\n return self.jwt_header\n self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile))\n return self.jwt_header", "def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")", "def retrieve_token(filename):\n with open(filename, 'r') as f:\n token = f.readline()\n\n return token", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def load_token():\n try:\n ifile = open('access.cfg', 'r')\n token_string = ifile.read()\n ifile.close()\n return tweepy.oauth.OAuthToken.from_string(token_string)\n\n except IOError:\n print (\"Error: Unable to load credentials, please authenticate.\")\n return None\n\n except tweepy.TweepError:\n print (\"Error: Unable to parse credentials, please remove access.cfg \"\n \"file and try running app again.\")", "async def token(request: Request):\n return get_token()", "def get_token(path = os.getcwd()):\n\n path += \"\\\\.env\"\n load_dotenv(path)\n token = os.environ.get(\"token\")\n return token", "def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n else:\n auth_token_value = None\n return auth_token_value", "def _retrieve_jwt_token(self, user, password):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n # convert to json\n data = json.dumps({\"username\": user, \"password\": password})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n # get token\n self.jwt_access_token = dic['access']\n self.jwt_refresh_token = dic['refresh']\n\n if 
self.debug:\n print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))\n\n return", "def getSecret(self):\n\n with open(self._secret_file) as f:\n secret=f.readline().rstrip()\n \n return secret", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def token():\n return os.environ.get('TOKEN', None)", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def load_token():\n with open(\"config.yaml\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config[\"token\"]", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def load_token(token):\n\n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. \n\n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. 
\n #max_age = app.config[\"REMEMBER_COOKIE_DURATION\"].total_seconds()\n\n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token)\n\n #Find the User\n user = User.query.filter_by(email = data[0]).first()\n\n #Check Password and return user or None\n if user and data[1] == user.pwd:\n \t#On update la derniere connection du user\n \tuser.update_last_connection()\n return user\n return None\n\n\n\n\n\n\n\n #######################################################\n ################# REQUETES ############################\n #######################################################", "def get_secret():\n if not DEFAULT_KEY_FILE.exists():\n raise Exception(\"Authentication key must be stored in a file named \" + DEFAULT_KEY_FILE.name)\n\n retval = DEFAULT_KEY_FILE.read_text().strip()\n if not retval or len(retval) < 10:\n raise Exception(\"Invalid authentication token\")\n return retval", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_token(public_key,delta):\n return jwt.encode(\n {\n 'public_key':public_key,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=delta)\n },\n current_app.config['SECRET_KEY'],\n algorithm=\"HS256\"\n )", "def read_discord_token():\n\ttoken_file = Path(\"./token\")\n\tif token_file.exists():\n\t\twith open(\"token\", \"r\") as f:\n\t\t\treturn \"\".join(f.readlines()).strip()", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_jwt(self):\n if self.token is None or self.is_expired(self.token):\n self.token = self.create_jwt(self.audience, self.additional_claims)\n return self.token", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def decode_jwt(self, token):\n key = self.master_secret\n public_key = self.public_key\n if self.public_key_file is not None:\n with open(self.public_key_file, 'r') as rsa_pub_file:\n public_key = rsa_pub_file.read()\n if public_key is not None:\n key = public_key\n if self.leeway is not None:\n leeway = self.leeway\n else:\n leeway = 0\n options = {\n 'verify_exp': self.verify_expiration,\n }\n try:\n claims_set = jwt.decode(\n token,\n key,\n options=options,\n leeway=leeway,\n issuer=self.issuer\n )\n except (jwt.DecodeError, jwt.ExpiredSignature):\n return None\n return claims_set", "def load_token(self):\n token = None\n\n if config.outlook_token:\n token = self.token_constructor(config.outlook_token)\n\n return token", "def check_token(token: str, secret: str | List[str], max_age_seconds: int = 60 * 60 * 24) -> Any:\n return URLSafeTimedSerializer(secret).loads(token, max_age=max_age_seconds, salt=\"token\")", "def _generate_token_value():\n return secrets.token_urlsafe()", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def 
meraki_vault_r_secret(mount, path):\n read_secret_result = client.secrets.kv.v1.read_secret(path=meraki_vault_path, mount_point=vault_mount_point)\n api_token = read_secret_result['data']['token']\n return api_token", "def get_token(self):\n logging.debug(\"In the Token get_token() class method.\")\n\n if datetime.datetime.now() > self.token_expiry:\n logging.info(\"Token Expired.\")\n self.generate_tokens()\n return self.access_token", "def get_cached_token(self):\n token_info = None\n try:\n token_info_string = get_spotify_token_info(self.discord_uid)\n token_info = json.loads(token_info_string)\n\n # if scopes don't match, then bail\n if \"scope\" not in token_info or not self._is_scope_subset(\n self.scope, token_info[\"scope\"]\n ):\n return None\n\n if self.is_token_expired(token_info):\n token_info = self.refresh_access_token(\n token_info[\"refresh_token\"]\n )\n except Exception as e:\n logger.warning(f\"Couldn't read cache: {e}\")\n\n return token_info", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def get_token(self, obj):\n\n user = User.objects.get(email=obj.email)\n\n payload = jwt_payload_handler(user)\n\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n datetime.utcnow().utctimetuple()\n )\n\n token = jwt_encode_handler(payload)\n\n return token", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )", "def _get_token(token=None):\n if token is not None:\n return token\n else:\n return os.environ.get(\"MAPBOX_ACCESS_TOKEN\") or os.environ.get(\n \"MapboxAccessToken\"\n )", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def get_stored_credentials(user):\n with open(\"access.json\", \"r\") as f:\n credentials = json.load(f)\n user_creds = credentials[user]\n return user_creds[\"access_token\"], user_creds[\"access_secret\"]", "def get_key():\n try:\n return settings.get('backend')['secret_key']\n except AttributeError:\n raise AuthTokenGenerationException()", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def _read_token(token_file):\n try:\n return _fortworth.read(token_file).strip()\n except 
FileNotFoundError:\n raise _errors.TokenNotFoundError(token_file)", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def get_token():\n global vault_token\n global vault_token_time\n current_app.logger.info('************* GET TOKEN METHOD **************')\n return 'root'\n if validate_token():\n vault_duration = None\n try:\n auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')\n current_app.logger.info('*********** Auth Type: ' + auth_type)\n if auth_type == 'TOKEN':\n vault_token = current_app.config.get('VAULT_AUTH_TOKEN')\n elif auth_type == 'USERPASS':\n vault_token, vault_duration = authenticate_userpass()\n elif auth_type == 'LDAP':\n vault_token, vault_duration = authenticate_ldap()\n elif auth_type == 'CERT':\n vault_token, vault_duration = authenticate_certificate()\n elif auth_type == 'GCP':\n vault_token, vault_duration = authenticate_gcp()\n elif auth_type == 'APPROLE':\n vault_token, vault_duration = authenticate_approle()\n else:\n current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')\n raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')\n if vault_duration is not None:\n vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))\n \n current_app.logger.info('*********** TOKEN: ' + vault_token) \n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to Vault server.')\n raise ConnError\n\n return vault_token", "def load_bearer_token():\n\n # To set your environment variables in your terminal execute a command like\n # the one that you see below.\n\n # Example:\n # export 'TWITTER_BEARER_TOKEN'='<your_twitter_bearer_token>'\n\n # Do this for all of your tokens, and then load them with the commands\n # below, matching the string in the .get(\"string\") to the name you've\n # chosen to the left of the equal sign above.\n\n # Set Twitter tokens/keys.\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n bearer_token = os.environ.get(\"TWITTER_BEARER_TOKEN\")\n\n return bearer_token", "def __get_token(self):\n logger.debug(\"Try to get new token from server\")\n self.__refresh_token += 1\n data = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.__user_id,\n \"client_secret\": self.__secret,\n }\n response = self.__send_request(\"oauth/access_token\", \"POST\", data, False)\n if response.status_code != 200:\n return False\n self.__refresh_token = 0\n self.__token = response.json()['access_token']\n logger.debug(\"Got: '{}'\".format(self.__token, ))\n if self.__storage_type == \"MEMCACHED\":\n logger.debug(\"Try to set token '{}' into 'MEMCACHED'\".format(self.__token, ))\n mc = memcache.Client([self.__memcached_host])\n mc.set(self.__token_hash_name, self.__token, self.MEMCACHED_VALUE_TIMEOUT)\n else:\n filepath = \"{}{}\".format(self.__token_file_path, self.__token_hash_name)\n try:\n if not os.path.isdir(self.__token_file_path):\n os.makedirs(self.__token_file_path, exist_ok=True)\n\n with open(filepath, 'w') as f:\n f.write(self.__token)\n logger.debug(\"Set token '{}' into 'FILE' '{}'\".format(self.__token, filepath))\n except IOError:\n logger.warning(\"Can't create 'FILE' to store security token. 
Please, check your settings.\")\n if self.__token:\n return True\n return False", "def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token", "def get_jwt_value(self, request):\n auth = get_authorization_header(request).split()\n auth_header_prefix = settings.JWT_AUTH_HEADER_PREFIX.lower()\n if not auth:\n if settings.JWT_AUTH_COOKIE:\n return request.COOKIES.get(settings.JWT_AUTH_COOKIE)\n return None\n # compare JWT_AUTH_HEADER_PREFIX and extractd token refiex \"should be like WWW-athenticate\"\n if smart_text(auth[0].lower()) != auth_header_prefix:\n return None\n if len(auth) == 1:\n msg = _('Invalid Authorization header. No credentials provided.')\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = _('Invalid Authorization header. Credentials string '\n 'should not contain spaces.')\n raise exceptions.AuthenticationFailed(msg)\n #the auth list should have only 2 element which are:\n # JWT_AUTH_HEADER_PREFIX and the token\n #return the actual token inside the header\n return auth[1]", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = jwt_encode_handler(payload)\n\n return token", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def _resolve_secret_token(name, key, model_context):\n global _secret_token_map\n\n if _secret_token_map is None:\n _init_secret_token_map(model_context)\n\n secret_token = name + ':' + key\n return dictionary_utils.get_element(_secret_token_map, secret_token)", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. 
Please login!')", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def get_token(client_id, client_secret, username, password):\r\n try:\r\n if oauth2db.check_client(client_id, client_secret):\r\n if oauth2db.check_user(username, password):\r\n token, refresh = oauth2db.generate_token(client_id, username)\r\n res = { \"token\": token }\r\n except:\r\n res = { \"error\": \"\" }\r\n \r\n if 'token' in res:\r\n return res['token']\r\n else:\r\n return None", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def update_token(token):\n try:\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n payload['exp'] = datetime.utcnow() + timedelta(days=100)\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n raise Exception(str(e))", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def get_client_secret(filename):\n with open(filename) as file:\n json_file = json.load(file)\n\n cyphertext = json_file['CiphertextBlob']\n blob = base64.b64decode(cyphertext)\n client = boto3.client('kms')\n secret = client.decrypt(CiphertextBlob=blob)['Plaintext']\n s = secret.decode('ascii')\n return json.loads(s)", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': 'thundoss@gmail.com', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def get_token(self, token_file: str = 'token') -> str:\n token = ''\n with open(self.path(token_file), 'r') as file:\n tokens = file.read().split('\\n')\n # Loop over all tokens in the file\n for t in tokens:\n # Check if name of token matches name of bot\n if self.name in t:\n token = t.split(' ')[1]\n return token", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\", None", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def getToken():\n token = getenv(TOKEN_NAME)\n if token == None:\n raise SystemExit('No token found. Use env variable %s' % TOKEN_NAME)\n return token", "def read_key():\n path = os.path.join(os.path.dirname(__file__), 'data')\n f = open(os.path.join(path, 'credential.txt'), 'r')\n key = f.read()\n f.close()\n return key" ]
[ "0.77592856", "0.7595998", "0.75481737", "0.7526818", "0.69495", "0.69054925", "0.6903875", "0.6800849", "0.6775595", "0.6755065", "0.6746886", "0.6740278", "0.67240804", "0.6659499", "0.66390276", "0.6621728", "0.6608063", "0.659404", "0.6589575", "0.65768105", "0.6572895", "0.65722185", "0.656959", "0.6553014", "0.65402555", "0.65393645", "0.653337", "0.6527102", "0.6505145", "0.6501232", "0.6480563", "0.6471749", "0.6466603", "0.6466603", "0.6466603", "0.6466225", "0.64573157", "0.6438758", "0.64102775", "0.63986564", "0.63893586", "0.63849324", "0.6384232", "0.63571376", "0.63533217", "0.6338415", "0.63243496", "0.632367", "0.63024735", "0.63024735", "0.6293369", "0.62878406", "0.6275873", "0.62707084", "0.62692493", "0.62691396", "0.6268074", "0.6261919", "0.6252294", "0.6250575", "0.6248363", "0.62153786", "0.62148947", "0.62004", "0.61973023", "0.61910474", "0.61905754", "0.6188943", "0.6183778", "0.6152294", "0.6151645", "0.6138251", "0.61368513", "0.6131923", "0.6130286", "0.61224735", "0.61160356", "0.61037946", "0.61004007", "0.6094045", "0.608354", "0.608354", "0.6083001", "0.60789907", "0.60736537", "0.60724705", "0.60680926", "0.60656035", "0.6058533", "0.60572034", "0.6055855", "0.60535", "0.60465914", "0.60356766", "0.6024962", "0.60153186", "0.60103375", "0.60015154", "0.59935886", "0.5988047" ]
0.8024519
0
return the JWT header from the current installation
def get_jwt_header(self): if self.jwt_header: return self.jwt_header self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile)) return self.jwt_header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def get_jwt(self, request):\n auth_header_prefix = self.auth_header_prefix\n try:\n authorization = request.authorization\n except ValueError:\n return None\n if authorization is None:\n return None\n authtype, token = authorization\n if authtype.lower() != auth_header_prefix.lower():\n return None\n return token", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def jwt_header(cert):\n header = {}\n header[\"alg\"] = \"RS256\"\n header[\"x5c\"] = cert\n return header", "def get_jwt_value(self, request):\n auth = get_authorization_header(request).split()\n auth_header_prefix = settings.JWT_AUTH_HEADER_PREFIX.lower()\n if not auth:\n if settings.JWT_AUTH_COOKIE:\n return request.COOKIES.get(settings.JWT_AUTH_COOKIE)\n return None\n # compare JWT_AUTH_HEADER_PREFIX and extractd token refiex \"should be like WWW-athenticate\"\n if smart_text(auth[0].lower()) != auth_header_prefix:\n return None\n if len(auth) == 1:\n msg = _('Invalid Authorization header. No credentials provided.')\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = _('Invalid Authorization header. Credentials string '\n 'should not contain spaces.')\n raise exceptions.AuthenticationFailed(msg)\n #the auth list should have only 2 element which are:\n # JWT_AUTH_HEADER_PREFIX and the token\n #return the actual token inside the header\n return auth[1]", "def get_headers(self):\n headers = self.headers\n\n if self.jwt_secret:\n current = int(time.time())\n params = {'exp': current + self.jwt_token_length}\n token = jwt.encode(params, self.jwt_secret, algorithm='HS256')\n headers = {\n **headers,\n 'Authorization': 'Bearer {}'.format(token.decode('utf-8')),\n }\n\n return headers", "def get_headers(self):\n return {\n 'Authorization': 'JWT {}'.format(self.token)\n }", "def _get_request_header() -> Dict:\n metas, envs = get_full_version()\n\n header = {\n **{f'jinameta-{k}': str(v) for k, v in metas.items()},\n **envs,\n }\n return header", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def auth_header(self):\n return self._auth_header", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n \"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n 
\"Accept-Encoding\": \"gzip\"\n }\n return header", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def get_authenticate_header(self):\n pass", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "async def jwt_header(\n Authorization: Optional[str] = Header(None),\n) -> Optional[RawAuth]:\n if not Authorization:\n return None\n\n parts = Authorization.split()\n if parts[0].lower() != \"bearer\":\n log.debug(\"Authorization header Failed, lacked bearer\")\n return None\n if len(parts) != 2:\n log.debug(\"Authorization header Failed, not 2 parts\")\n return None\n else:\n log.debug(\"Got header:Authorization with a JWT\")\n log.debug(\"jwt_header(): %s\", Authorization)\n return RawAuth(\n rawjwt=parts[1],\n rawheader=Authorization,\n via=\"header\",\n key=\"Authorization\",\n )", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def extract_bearer_token(request):\n return request.headers['Authorization'].split(\" \")[-1].strip()", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = 
auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def get_authorization_header(client, user):\n # obtain authorization token\n response = client.post(\n reverse('token-obtain'),\n data={'username': user.username, 'password': user.raw_password},\n content_type='application/json'\n )\n token = response.json()['access']\n return {'HTTP_AUTHORIZATION': f'Bearer {token}'}", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def get_token(self):\n self.register_user(self.user_data)\n result = self.login_user(self.login_data)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def jwt_token_verify(auth_header):\n # Hug do not extract Bearer prefix\n auth_token, payload = parse_header(auth_header)\n return payload", "def getHeader():\n return _HEADER", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def get_starter_header_info(authpt, hks, session: Session):\n resp = session.post(const.POST_STARTER_HEADER_INFO.format(authpt, hks))\n json_resp = json.loads(resp.content)\n\n return json_resp", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = 
parts[1]\n return token", "def get_kid_from_jwe_header(token: str) -> Optional[str]:\n import base64\n import json\n\n header = token.split(\".\")[0]\n deserialized_header = base64.urlsafe_b64decode(header + \"===\")\n jose_header = json.loads(deserialized_header)\n\n return jose_header.get(\"kid\")", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}", "def get_headers():\n return {'Authorization': f'token {settings.GITHUB_AUTH_TOKEN}'}", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. Please login!')", "def __header_base64(self):\n header_base64 = base64.b64encode(f'{self.client_id}:{self.client_secret}'.encode('ascii'))\n header_base64 = str(header_base64).split(\"'\")[1]\n return {'Authorization': f'Basic {header_base64}'}", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def get_new_token(self):\n self.register_user(self.user_data2)\n result = self.login_user(self.login_data2)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def asterisk_in_header():\n auth_token = get_auth_token()\n\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"*/*\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id, token=auth_token)", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def get_jwt() -> str:\n LOGGER.debug(\"Retrieving JWT...\")\n\n args = {\n \"url\": \"{0}/auth\".format(CONFIG['dojot']['url']),\n \"data\": json.dumps({\n \"username\": CONFIG['dojot']['user'],\n \"passwd\": CONFIG['dojot']['passwd'],\n }),\n \"headers\": {\n \"Content-Type\": \"application/json\"\n },\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\".. 
retrieved JWT\")\n return res[\"jwt\"]", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def get_token_auth_header(params):\n auth = get_token(params)\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must start with Bearer\"}, 401)\n\n if len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Token not found\"}, 401)\n\n if len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must be Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def _retrieve_token(request):\n auth_string = request.headers.get('Authorization')\n try:\n match = re.match(\"Bearer (.+)\", auth_string)\n except TypeError:\n match = None\n if match:\n return match.groups()[0]", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def Header(self):\n return chr(keyczar.VERSION) + util.Decode(self.hash)", "def get_jwt_token_from_secret_file(self, filename):\n # pylint: disable=consider-iterating-dictionary\n if filename in self.jwt_tokens.keys():\n # token for that file was checked already.\n return self.jwt_tokens[filename]\n\n cmd = [\n self.cfg.bin_dir / \"arangodb\",\n \"auth\",\n \"header\",\n \"--auth.jwt-secret\",\n str(filename),\n ]\n print(cmd)\n jwt_proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n logging.info(\"JWT starter has PID:\" + str(jwt_proc.pid))\n\n (header, err) = jwt_proc.communicate()\n jwt_proc.wait()\n if len(str(err)) > 3:\n raise Exception(\"error invoking the starter \" \"to generate the jwt header token! 
\" + str(err))\n if len(str(header).split(\" \")) != 3:\n raise Exception(\"failed to parse the output\" \" of the header command: \" + str(header))\n\n self.jwt_tokens[filename] = str(header).split(\" \")[2].split(\"\\\\\")[0]\n return self.jwt_tokens[filename]", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def _headers(helper):\n return {\n 'Authorization': 'Splunk {0}'.format(\n helper.context_meta['session_key'])}", "def get_headers():\n headers = {\n \"Authorization\": \"Token {}\".format(get_token()),\n }\n\n return headers", "def headers():\n return {\n 'user-agent': 'integration-tester',\n 'content-type': 'application/json',\n }", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def get_headers():\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'cfg', 'headers.json'))\n return open_json_file(file_path)", "def Header(self):\n return (bytes(bytearray([constants.VERSION])) +\n util.Base64WSDecode(self.hash_id))", "def get_token(public_key,delta):\n return jwt.encode(\n {\n 'public_key':public_key,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=delta)\n },\n current_app.config['SECRET_KEY'],\n algorithm=\"HS256\"\n )", "def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers", "def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. 
Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data", "def get_auth_headers(key):\n return {\n 'Content-Type': 'Application/JSON',\n 'Authorization': key\n }", "def __getCopyrightHeader():\n\n # Get framework version\n file = open('package.json', 'r')\n version = json.loads(file.read())['version']\n file.close()\n\n # Get header template\n file = open('build/LICENSE.HEADER', 'r')\n header = file.read()\n file.close\n now = datetime.datetime.now()\n header = header.replace('$VERSION', version).replace('$YEAR', str(now.year)).replace('$DATE', now.ctime())\n\n return header", "def get_header(self):\n return self._header", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def authorization(self):\n token = self.create_auth_token(\n self.api_key.user, self.api_key.key, self.api_key.secret\n )\n return f'JWT {token}'", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def default_login_auth_header(self):\n return self.get_auth_header(self.default_login['login'], self.default_login['password'])", "def auth_headers(self, path, payload=\"\"):\n rand = hexlify(Random.new().read(16))\n auth = self.souma.sign(\"\".join([self.souma.id, rand, path, payload]))\n return [(\"Glia-Rand\", rand), (\"Glia-Auth\", auth), (\"Glia-Souma\", self.souma.id)]", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def get_custom_jwt(user, device):\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_otp_payload(user, device)\n return jwt_encode_handler(payload)", "def __generate_msl_header(self, is_handshake=False, is_key_request=False, compressionalgo='GZIP', encrypt=True, esn=None):\n global esn_manifest\n self.current_message_id = self.rndm.randint(0, pow(2, 52))\n header_data = {'sender':esn_manifest, \n 'handshake':is_handshake, \n 'nonreplayable':False, \n 'capabilities':{'languages':[\n 'en-US'], \n 'compressionalgos':[], 'encoderformats':[\n 'JSON']}, \n 'recipient':'Netflix', \n 'renewable':True, \n 'messageid':self.current_message_id, \n 'timestamp':time.time()}\n if compressionalgo is not '':\n header_data['capabilities']['compressionalgos'].append(compressionalgo)\n else:\n if is_key_request:\n public_key = base64.standard_b64encode(self.rsa_key.publickey().exportKey(format='DER')).decode('utf-8')\n 
header_data['keyrequestdata'] = [\n {'scheme':'ASYMMETRIC_WRAPPED', \n 'keydata':{'publickey':public_key, \n 'mechanism':'JWK_RSA', \n 'keypairid':'superKeyPair'}}]\n else:\n if 'usertoken' in self.tokens:\n pass\n else:\n account = account_info\n header_data['userauthdata'] = {'scheme':'EMAIL_PASSWORD', \n 'authdata':{'email':account['email'], \n 'password':account['password']}}\n return json.dumps(header_data)", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def build_jwt_headers(self, user):\n token = create_jwt_for_user(user)\n headers = {'HTTP_AUTHORIZATION': 'JWT ' + token}\n return headers", "def build_jwt_headers(self, user):\n token = create_jwt_for_user(user)\n headers = {'HTTP_AUTHORIZATION': 'JWT ' + token}\n return headers", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def get_header( self ):\n\t\tkey = self.key\n\t\tvalue = self.value\n\t\tpath = self.path\n\t\texpires = self.expires.strftime( \"%a, %d-%m-%y %H:%M:%S GMT\" )\n\t\treturn ( \"Set-Cookie\", \"%(key)s=%(value)s; Path=%(path)s; Expires=%(expires)s;\" % locals() )", "def get_raw_jwt(self) -> Optional[Dict[str,Union[str,int,bool]]]:\n if self._TOKEN:\n return self._verified_token(encoded_token=self._TOKEN)\n return None", "def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header", "def _get_token(self) -> str:\n if IS_SUPERVISOR:\n # On supervisor installs the token is provided by a environment variable\n return os.environ[\"HASSIO_TOKEN\"]\n return self._token", "def _headers(self):\n\n auth_token = SendbeeAuth(self.client.api_secret).get_auth_token()\n headers = {\n 'X-Auth-Token': auth_token,\n 'X-Api-Key': self.client.api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Sendbee Python API Client'\n }\n self.debug.ok('headers', headers)\n\n return headers", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def get_auth(info) -> Tuple[Optional[str], Optional[str]]:\n auth = info.context[\"request\"].headers.get(\"Authorization\")\n if not auth:\n return None, None\n return auth.replace(\"Bearer \", \"\"), None", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def get_headers(token):\n return {\n \"Accept\": \"application/vnd.github+json\",\n \"Authorization\": f\"Bearer {token}\",\n \"X-GitHub-Api-Version\": \"2022-11-28\",\n }" ]
[ "0.7406444", "0.7215993", "0.7196089", "0.7186894", "0.7012713", "0.6979103", "0.6942568", "0.69397306", "0.69034165", "0.6893624", "0.6858153", "0.68385124", "0.6749719", "0.6743572", "0.6729134", "0.6724721", "0.6700563", "0.6690566", "0.6663717", "0.66617864", "0.663666", "0.6626147", "0.6617149", "0.6606582", "0.6585572", "0.65675414", "0.6566705", "0.6564615", "0.65532583", "0.65476197", "0.6528707", "0.6520214", "0.6515561", "0.6506507", "0.6463095", "0.64562744", "0.64542013", "0.6450903", "0.64483553", "0.64471865", "0.63974625", "0.6379737", "0.6377303", "0.6360824", "0.6354877", "0.6330175", "0.6320921", "0.631194", "0.62649834", "0.62647015", "0.6249979", "0.6248521", "0.6236706", "0.62262", "0.62141174", "0.62032545", "0.6191809", "0.61820406", "0.6181258", "0.6181005", "0.6176018", "0.6176018", "0.6168888", "0.61655277", "0.61562955", "0.614418", "0.6135391", "0.61342573", "0.61309075", "0.6127204", "0.6122994", "0.6121955", "0.61186296", "0.61044335", "0.6098426", "0.60930943", "0.6088533", "0.6054413", "0.6047844", "0.6045047", "0.6044874", "0.60398144", "0.603123", "0.6022819", "0.60227364", "0.6015733", "0.6015733", "0.6015733", "0.60091925", "0.60091925", "0.59995234", "0.59813887", "0.59805644", "0.5974093", "0.5970477", "0.5969978", "0.59656864", "0.5941753", "0.5912318", "0.5906393" ]
0.8041909
0
set the passvoid on the managed instance
def set_passvoid(self, passvoid, write_to_server=True): if write_to_server: print("Provisioning passvoid " + passvoid) self.arangosh.js_set_passvoid("root", passvoid) self.passvoidfile.write_text(passvoid, encoding="utf-8") self.passvoid = passvoid for i in self.all_instances: if i.is_frontend(): i.set_passvoid(passvoid) self.cfg.passvoid = passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instance(self, instance):\n self._instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def instance(self, instance):\n\n self._instance = instance", "def _attach_to_instance(self, instance):\n self._instance = instance", "def target_instance(self, target_instance):\n self._target_instance = target_instance", "def SetActiveObject(self):", "def set_state( self ):", "def post_save_access_attempt(self, instance, **kwargs):", "def vm(self, vm):\n\n self._vm = vm", "def set_passthrough(self, bPassthrough):\n\t\tcall_sdk_function('PrlVmDev_SetPassthrough', self.handle, bPassthrough)", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')", "def populate_instance(self, mapper, selectcontext, row, instance, **flags):\n instance.TEST = \"hello world\"\n return EXT_CONTINUE", "def passer(self, value=None):\n pass", "def post_execute(self):", "def on_assign(self):", "def put(self, **kwargs):\n logging.debug(\"In put() for FTDDeviceHAPairs class.\")\n # Attempting to \"Deploy\" during Device registration causes issues.\n self.fmc.autodeploy = False\n return super().put(**kwargs)", "def passive(self,target):\r\n target.temp[\"cannot_act\"][\"temp\"] = True", "def register_instance(self, instance):\n self.instance = instance", "def forward_pass(self):", "def __setstate__(self, state):\n return None", "def put(self):\n self._val = True", "def transfer(self):\n pass", "def __setstate__(self, state):\n\n self.set(DER = state)", "def set_turn_holder(active_entity: EntityID):\n store.turn_holder = active_entity", "def __setstate__(self, d):\n\t\tself.__dict__ = d", "def __setstate__(self, state):\n\n self.list = state", "def __setstate__(self, state):\n\n self.list = state", "def export_setInstanceUniqueID( self, instanceID, uniqueID ):\n return gVirtualMachineDB.setInstanceUniqueID( instanceID, uniqueID )", "def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance", "def post_init_callback(sender, **kwargs):\r\n instance = kwargs['instance']\r\n instance.orig_state = instance.state", "def _self(self, _self):\n\n self.__self = _self", "def _self(self, _self):\n\n self.__self = _self", "def __set__(self, obj, value):\r\n pass", "def put(self):\n pass", "def put(self):\n pass", "def SetVoid(self, *args):\n return _Bnd.Bnd_Box2d_SetVoid(self, *args)", "def transact(self):", "def transact(self):", "def _localSetState(self,pdict):\n pass", "def __call__(self):\n\t\treturn", "async def setheist(self, ctx):\r\n\r\n pass", "def set_admin_password(self, instance, new_pass):\n pass", "def unrescue(self, instance):\n pass", "def setIdentity(self) -> None:\n ...", "def SetVoid(self, *args):\n return _Bnd.Bnd_Box_SetVoid(self, *args)", "def _set_shell_obj(self, obj):\n self._shell_obj = weakref.ref(obj)", "def save(self):\n # type: () -> None\n setattr(self.fn, self.PARAM_NAME, self)", "def attach(self, obj):\n self.Object = obj.Object", "def populate_instance(self, mapper, selectcontext, row, instance, **flags):\n instance.TEST_2 = \"also hello world\"\n return 
EXT_CONTINUE", "def __call__( self ):\n pass", "def set_proxy(self):", "def set_state(self):\n self.able = not self.able\n self.save()", "def setValveState(*args):\n args[0].Controls.ValveState.valve_state = args[1]", "def attach(self, obj):\n return", "def __init__(self):\n self._context = {}", "def setMyCaptain(self, captainObject):\n self.myCaptain = captainObject\n self.captainID = captainObject.id\n captainObject.setMyShip(self.id)", "def _set_context(self, ctx):\n try:\n current_engine_name = self.parent.engine.name \n if sgtk.platform.current_engine(): \n sgtk.platform.current_engine().destroy()\n sgtk.platform.start_engine(current_engine_name, ctx.tank, ctx)\n except Exception, e:\n QtGui.QMessageBox.critical(self, \n \"Could not Switch!\", \n \"Could not change work area and start a new \" \n \"engine. This can be because the task doesn't \"\n \"have a step. Details: %s\" % e)\n return", "def act(self):\n pass", "def _localSetState(self,pdict):\n self.base = pdict.pop('base')", "def post(self, **kwargs):\n logging.debug(\"In post() for FTDDeviceHAPairs class.\")\n # Attempting to \"Deploy\" during Device registration causes issues.\n self.fmc.autodeploy = False\n return super().post(**kwargs)", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def set_persistent_value(self, value, *args, **kwargs):\n pass", "def activated(self):", "def __init__(self, session, vm):\n super().__init__(session)\n self.vm = vm", "def setup(self,context,result):\n pass", "def context(self, context):\n self._context = context", "def set(self, obj, value):\n pass", "def ct(self, ct):\n\n self._ct = ct", "def __set__(self, instance, value):\n instance._values[self.name] = self.process(value)", "def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()", "def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e", "def target(self, target) :\n\t\ttry :\n\t\t\tself._target = target\n\t\texcept Exception as e:\n\t\t\traise e", "def _set_attributes(self):", "def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0", "def setPoint(self,set_point):\n self.set_point = set_point\n self.Integrator=0\n self.Derivator=0", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def ion_instance(self, ion_instance):\n\n self._ion_instance = ion_instance", "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "def setPoint(self,set_point):\n\t\tself.set_point = set_point\n\t\tself.Integrator=0\n\t\tself.Derivator=0", "def __init__(self, context):\n self.__context = context", "def init(self):\n\n self.new_thing = True", "def __setstate__(self, state):\n self.__dict__.update(state)", "def take_action(self, *args, **kwargs):\r\n pass", "def update_proxy(self, instance, value):\n self.value = value", "def __init__(self):\n super(MemoryPersistence, self).__init__(descriptor)", "def put(self):\n return", "def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceTrait, self).save(*args, **kwargs)", "def setContext(self, context: Any, /) -> Any:\n ...", "def context(self, context):\n\n self._context = context", "def __init__(self):\r\n self.activation = Activation(u'signup')\r\n 
self.activated = False", "def __init__(self, context):\n self.__context = context", "def entity(self, entity):\n\n self._entity = entity", "def __setattr__(self, attr, value):\r\n return setattr(self.__instance, attr, value)", "def set_purged(*args):\n return _ida_frame.set_purged(*args)" ]
[ "0.6201083", "0.614531", "0.614531", "0.614531", "0.614531", "0.614531", "0.6030137", "0.5798542", "0.57683265", "0.5758265", "0.5750186", "0.57295257", "0.567436", "0.56572455", "0.56152976", "0.5603632", "0.5594542", "0.5575087", "0.55718654", "0.55426687", "0.54812384", "0.542333", "0.53874207", "0.536859", "0.5360508", "0.5359031", "0.5348714", "0.53438604", "0.5343683", "0.5327227", "0.5327227", "0.530936", "0.5309118", "0.5300345", "0.5287243", "0.5287243", "0.5281956", "0.52755773", "0.52755773", "0.52718776", "0.5268469", "0.5268469", "0.52679396", "0.52627844", "0.5255324", "0.5244376", "0.5239394", "0.5238087", "0.52327806", "0.5222695", "0.52179337", "0.52135986", "0.5213227", "0.5204164", "0.52031875", "0.5202255", "0.5199651", "0.5197583", "0.5196884", "0.5193052", "0.5192865", "0.5191949", "0.5189862", "0.5187213", "0.5177734", "0.5177734", "0.51770097", "0.5176424", "0.5164888", "0.5163372", "0.5160682", "0.51564664", "0.51458055", "0.51419216", "0.5139324", "0.51388055", "0.51388055", "0.51324964", "0.512965", "0.512965", "0.51253325", "0.512346", "0.51212156", "0.51212156", "0.51212156", "0.511772", "0.5116581", "0.5112656", "0.51114184", "0.5110679", "0.51090616", "0.5101922", "0.5086459", "0.5085889", "0.5077499", "0.50744057", "0.5073227", "0.5068926", "0.5067662", "0.5063903" ]
0.6298621
0
get the passvoid of the managed instance
def get_passvoid(self): return self.passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def object(self):", "def retrieve(self):\n pass", "def context(self) -> Any:\n ...", "def context(self) -> CONTEXT:", "def get_transfer(self):\n return self._transfer", "def _get_state(self):", "def get(self):\n return", "def get(self):\n return", "def forward_pass(self):", "def get_object_to_run(self):", "def get():", "def get():", "def get_result(self, state):\n pass", "def _get_instance(self):", "def _get_instance(self):", "def get(self):\n return None", "def __call__(self):\n return self.referee()", "def post_execute(self):", "def target(self):", "def __get__(self, instance, owner):\n return self.xyz", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def _get(self):\n return None", "def get(self):\r\n raise NotImplementedError", "def call(self):", "def transfer(self):\n pass", "def result(self):", "def result(self):", "def __getstate__(self):\n\n return self.get_DER()", "def get_value(self):", "def remote_getPhase(phase):", "def __call__(self):\n\t\treturn", "def remote_getPhases():", "def cmd(self):", "def get(self):\n raise NotImplementedError", "def handle(self):", "def get_data(self):\r\n pass", "def vault(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def get(self):\n return self.args, self.kwargs", "def get_result(self) -> Any:\n ...", "def get(self):\n raise NotImplementedError", "def variable(self):", "def obtain_action(self):\r\n\t\treturn", "def value(self):", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_turn(self):\n raise NotImplementedError", "def __getstate__(self):\n return None", "def get_data(self):", "def __getstate__(self):\n return self.__dict__", "def __getstate__(self):\n return self._", "def get(self):\n raise NotImplementedError()", "def proxy_result(self):\n return None", "def get_details(self):", "def data(self):", "def get_params(self):", "def get(self):\r\n \r\n return self.__dict__", "def get(self):\n return self._params", "def __deref__(self):\n return _spacegrant_swig.binary_sink_sptr___deref__(self)", "def __deref__(self):\n return _spacegrant_swig.message_debug_sptr___deref__(self)", "def data(self):\n pass", "def data(self):\n pass", "def parameters(self):", "def get(self, obj):", "def get_params_snapshot(self):\n ...", "def obtem_bag_pass(self):\n\n return self.bag_pass", "def getId(self):", "def payload(self):", "def get_reference(self):\t\t\n\t\treturn self._reference", "def _get_activate(self):\n return self.__activate", "def get_state(self):\n pass", "def get_data(self):\n pass", "def get_data(self):\n pass", "def params(self):\n pass", "def __call__(self):\n pass", "def __call__(self):\n pass", "def __deref__(self):\n return _spacegrant_swig.udp_debug_sptr___deref__(self)", "def __call__(self) -> dict:\n\t\tpass", "def get(self) -> tuple:", "def on_get(self, *args):\n\t\tpass", "def get_raw_data(self):\r\n \r\n return(self.return_data)", "def punkte(self):\n return self.args", "def DM(self):", "def return_state(self):\n\t\treturn self.state", "def user(self):", "def __getstate__(self):\n\t\treturn self", "def __call__(self):\n return self.value", "def _get_result(self):\r\n \r\n return self._result" ]
[ "0.59444445", "0.59444445", "0.59444445", "0.59444445", "0.58766246", "0.5858545", "0.5811761", "0.5790257", "0.57571363", "0.57146895", "0.5700108", "0.5700108", "0.5633125", "0.5611686", "0.55938387", "0.55938387", "0.5585385", "0.55678827", "0.55678827", "0.55592895", "0.55553067", "0.5553812", "0.5548197", "0.55450654", "0.5542865", "0.5542865", "0.5534726", "0.55094063", "0.54976785", "0.54966134", "0.54824305", "0.54824305", "0.54790235", "0.5477795", "0.54741406", "0.54686743", "0.54681015", "0.5462113", "0.54420096", "0.54412687", "0.5436905", "0.5423831", "0.5420411", "0.5420411", "0.5420411", "0.5420411", "0.54165095", "0.5415131", "0.5407695", "0.5393645", "0.53895557", "0.538036", "0.53617895", "0.53617895", "0.53617895", "0.53617895", "0.53617895", "0.53617895", "0.53579324", "0.5355185", "0.5348824", "0.534168", "0.53221065", "0.5314761", "0.53018236", "0.5293583", "0.52914774", "0.52855474", "0.5280392", "0.52792555", "0.52782506", "0.5274406", "0.52742463", "0.52742463", "0.5269591", "0.52607554", "0.5259755", "0.52271765", "0.5224259", "0.52225673", "0.52159214", "0.52159005", "0.5214783", "0.52117646", "0.52117646", "0.52014315", "0.5193389", "0.5193389", "0.5192361", "0.5191811", "0.51897764", "0.5187772", "0.517396", "0.51717067", "0.517126", "0.51704055", "0.51702905", "0.5167119", "0.51659966", "0.5164845" ]
0.6621574
0
send an http request to the instance
def send_request(self, instance_type, verb_method, url, data=None, headers=None, timeout=None): if headers is None: request_headers = {} else: request_headers = dict(headers) http_client.HTTPConnection.debuglevel = 1 results = [] for instance in self.all_instances: if instance.instance_type == instance_type: if instance.detect_gone(): print("Instance to send request to already gone: " + repr(instance)) else: request_headers["Authorization"] = "Bearer " + str(self.get_jwt_header()) base_url = instance.get_public_plain_url() full_url = self.get_http_protocol() + "://" + base_url + url attach_http_request_to_report(verb_method.__name__, full_url, request_headers, data) reply = verb_method( full_url, data=data, headers=request_headers, allow_redirects=False, timeout=timeout, verify=False, ) attach_http_response_to_report(reply) results.append(reply) http_client.HTTPConnection.debuglevel = 0 return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request(self, url, *args, **kwargs):\n raise NotImplementedError", "def send(self, request: Request, **requests_kwargs) -> Response:", "def call(self):\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_response(status_code, response)", "def send(self, request : str):\n pass", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def get(self):\n self.post()", "def send(self):\n url = \"{}:{}\".format(self.url, self.port)\n headers = dict(self.request.get_headers())\n body = self.request.get_body()\n self.response = requests.post(url, data=body, headers=headers)", "def http_request(self) -> 'outputs.HttpRequestResponse':\n return pulumi.get(self, \"http_request\")", "def _send_http_request(self, resource, method, data=None, params=None, headers=None):\n\n url = '/'.join((self.https_url, resource))\n\n response = self._session.request(\n url=url,\n method=method,\n data=data,\n params=params,\n headers=headers,\n proxies=self._proxies)\n response.raise_for_status()\n\n return response", "def get(self):\n self.post()", "def get(self):\n self.post()", "def _send_request(self, method='post', headers=None, json=None):\n response = getattr(requests, method)(self.url, headers=headers, json=json)\n return response", "def call(self):\n # if this is a POST request, process data\n if self.data:\n post_json = json.dumps(self.data)\n values = {'json': post_json, 'apikey': API_KEY}\n post = urllib.parse.urlencode(values)\n\n else:\n post = None\n\n req = urllib.request.Request(self.url, post)\n\n try:\n self.response = urllib.request.urlopen(req, timeout=self.timeout)\n\n except (URLError, HTTPError, timeout) as error:\n self.response = error", "def send(self, url, data=None):\n if data:\n info = {\n \"id_string\": data.xform.id_string,\n \"uuid\": data.uuid,\n }\n valid_url = url % info\n requests.get(valid_url)", "def _send_request(self, http_request, **kwargs):\n # type: (HttpRequest, Any) -> HttpResponse\n http_request.url = self._client.format_url(http_request.url)\n stream = kwargs.pop(\"stream\", True)\n pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)\n return pipeline_response.http_response", "def _request(self, method, url, payload=None, **params):\n kwargs = dict(params=params)\n kwargs[\"timeout\"] = self._timeout\n if not url.startswith('http'):\n url = self.prefix + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n if payload:\n kwargs[\"data\"] = json.dumps(payload)\n gs = self._gpool.spawn if self._gpool else gevent.spawn\n r = gs(self.session.request, method, url, headers=headers, **kwargs)\n r.fetch = partial(self.join, r)\n update_wrapper(r.fetch, self.join)\n #gevent.sleep(0.05)\n return r", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, method, url, params=None, data=None):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "async def __call__(self, send):\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.headers.items(),\n }\n )\n\n await send({\"type\": \"http.response.body\", \"body\": self.content})", "def request(self, method: str, url: str, **kwargs) -> requests.Response:\n url = parse.urljoin(self.http_address, url)\n return requests.request(method, url, **kwargs)", "def do_POST(self):\r\n self.do_GET()", "def http_request(self, path=\"/\", 
method=\"GET\", host=None, port=None, json=False, data=None):\n\n host = host or '127.0.0.1'\n port = port or 8080\n url = get_url(host=host, port=port, path=path)\n\n return self.http_session.request(method, url, json=json, data=data)", "def request(self, method, url, *args, **kwargs):\n full_url = urljoin(self.base_url, url)\n if 'data' in kwargs:\n kwargs['data'] = self._encode_data(kwargs['data'])\n return super(Client, self).request(method, full_url, *args, **kwargs)", "def http_request(self, method, path, data=None, params=None):\n\n s = Session()\n url = urljoin(self.BASE_URL, path)\n full_url = url\n try:\n full_url = full_url + \"?\" + urlencode(params)\n except:\n pass\n\n headers = self.request_headers(method, full_url)\n\n req = Request(\n method,\n url,\n headers=headers,\n data=data,\n params=params\n )\n prepped = req.prepare()\n resp = s.send(prepped, timeout=self.timeout)\n if resp.status_code == 429:\n raise errors.APIRateLimitError(\"Threat Stack API rate limit exceeded\")\n else:\n return self.handle_response(resp)", "def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):\n host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)\n\n url = 'http://{}:{}{}'.format(host, port, params.pop('path'))\n\n http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']\n kwargs = {k: params[k] for k in http_keys if k in params}\n\n query_params = params.pop('params', {})\n\n if app is not None:\n query_params['app'] = app\n\n query_params['version'] = version\n query_params['service'] = service\n\n response = yield from aiohttp.request(method, url, params=query_params, **kwargs)\n return response", "def request(self, *args, **kwargs):\n try:\n return self._http.request(*args, timeout=TIMEOUT, **kwargs)\n except Exception as exc:\n raise RequestException(exc, args, kwargs)", "def make_request(self, url, action, data='', status_code='', parser=None):\n self._url = self.get_api_path(url)\n headers = {\n 'Content-Type': \"application/json\",\n 'Token': self.token,\n\n }\n kwargs = {}\n if headers:\n kwargs.update(headers=headers)\n if data:\n kwargs.update(data=json.dumps(data))\n\n return getattr(self.http, action.lower())(self._url, **kwargs)", "def start(self):\n self.get(self.url)", "async def request(self) -> Any:\n raise NotImplementedError()", "def request(self, method, url, payload={}):\n response = self._make_request(method, url, payload)\n\n return response", "def send_request(url, method='GET', data=None, headers=None):\n assert url and method\n assert method in ['GET', 'PUT', 'DELETE', 'POST']\n method = getattr(requests, method.lower())\n response = method(url=url, data=data, headers=headers)\n return response", "def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()", "def request_with_client_http_session(instance, method, url, **kwargs):\n return instance.conv.entity.http_request(url, method)", "def do_GET(self):\r\n self._send_handler_response('GET')", "def send(self, params=None):\n\t\tif params:\n\t\t\t# Update URL parameters (optional)\n\t\t\tself.params.update(params)\n\t\tLOGGER.debug(\"API %s request to %s with %s\", self.method_override, self.url, self.json or self.data)\n\t\t# Get a prepared request\n\t\trequest = self.prepare()\n\t\t# Take environment variables into account (especially for proxies...)\n\t\tsettings = self.api.merge_environment_settings(request.url, {}, None, None, None)\n\t\tr = 
self.api.send(request, **settings)\n\t\treturn self.api.create_response(r)", "def _request(http, project, method, data, base_url, client_info):\n user_agent = client_info.to_user_agent()\n headers = {\n \"Content-Type\": \"application/x-protobuf\",\n \"User-Agent\": user_agent,\n connection_module.CLIENT_INFO_HEADER: user_agent,\n }\n api_url = build_api_url(project, method, base_url)\n\n response = http.request(url=api_url, method=\"POST\", headers=headers, data=data)\n\n if response.status_code != 200:\n error_status = status_pb2.Status.FromString(response.content)\n raise exceptions.from_http_status(\n response.status_code, error_status.message, errors=[error_status]\n )\n\n return response.content", "def request(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def make_HTTP_request(self, method, url, body, headers, callback=None):\r\n self.push_HTTP_request(method, url, body, headers, callback)\r\n self.pop_response()", "def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))", "async def request(self, method, **kwargs):\n\n return await self.backend.execute_request(\n method,\n kwargs,\n )", "def send_api_request(self, url, **kwargs):\n\n params = self._params.copy()\n dct = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}\n params.update(dct)\n\n res = requests.get(url, params=params)\n if res.status_code != 200:\n try:\n error = res.json()['error']\n except ValueError:\n error = None\n raise SwrveApiException(error, res.status_code, url, params)\n\n return res.json()", "def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)", "def send(self) -> None:\n\n payload = self.get_payload()\n try:\n self.response = requests.get(url=FAST_API, params=payload)\n except requests.exceptions.ConnectionError:\n print(f\"requests.exceptions.ConnectionError! 
Trying again in 5 seconds...\")\n sleep(5)\n self.send()", "def _request(self, method, *args, **kwargs):\n if not \"headers\" in kwargs:\n kwargs[\"headers\"] = self._headers\n return self._session.request(method, self._url(*args), **kwargs)", "def get(self, *args, **kwargs):\n url = urljoin(self.instance(), args[0])\n return self._requests_call(util.requests_get, url, *args[1:], **kwargs)", "def request(self, *args, **kwargs):\n req = GoodreadsRequest(self, *args, **kwargs)\n return req.request()", "def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)", "def request(self, method: str, url: str, access_token: str, **kwargs: Any) -> 'NetworkResponse':\n raise NotImplementedError # pragma: no cover", "def _request(self, *args):\n raise NotImplementedError", "def send_request(self, options):\n options[\"user\"] = self.user\n if self.auth_token is not None :\n options[\"ltoken\"] = self.auth_token\n\n try :\n data = urllib.urlencode(options)\n req = urllib2.Request(self.review_url, data)\n self.logger.debug('Sending request to server \\\"%s\\\" with data: \\\"%s\\\"' % (req.get_full_url(), req.get_data()))\n return urllib2.urlopen(req)\n except Exception as e:\n self.logger.error(\"Could not establish connection with Klocwork Server for :\\n\" + str(e) + \"\\nPlease check <project_root>\\logs\\klocwork.log as well.\")\n return None", "def request(self, method, url, body, headers):\n\t\tif self.alt_ip:\n\t\t\theaders['X-alt-ip'] = int(self.alt_ip)\n\t\tif url.startswith(\"/\"):\n\t\t\t\turl = ''.join([ 'http://', self._final_destination, url ])\n\t\telif url.startswith(\"http://\") or url.startswith(\"https://\"):\n\t\t\t\tprint \"Request made with a url which includes a base host name %r\" % url\n\t\telse:\n\t\t\t\traise ValueError(\"HTTP request via proxy of %r is invalid\" % url)\n\t\treturn httplib.HTTPConnection.request( self, method, url, body, headers )", "def send(self):\n \n # Generate the URL to call\n url = self._url + self._generate_query_string()\n logger.info('Sending request: %s' % url)\n \n # Generate GET request\n req = urllib2.Request(url=url)\n \n if not self._service.debug:\n try:\n f = urllib2.urlopen(req)\n data = f.read()\n f.close()\n \n # Log raw response\n logger.info('Raw response: %s' % data)\n \n except Exception, err:\n logger.exception('Request failed.')\n data = None\n else:\n # Debug data\n data = 'OK\\r\\nMessageID=1234'\n \n return self.parse_response(data)", "def __call__(self, requestStr):\n return self.connection.Request(requestStr)", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def _request(self, method, url, params=None, data=None, request_type=PRIVATE, headers={}):\n self._is_valid_request_option(request_type=request_type)\n\n request_headers = copy.deepcopy(self.BASE_HEADERS)\n request_headers.update(headers)\n\n response = getattr(requests, method.lower())(\n url,\n headers=request_headers,\n params=params,\n data=data\n )\n\n return self._handle_response(response)", "def request(self, path, **kwargs):\n self.http_client.fetch(self.get_url(path), self.stop, **kwargs)\n return self.wait()", "async def _send(self, url, data):\n r = await self.session.post(url, json=data, 
headers=self.get_headers())\n\n if r.status < 200 or r.status >= 300:\n text = await r.text()\n logger.error(\n 'Error posting {} value of {} to {}: {} '.format(\n data['name'], data['value'], url, text\n )\n )\n\n r.release()", "def send_request(self, request):\n # Below line is a debug to show what the full request URL is. Useful in testing multitenancy API calls\n #print(\"KARTIK : CONN OBJECT : send_request called with URL: '\"+self._url_prefix + request.endpoint+\"'\")\n #POORVA: changed url-prefix because only admin has right to update spot-region in geo-fabric present in any non-mm tenant\n if '_tenant' in request.endpoint and '_fabric' in request.endpoint:\n find_url = self._url_prefix.find('/_tenant')\n find_url += 1\n url = self._url_prefix[0:find_url]\n final_url = url + request.endpoint\n else:\n final_url = self._url_prefix + request.endpoint\n\n return self._http_client.send_request(\n method=request.method,\n url=final_url,\n params=request.params,\n data=request.data,\n headers=request.headers,\n auth=self._auth,\n )", "def send_request(self, path, body=None, params=None):\n alist = None\n if params:\n pairs = ' '.join('(%s . %s)' % (lisp_string(k), lisp_string(v))\n for k, v in six.iteritems(params))\n alist = \"'(%s)\" % pairs\n\n uri = self.uri(path)\n\n return self.conn.evalInServer('''\n (prog1 (net.aserve.client:do-http-request \n {uri} \n :method {method}\n :query {query}\n :headers '((:authorization . \"test {key}\"))\n :content-type \"text/plain\"\n :content {body}))\n '''.format(uri=lisp_string(uri),\n query=alist or 'nil',\n key=self.key,\n method=':post' if body else ':get',\n body=lisp_string(body) if body else 'nil'))", "def send(self, url, data, headers):\n eventlet.spawn(self._send_payload, (url, data, headers))", "def _request(self, request_method, url, *args, **kwargs):\n\n full_url = self.get_full_url(url)\n\n self.logger.info('Calling %s url: %s', request_method, full_url)\n\n request_args = self.get_request_args(kwargs)\n\n request = NapRequest(request_method, full_url, *args, **request_args)\n\n for mw in self.model._meta['middleware']:\n request = mw.handle_request(request)\n\n resource_response = request.send()\n response = NapResponse(\n url=request.url,\n status_code=resource_response.status_code,\n headers=resource_response.headers,\n content=resource_response.content,\n request_method=request_method,\n )\n\n for mw in reversed(self.model._meta['middleware']):\n response = mw.handle_response(request, response)\n\n return response", "def request(self, url, data=None, params={}, files=None):\n params['token'] = self.token\n request = self.make_request(url, data=data, params=params, files=files)\n return request", "def send_request(self, function_name, body):\n pass", "def _send(self, endpoint, method, extra_headers=None, **kwargs):\n\n headers = self.headers\n if extra_headers:\n headers.update(extra_headers)\n if method == \"GET\":\n return requests.get(\n f\"{self.API_URL}{endpoint}\",\n headers=headers,\n **kwargs\n )\n elif method == \"POST\":\n return requests.post(\n f\"{self.API_URL}{endpoint}\",\n headers=headers,\n **kwargs\n )\n else:\n raise ValueError(f\"supported methods are GET,POST but given {method}\")", "def httpPost(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('POST', url, data, params, headers)", "def send_req(self):\n raise NotImplementedError", "def _send(self, method, path, data=None):\r\n url = \"%s/%s\" % (self.base_url, path)\r\n params = None\r\n if method == 'GET':\r\n params = data\r\n data = 
None\r\n resp = requests.request(method, url, data=data, params=params,\r\n auth=(self.user, self.pwd),\r\n verify=self.verify_cert, headers=self.headers)\r\n if not resp.ok:\r\n raise Device42HTTPError(\"HTTP %s (%s) Error %s: %s\\n request was %s\" %\r\n (method, path, resp.status_code, resp.text, data))\r\n retval = resp.json()\r\n return retval", "def _request(self, *args, **kwargs):\n raise NotImplementedError()", "def request(self, path, method, data=None, query=None):\n path = '/'.join([self.name, path])\n return self.index.request(path, method, data, query)", "def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "def request(self, method, *path, **data):\n\t\theaders = data.pop('headers', {})\n\t\tversion = data.pop('version', None)\n\t\tjson = data.pop('json', True)\n\t\tpath = urljoin(*path)\n\t\treturn self._request(method, path, version, data, headers, json)", "def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def __call__(self, request):\n response = self.get_request(request)\n return response", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "def _send_in_request(self):\n try:\n req_params = urllib.urlencode(self._params)\n except Exception as ex:\n raise ProxyError('Error signing request string') \n \n try:\n self.logger.debug('Send api request to: %s' % self._api_url)\n self.logger.debug('Request params: %s' % req_params)\n self.logger.debug('Request timeout: %s' % self._timeout)\n if len(self._params) > 0:\n f = urllib2.urlopen(self._api_url, req_params, self._timeout)\n response = f.read()\n self.logger.debug('Response length: %s' % len(response))\n f.close() \n return response\n else:\n return \"{'command':'ping', 'message':'ok'}\" \n except (urllib2.URLError) as ex:\n self._error = 
json.loads(ex.fp.readline()).values()\n raise ProxyResponseError()\n except (IOError) as ex:\n raise ProxyError(ex)", "def _call(self, method, url, params):\n if not url.startswith('http'):\n url = self.root + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n r = self._session.request(method, url,\n headers=headers,\n proxies=self.proxies,\n params=params,\n timeout=self.requests_timeout)\n r.raise_for_status() # Check for error\n return r.json()", "def send_data(self, **kwargs):", "def do_GET(self):\n server_ip = Setup.parse_options()['ip_address']\n uri = \"http://\" + server_ip + self.path\n response = urllib.urlopen(uri)\n self.copyfile(response, self.wfile)\n headers = self.generate_header_dic(self.headers.headers)\n ip_address = self.client_address[0] # get client iP address\n if Setup.system_status != 'green':\n self.process_request(ip_address, headers, self.path)\n self.process_response(ip_address, response.headers)", "def request_externally(url):\n session = BQServer()\n #session = root\n session.authenticate_mex(identity.mex_authorization_token())\n session.root = request.host_url\n url = session.prepare_url(url)\n log.debug(\"begin routing externally: %s\" % url)\n try:\n resp = session.get(url, headers={'Content-Type':'text/xml'})\n except BQCommError as e:\n log.debug('%s' % str(e))\n return\n\n log.debug(\"end routing externally: status %s\" % resp.status_code)\n return resp", "def do_GET(self): # pylint:disable=invalid-name\n if not self.is_log_path_valid():\n self.report_404()\n return\n scheme = \"https\" if self.server.cert is not None else \"http\"\n resp = '<html>'\n resp += '<head>\\n'\n resp += ' <title>{0}</title>\\n'.format(self.app_name)\n resp += '</head>\\n'\n resp += '<body>\\n'\n resp += ' <center>\\n'\n resp += ' <h2>{0} is working via {1}</h2>\\n'.format(self.app_name,\n scheme.upper())\n resp += ' </center>\\n'\n resp += ' <p>Please point your APIC at:<br /><br />'\n ip_add = [(s.connect((self.client_address[0], 80)), s.getsockname()[0],\n s.close()) for s in [socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)]][0][1]\n resp += ' {0}://{1}:{2}{3}</p>'.format(scheme, ip_add,\n self.server.server_address[\n 1],\n self.path)\n resp += '</body>\\n'\n resp += '</html>'\n self.send_200_resp(resp, \"text/html\")", "def do_GET(self):\n #if self.path.startswith('/api/'):\n # f = self.send_response_headers('api call')\n #else:\n f=self.route()\n if f==False:\n f = self.send_head()\n if f:\n try:\n self.copyfile(f, self.wfile)\n finally:\n f.close()", "def _request(self, url: str) -> http.client.HTTPResponse:\n self.request = urllib.request.Request(\n url,\n headers={'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X)'})\n try:\n return urllib.request.urlopen(self.request, timeout=10)\n except Exception as e:\n # print(e)\n # exit(-1)\n print(e, url)\n raise e", "def _request(self, query):\n query_string = self._create_query_string(query)\n\n try:\n response = requests.get(query_string)\n except requests.exceptions.ConnectionError:\n raise EngineConnectionException(self.name, \"Unable to send request, check connectivity.\")\n\n if response.status_code != 200:\n raise EngineConnectionException(self.name, \"\", code=response.status_code)\n\n return self._parse_json_response(query, response)", "def Send(self, url, opname, pyobj, nsdict={}, soapaction=None, chain=None, \n **kw):\n url = url or self.url\n cookies = None\n if chain is not None:\n cookies = chain.flow.cookies\n \n d = {}\n d.update(self.nsdict)\n 
d.update(nsdict)\n \n if soapaction is not None:\n self.addHTTPHeader('SOAPAction', soapaction)\n \n chain = self.factory.newInstance()\n soapdata = chain.processRequest(pyobj, nsdict=nsdict, \n soapaction=soapaction, **kw)\n \n if self.trace:\n print >>self.trace, \"_\" * 33, time.ctime(time.time()), \"REQUEST:\"\n print >>self.trace, soapdata\n\n f = getPage(str(url), contextFactory=self.contextFactory, \n postdata=soapdata, agent=self.agent, \n method='POST', headers=self.getHTTPHeaders(), \n cookies=cookies)\n \n if isinstance(f, Failure):\n return f\n \n chain.flow = f\n self.chain = chain\n return chain", "def request(self, method, uri, params=None, data=None, headers=None, auth=None,\n timeout=None, allow_redirects=False):\n auth = auth or self.auth\n headers = headers or {}\n \n headers['User-Agent'] = 'twilio-python/{} (Python {})'.format(\n __version__,\n platform.python_version(),\n )\n headers['Accept-Charset'] = 'utf-8'\n \n if method == 'POST' and 'Content-Type' not in headers:\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n \n if 'Accept' not in headers:\n headers['Accept'] = 'application/json'\n \n return self.http_client.request(\n method,\n uri,\n params=params,\n data=data,\n headers=headers,\n auth=auth,\n timeout=timeout,\n allow_redirects=allow_redirects\n )", "def _get(self, url):\n return self._request(url)", "def _http(self):\n raise NotImplementedError(\"HTTP transport is not supported.\")", "def _request(self, *arg, **kwarg):\n return self.request_session.request(*arg, **kwarg)", "def request(self, method, url):\n\t\ttr = TwitterRequest( method.upper(), url )\n\t\treturn self.get_response( tr )", "def request(self, *args, **kwargs):\n response = super(DataGovSession, self).request(*args, **kwargs)\n\n return self._handle_response(response)", "def post(self, url, *args):\n\n req_method = type(self.client).__name__\n\n if not url.startswith(\"http\"):\n if not url.startswith(\"/\"):\n url = \"/%s\" % url\n url = \"%s%s\" % (self.base, url)\n\n if req_method == \"FlaskClient\":\n self.client.post(url, headers=self.headers, *args)\n\n else:\n self.client.post(url, headers=self.headers, *args)", "def httpapi_request(client, **params) -> 'Response':\n return requests.get(\n _HTTPAPI,\n params={\n 'client': client.name,\n 'clientver': client.version,\n 'protover': 1,\n **params\n })", "def http_request(method, url, params=None):\n if method.lower() not in _request_methods:\n raise NotImplementedError(\"HTTP request method not implemented\")\n\n\n return _request_methods[method.lower()](url, params)", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200" ]
[ "0.7022592", "0.69352275", "0.69292426", "0.6814427", "0.6778195", "0.6773092", "0.67646056", "0.672339", "0.6689945", "0.667453", "0.667453", "0.66193897", "0.65896803", "0.6585336", "0.65369105", "0.65295655", "0.6499505", "0.6499505", "0.6468787", "0.64431155", "0.642135", "0.64206266", "0.64202344", "0.6414662", "0.64079726", "0.64004123", "0.63971674", "0.6347673", "0.63142985", "0.63142824", "0.63127", "0.6309679", "0.62943083", "0.6282974", "0.62585235", "0.62222123", "0.62197256", "0.6206916", "0.62030894", "0.62015563", "0.6198095", "0.6193167", "0.617848", "0.6173274", "0.61699957", "0.61533046", "0.6141289", "0.6127576", "0.61227626", "0.6121213", "0.6113213", "0.6103597", "0.6089326", "0.6087833", "0.6087705", "0.60825163", "0.608088", "0.60760605", "0.6066021", "0.6064264", "0.60629845", "0.60573786", "0.60567695", "0.6053225", "0.6050907", "0.6038701", "0.6029793", "0.6025773", "0.6005501", "0.59976006", "0.5994276", "0.5994276", "0.5994276", "0.5985526", "0.5981815", "0.5970036", "0.59680754", "0.59662014", "0.59601307", "0.5927503", "0.59215635", "0.5918888", "0.5914812", "0.59069026", "0.5891514", "0.58808875", "0.58803195", "0.58797014", "0.587888", "0.58761925", "0.5867079", "0.58579546", "0.58572793", "0.5852542", "0.5849638", "0.5848271", "0.5846587", "0.5844076", "0.5840838", "0.5838843" ]
0.5976503
75
make all managed instances plus the starter itself crash.
def crash_instances(self): try: if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING: print("generating coredump for " + str(self.instance)) gcore = psutil.Popen(["gcore", str(self.instance.pid)], cwd=self.basedir) print("launched GCORE with PID:" + str(gcore.pid)) gcore.wait() self.kill_instance() else: print("NOT generating coredump for " + str(self.instance)) except psutil.NoSuchProcess: logging.info("instance already dead: " + str(self.instance)) for instance in self.all_instances: instance.crash_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def detect_fatal_errors(self):\n for instance in self.all_instances:\n instance.detect_fatal_errors()", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def cleanup_resources(self, restart=False):", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def check_vm_errors(st):\n\n global api, owned_instances\n owned_instances_changed = False\n\n logging.info(\"Check VMs in error state...\")\n\n # Get all instances in \"error\" state\n try:\n all_instances = api.get_all_instances()\n\n # Clean up list from nonexisting instances\n new_owned_instances = []\n for o in owned_instances:\n keep = False\n for a in all_instances:\n if o == a.id:\n keep = True\n break\n if keep:\n new_owned_instances.append(o)\n else:\n logging.debug(\"Unknown owned instance removed: %s\" % o)\n owned_instances_changed = True\n if owned_instances_changed:\n owned_instances = new_owned_instances\n\n # Only the ones in error state (generator)\n error_instances = ( x for x in all_instances if x.status(token_id=api.keystone.token_id) == 'error' and x.id in owned_instances )\n\n except Exception as e:\n logging.error(\"Can't get list of owned instances in error: %s\" % e)\n error_instances = []\n\n # Print them\n n_vms_to_restart = 0\n for ei in error_instances:\n\n # Operations to do if a VM is in error:\n # 1. Terminate it\n # 2. Remove it from the managed list\n # 3. Decrement VMs allegedly running\n # 3. Cancel event restoring VMs allegedly running\n # 4. Run new instances (ignoring errors)\n # 5. 
Increase VMs allegedly running\n\n # Terminate VM in error\n try:\n ei.terminate(token_id=api.keystone.token_id)\n logging.debug(\"Shutdown via API of %s in error state succeeded\" % ei.id)\n except Exception as e:\n logging.error(\"Shutdown via API failed for %s in error state: %s\" % (ei.id, e))\n continue\n\n # Remove from \"owned\" list\n owned_instances.remove(ei.id)\n owned_instances_changed = True\n\n # Change VMs allegedly running\n change_vms_allegedly_running(st, -1)\n\n # Remove event for the current instance\n st['event_queue'][:] = [ x for x in st['event_queue'] if x['action'] != 'change_vms_allegedly_running' or x['params'][1] != ei.id ]\n\n # Restart that number of VMs\n n_vms_to_restart = n_vms_to_restart + 1\n\n # Attempt to run replacement VMs (no retry in this case!)\n if n_vms_to_restart > 0:\n list_ok = scale_up( n_vms_to_restart, valid_hostnames=st['workers_status'].keys() )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n if len(list_ok) == n_vms_to_restart:\n logging.debug(\"Successfully requested all the new replacement VMs: %s\" % ','.join(list_ok))\n else:\n logging.debug(\"Cannot request all the replacement VMs: only %d/%d succeeded (%s)\" % (len(list_ok), n_vms_to_restart, ','.join(list_ok)))\n\n # Save to disk\n if owned_instances_changed:\n save_owned_instances()\n\n # Re-run this command in X seconds\n return {\n 'action': 'check_vm_errors',\n 'when': time.time() + cf['elastiq']['check_vms_in_error_every_s']\n }", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. 
You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()", "def rescue(self, instance):\n pass", "def _cleanup_running_deleted_instances(self, context):\n action = CONF.running_deleted_instance_action\n\n if action == \"noop\":\n return\n\n # NOTE(sirp): admin contexts don't ordinarily return deleted records\n with utils.temporary_mutation(context, read_deleted=\"yes\"):\n for instance in self._running_deleted_instances(context):\n if action == \"log\":\n LOG.warning(_LW(\"Detected instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n\n elif action == 'shutdown':\n LOG.info(_LI(\"Powering off instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n try:\n try:\n # disable starting the instance\n self.driver.set_bootable(instance, False)\n except NotImplementedError:\n LOG.debug(\"set_bootable is not implemented \"\n \"for the current driver\")\n # and power it off\n self.driver.power_off(instance)\n except Exception:\n msg = _LW(\"Failed to power off instance\")\n LOG.warn(msg, instance=instance, exc_info=True)\n\n elif action == 'reap':\n LOG.info(_LI(\"Destroying instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid, use_slave=True)\n self.instance_events.clear_events_for_instance(instance)\n try:\n self._shutdown_instance(context, instance, bdms,\n notify=False)\n self._cleanup_volumes(context, instance.uuid, bdms)\n except Exception as e:\n LOG.warning(_LW(\"Periodic cleanup failed to delete \"\n \"instance: %s\"),\n e, instance=instance)\n else:\n raise Exception(_(\"Unrecognized value '%s'\"\n \" for CONF.running_deleted_\"\n \"instance_action\") % action)", "def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been 
stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n i.terminate_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n True,\n )\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n [],\n False,\n )", "def run(self):\n self.create_all_sync_instances()", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def fix(self):\n\n pm.delete(self.errorNodes)\n\n self.run()", "def noise_application_instances(self):\n # Add some \"noise\" application instances to the DB for every test, to\n # make the tests more realistic.\n factories.ApplicationInstance.create_batch(size=3)", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)", "def cleanup_resources(self, kernel_id, restart=False):", "def kill_all():\n compose_kill_all()", "def create_instances(self):\n disk_d = \"//\"+self.host+\"/d$\"\n mask = r\"^IBM$|^WebSphere.*\"\n root_flag = 0\n # print(os.listdir(disk_d)) #checkpoint\n for item in os.listdir(disk_d):\n searchObj = re.search(mask, item, re.M|re.I)\n if searchObj:\n root_flag = 1\n rootdir=disk_d+\"/\"+searchObj.group()\n # print(rootdir) #checkpoint\n\n if os.path.isdir(rootdir):\n candidates=os.listdir(rootdir)\n # print(candidates) #checkpoint\n for candidate in candidates:\n if os.path.isdir(rootdir+'/'+candidate+'/profiles'):\n user_install_root=rootdir+'/'+candidate\n candidate_instance=Instance(user_install_root)\n candidate_instance.get_profiles()\n if candidate_instance.profiles:\n self.instances.append(candidate_instance)\n # print(candidate_instance.uir+\": \"+str(candidate_instance.profiles)) #checkpoint\n\n if root_flag == 0: print(self.host+\" does not have IBM or WebSphere directory on disk D\")", "def reset():\n Vessel.reset_instances()", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def _gracefully_stop(self):\n pass", "def test_concurrent_instances(self):\n cm = contextlib.ExitStack() # TODO: clean this up\n\n work_dir1 = Path(cm.enter_context(tempfile.TemporaryDirectory())) # TODO: make these delete only if no exception occured\n work_dir2 = Path(cm.enter_context(tempfile.TemporaryDirectory()))\n\n archive = RemotePrometheusArchive.for_tag('latest').download()\n prometheus1: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir1))\n prometheus2: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir2))\n\n prometheus1.start()\n\n with self.assertRaisesRegex(Exception, 'certificate verify failed'):\n prometheus2.start()\n\n\n cm.close()", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test 
ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def stopclean(self):\n raise Exception(\"Not implemented\")", "def coldRestart(self):\n assert False, \"Deriving class must implement\"", "def kill_sync_processes(self, force, rev):\n for i in self.all_instances:\n if i.is_sync_instance():\n if not force and i.pid_file is not None and rev >= semver.VersionInfo.parse(\"0.15.0\"):\n print(\"Skipping manual kill\")\n return\n logging.info(\"manually killing syncer: \" + str(i.pid))\n i.terminate_instance()", "def test_too_many_cores_no_queue(self):\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def _clean_up(self):\n StratisCli.destroy_all()\n self.assertEqual(0, len(StratisCli.pool_list()))", "def power_on(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_on(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def clean_up_executors(self):\n pass", "def test_cleanup_procs(self):\n self.skip_teardown = True\n pool = self.get_pool()\n container = self.get_container(pool)\n dfuse = get_dfuse(self, self.hostlist_clients)\n start_dfuse(self, dfuse, pool=pool, container=container)", 
"def killAll(controller=False):", "def cleanUp(self):\r\n # All intermediates should be removed by app controller\r\n pass", "def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def set_up(self):\n self.dut.kill_all()", "def __exit__(self):\n self._stop_all()", "def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n if kill_instance:\n instance.kill_instance()\n instance.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def _clean_up():\n from tests.util import report\n report.update()\n if MAIN_RUNNER is not None:\n MAIN_RUNNER.on_exit()\n from tests.util.services import get_running_services\n for service in get_running_services():\n sys.stderr.write(\"Stopping service \")\n for c in service.cmd:\n sys.stderr.write(c + \" \")\n sys.stderr.write(\"...\\n\\r\")\n service.stop()", "def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_ungraceful_shutdown_aws(self, resources, instances, aws_obj, force):\n 
aws_obj.stop_ec2_instances(instances=instances, wait=True, force=force)\n aws_obj.start_ec2_instances(instances=instances, wait=True)\n self.validate_cluster(resources, instances)", "def run_with_exceptions(self: AutoScalingCluster) -> None:\n self.server.start()\n time.sleep(2) # NOTE: give the server a chance to start\n self.autoscaler.start()\n self.autoscaler.join()\n self.server.join()", "def _clean_up(self):", "def fix(self):\n\n cmds.lockNode(self.errorNodes, l=False)\n cmds.delete(self.errorNodes)\n cmds.flushUndo()\n for plugin in self.errorPlugins:\n cmds.unloadPlugin(plugin)\n\n self.run()", "def test_least_busy_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, s_ref['id'])\n db.instance_destroy(self.context, s_ref2['id'])", "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? \" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def _restart(self):\n pass", "def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()", "def test_run_instance_spawn_fail(self):\n def fake(*args, **kwargs):\n raise test.TestingException()\n self.stub_out('nova.virt.fake.FakeDriver.spawn', fake)\n instance = self._create_fake_instance_obj()\n self.compute.build_and_run_instance(\n self.context, instance=instance, request_spec={},\n filter_properties={}, accel_uuids=[],\n requested_networks=[],\n injected_files=None, admin_password=None,\n block_device_mapping=[], image={}, node=None)\n # check state is failed even after the periodic poll\n self._assert_state({'vm_state': vm_states.ERROR,\n 'task_state': None})\n self.compute.periodic_tasks(context.get_admin_context())\n self._assert_state({'vm_state': vm_states.ERROR,\n 'task_state': None})", "def test_relaunch_deployment_run(self):\n pass", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def power_off(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_off(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def run(self):\n sys.exit(-1)", "def __exit__(self, exc_type, exc_value, traceback):\n nvmlShutdown()", "def __exit__(self, exc_type, exc_value, traceback):\n super().__exit__(exc_type, exc_value, traceback)\n 
test_utils.clean_mongo()", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def create_all_sync_instances(self):\n # Get directories to sync\n dirs_to_sync_by_sync_instance = self.get_dirs_to_sync(self.config['sync_hierarchy_rules'])\n\n # Store all known running sync instances here to potentially kill later\n # unhandled_sync_instances = copy.deepcopy(dirs_to_sync_by_sync_instance)\n unhandled_sync_instances = copy.deepcopy(self.data_storage.running_data)\n\n # Loop through each entry in the dict and create a sync instance for it\n for instance_name, dirs_to_sync in dirs_to_sync_by_sync_instance.items():\n\n # Mark this instance as handled so it's not killed later\n unhandled_sync_instances.pop(instance_name, None)\n\n # Make new sync instance\n self.create_sync_instance(instance_name, dirs_to_sync)\n\n # Kill any instances in unhandled_sync_instances, because they are\n # no longer required needed\n\n for inst_to_kill in unhandled_sync_instances:\n self.logger.debug(\n \"Cleaning up instance '\" + inst_to_kill + \"'\" +\n \" which is no longer needed.\"\n )\n self.kill_sync_instance_by_pid(self.data_storage.running_data[inst_to_kill]['pid'])", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore\n self.shutdown()", "def start(self):\n try:\n pass\n except:\n pass", "def cleanup(self):\n with hide(\"output\", \"warnings\", \"running\"):\n self.stop_all()\n self._execute_standard(\"rm -rf {model_repo}\".format(model_repo=MODEL_REPO))\n self._execute_root(\"docker rmi --force $(docker images -q)\", warn_only=True)\n self._execute_root(\"docker network rm clipper_nw\", warn_only=True)", "def __exit__(self, exc_type, exc_val, exc_tb):\r\n pass", "def graceful(self):\n self._graceful = True", "def kill(self):\n\t\tself.kill_subcomponents()\n\t\tself._subcomponents.clear()\n\t\tself.bug_world = None\n\n\t\ttry:\n\t\t\tself.ci.deregister_all()\n\t\texcept:\n\t\t\tpass", "def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)", "def test_terminate_run(self):\n pass", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more instances we expect to spawn the more patient:\n tries = 10 * self.expect_instance_count\n\n # Wait for forntend to become alive.\n all_instances_up = False\n while not all_instances_up and tries:\n self.all_instances = []\n detected_instances = []\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n for root, dirs, files in os.walk(self.basedir):\n for onefile in files:\n # logging.debug(\"f: \" + root + os.path.sep + onefile)\n if onefile.endswith(\"log\"):\n logfiles.add(str(Path(root) / 
onefile))\n\n for name in dirs:\n # logging.debug(\"d: \" + root + os.path.sep + name)\n match = None\n instance_class = None\n if name.startswith(\"sync\"):\n match = re.match(r\"(syncmaster|syncworker)(\\d*)\", name)\n instance_class = SyncInstance\n else:\n match = re.match(\n r\"(agent|coordinator|dbserver|resilientsingle|single)(\\d*)\",\n name,\n )\n instance_class = ArangodInstance\n # directory = self.basedir / name\n if match and len(match.group(2)) > 0:\n # we may see a `local-slave-*` directory inbetween,\n # hence we need to choose the current directory not\n # the starter toplevel dir for this:\n instance = instance_class(\n match.group(1),\n match.group(2),\n self.cfg.localhost,\n self.cfg.publicip,\n Path(root) / name,\n self.passvoid,\n self.cfg.ssl,\n self.cfg.version,\n self.enterprise,\n jwt=jwt,\n )\n instance.wait_for_logfile(tries)\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n detected_instances.append(instance.instance_type)\n self.all_instances.append(instance)\n\n print(self.expect_instances)\n detected_instances.sort()\n print(detected_instances)\n attach(str(self.expect_instances), \"Expected instances\")\n attach(str(detected_instances), \"Detected instances\")\n if (self.expect_instances != detected_instances) or (not self.get_frontends()):\n tries -= 1\n time.sleep(5)\n else:\n all_instances_up = True\n\n if not self.get_frontends():\n print()\n logging.error(\"STARTER FAILED TO SPAWN ARANGOD\")\n self.show_all_instances()\n logging.error(\"can not continue without frontend instance\")\n logging.error(\"please check logs in\" + str(self.basedir))\n for logf in logfiles:\n logging.debug(logf)\n message = \"if that does not help try to delete: \" + str(self.basedir)\n logging.error(message)\n raise Exception(message)\n self.show_all_instances()", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def server_clean(self):\n # Kill any doas servers running on the hosts\n self.kill()\n # Clean up any files that exist on the hosts\n self.clean_files()", "def MultipleBFEBSInstances(self):\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.image = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n self.MultipleInstances()", "def clean_up(self):\n dist.destroy_process_group()", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n 
[os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def check_vms(st):\n\n logging.info(\"Checking batch system's VMs...\")\n check_time = time.time()\n\n # Retrieve *all* running instances (also the non-owned ones) and filter out\n # statuses of workers which are not valid VMs: we are not interested in them\n rvms = running_instances()\n rvms2 = []\n\n rips = []\n if rvms is not None:\n for inst in rvms:\n ipv4 = inst.network_ip(network_name=cf[\"api\"][\"network_name\"])\n if ipv4 is not None:\n rips.append(ipv4)\n rvms2.append(inst)\n if len(rips) == 0:\n rips = None\n new_workers_status = BatchPlugin.poll_status( st['workers_status'], rips )\n\n rvms=rvms2\n\n if new_workers_status is not None:\n #logging.debug(new_workers_status)\n st['workers_status'] = new_workers_status\n new_workers_status = None\n\n hosts_shutdown = []\n for host,info in st['workers_status'].iteritems():\n if info['jobs'] != 0: continue\n if (check_time-info['unchangedsince']) > cf['elastiq']['idle_for_time_s']:\n logging.info(\"Host %s is idle for more than %ds: requesting shutdown\" % \\\n (host,cf['elastiq']['idle_for_time_s']))\n st['workers_status'][host]['unchangedsince'] = check_time # reset timer\n hosts_shutdown.append(host)\n\n if len(hosts_shutdown) > 0:\n inst_ok = scale_down(hosts_shutdown, valid_hostnames=st['workers_status'].keys())\n 
change_vms_allegedly_running(st, -len(inst_ok))\n\n # Scale up to reach the minimum quota, if any\n min_vms = cf['quota']['min_vms']\n if min_vms >= 1:\n rvms = running_instances(st['workers_status'].keys())\n if rvms is None:\n logging.warning(\"Cannot get list of running instances for honoring min quota of %d\" % min_vms)\n else:\n n_run = len(rvms)\n n_consider_run = n_run + st['vms_allegedly_running']\n logging.info(\"VMs: running=%d | allegedly running=%d | considering=%d\" % \\\n (n_run, st['vms_allegedly_running'], n_consider_run))\n n_vms = min_vms-n_consider_run\n if n_vms > 0:\n logging.info(\"Below minimum quota (%d VMs): requesting %d more VMs\" % \\\n (min_vms,n_vms))\n inst_ok = scale_up(n_vms, valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in inst_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n\n # OK: schedule when configured\n sched_when = time.time() + cf['elastiq']['check_vms_every_s']\n\n else:\n # Not OK: reschedule ASAP\n sched_when = 0\n\n return {\n 'action': 'check_vms',\n 'when': sched_when\n }", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.call()\n self._cleanup_shared_list(exc_type, exc_val, exc_tb)", "def terminate(self):\n\t\tself.raise_exc(SystemExit)", "def clean_master():", "def __exit__(self, exc_type, exc_val, exc_tb):\n pass", "def error():\r\n raise RuntimeError('admin ticket generator at your service')", "def stopEngines():\n pass", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def terminate(self):", "def test_with_process_crash(self):\n if self.num_replicas < 2:\n self.assertTrue(False, msg=\"Required: num_replicas > 1\")\n\n # Override num_of_nodes affected to 1 (Positive case)\n self.num_nodes_affected = 1\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error 
condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Remove active vbuckets from doc_loading to avoid errors\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"target_vbuckets\"] = list(set(range(0, 1024))\n ^ set(active_vbs_in_target_nodes))\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n self.sleep(5, \"Wait for doc loaders to start loading data\")\n\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud()\n\n # Wait for document_loader tasks to complete\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with process crash\")\n\n if self.simulate_error \\\n not in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # In case of error with Ephemeral bucket, need to rebalance\n # to make sure data is redistributed properly\n if self.bucket_type == Bucket.Type.EPHEMERAL:\n retry_num = 0\n result = None\n while retry_num != 2:\n result = self.task.rebalance(\n self.servers[0:self.nodes_init],\n [], [])\n if result:\n break\n retry_num += 1\n self.sleep(10, \"Wait before retrying rebalance\")\n\n self.assertTrue(result, \"Rebalance failed\")\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover stat validation\n if self.simulate_error == CouchbaseError.KILL_MEMCACHED:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n if self.simulate_error != CouchbaseError.STOP_MEMCACHED \\\n and self.bucket_type == 
Bucket.Type.EPHEMERAL:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n val = failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats mismatch after error condition:\" \\\n \" %s != %s\" \\\n % (failover_info[\"init\"][node.ip],\n failover_info[\"afterCrud\"][node.ip])\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Doc count validation\n self.validate_test_failure()\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)", "def delete_all_runtimes(self):\n self.compute_handler.delete_all_runtimes()", "def __exit__(self, *excinfo):\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def _check_vmware():\n for proc in psutil.process_iter():\n try:\n if proc.name() == 'vmware-vmx':\n raise CommandError('S2E uses KVM to build images. VMware '\n 'is currently running, which is not '\n 'compatible with KVM. Please close all '\n 'VMware VMs and try again.')\n except NoSuchProcess:\n pass", "def test_settings_doesnt_break(self):\r\n self.settingsDeploy()", "def scale_up(nvms, valid_hostnames=None, vms_allegedly_running=0):\n\n global api, img, flavor, user_data, network, owned_instances\n\n # Try to get image if necessary\n if img is None:\n img = image(cf['api']['image_id'])\n if img is None:\n logging.error(\"Cannot scale up: image id %s not found\" % image(cf['api']['image_id']))\n return []\n\n n_succ = 0\n n_fail = 0\n logging.info(\"We need %d more VMs...\" % nvms)\n\n inst = running_instances(valid_hostnames)\n if inst is None:\n logging.error(\"No list of instances can be retrieved from API\")\n return []\n\n n_running_vms = len(inst) + vms_allegedly_running # number of *total* VMs running (also the ones *not* owned by HTCondor)\n if cf['quota']['max_vms'] >= 1:\n # We have a \"soft\" quota: respect it\n n_vms_to_start = int(min(nvms, cf['quota']['max_vms']-n_running_vms))\n if n_vms_to_start <= 0:\n logging.warning(\"Over quota (%d VMs already running out of %d): cannot launch any more VMs\" % \\\n (n_running_vms,cf['quota']['max_vms']))\n else:\n logging.warning(\"Quota enabled: requesting %d (out of desired %d) VMs\" % (n_vms_to_start,nvms))\n else:\n n_vms_to_start = int(nvms)\n\n # Launch VMs\n inst_ok = []\n for i in range(1, n_vms_to_start+1):\n\n success = False\n if int(cf['debug']['dry_run_boot_vms']) == 0:\n try:\n # Returns the reservation\n new_inst_id = img.run(\n token_id=api.keystone.token_id,\n key_name=cf['api']['key_name'],\n user_data=user_data,\n instance_type=flavor.id,\n network=network.id\n )\n\n # Get the single instance ID from the reservation\n owned_instances.append( new_inst_id )\n inst_ok.append( new_inst_id )\n\n success = True\n except Exception:\n logging.error(\"Cannot run instance via API: check your \\\"hard\\\" quota\")\n\n else:\n logging.info(\"Not running VM: dry run active\")\n success = True\n\n if success:\n n_succ+=1\n logging.info(\"VM launched OK. Requested: %d/%d | Success: %d | Failed: %d | ID: %s\" % \\\n (i, n_vms_to_start, n_succ, n_fail, new_inst_id))\n else:\n n_fail+=1\n logging.info(\"VM launch fail. 
Requested: %d/%d | Success: %d | Failed: %d\" % \\\n (i, n_vms_to_start, n_succ, n_fail))\n\n # Dump owned instances to file (if something changed)\n if n_succ > 0:\n save_owned_instances()\n\n return inst_ok" ]
[ "0.6183212", "0.6157061", "0.5949658", "0.5940772", "0.58803564", "0.5872182", "0.58227324", "0.57449096", "0.573384", "0.5643141", "0.5623912", "0.5593848", "0.5592989", "0.559106", "0.55775046", "0.5566891", "0.5562831", "0.555892", "0.55581", "0.5557331", "0.5549696", "0.55376285", "0.5530706", "0.5511063", "0.55059254", "0.5468521", "0.5456432", "0.5443141", "0.54375386", "0.5435014", "0.5430887", "0.5424401", "0.5409062", "0.54086477", "0.5406507", "0.5397051", "0.5386716", "0.53863233", "0.5367642", "0.5364355", "0.5359641", "0.53396475", "0.5321483", "0.5309392", "0.5304142", "0.5303697", "0.52932227", "0.5293126", "0.52879536", "0.52773803", "0.52694577", "0.524012", "0.5233396", "0.5233187", "0.52330047", "0.5225425", "0.52196294", "0.5215274", "0.5213363", "0.52124816", "0.5202586", "0.52019006", "0.52011615", "0.51985157", "0.5191421", "0.5187731", "0.518264", "0.5179394", "0.5173151", "0.5168397", "0.51657087", "0.51622224", "0.51515645", "0.51515645", "0.5145622", "0.5142911", "0.5138945", "0.51338077", "0.5128898", "0.5124731", "0.5119571", "0.51175696", "0.5116119", "0.5114901", "0.5108012", "0.5099696", "0.50968635", "0.5093057", "0.50889444", "0.5083858", "0.5083146", "0.508246", "0.50819767", "0.50805056", "0.50771105", "0.50771105", "0.50771105", "0.5072566", "0.5059882", "0.5055909" ]
0.70966077
0
check whether this is still running
def is_instance_running(self):
    try:
        self.instance.wait(timeout=1)
    except psutil.TimeoutExpired:
        pass
    return self.instance.is_running()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_running(self):\n\t\treturn self in _running", "def running(self):\n return not self._kill_event.is_set()", "def is_running(self):\n\t\treturn self._running", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_running(self) -> bool:\r\n return self.__running", "def is_running(self) -> bool:\n return False", "def running(self):\n return self.sub_process and self.sub_process.is_alive()", "def _is_running(self):\n return self._run_state.is_running()", "def running(self) -> bool:", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def running(self):\n\t\treturn self._start is not None", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def is_alive(self):\n return True", "def is_alive(self):\n\n return not self._stop.is_set()", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def is_alive(self):\n pass", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def running(self):\n return self._lifetime_state in {\"starting\",\"running\",\"finishing\"}", "def is_running(self):\n return self.current_state == self.States.RUNNING", "def is_running(self) -> bool:\n return self._running.is_set()", "def is_running(self) -> bool:\n return self._is_running", "def is_running(self):\n if self._thread and self._thread.is_alive:\n return True\n\n return False", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running", "def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False", "def _keep_running():\n return True", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def is_running(self):\n return self._running.is_set()", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self._is_running", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def is_running(self):\n return self.action_thread and self.action_thread.is_alive()", "def running(self):\n try:\n return self._thread.isAlive()\n except (AttributeError, RuntimeError, ThreadError):\n return False", "def is_alive(self) -> bool:\n return self._main_thread.is_alive()", "def alive(self):\n return self._thread is not None", "def running(self):\n return self.more() or not self.stopped", "def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True", "def is_alive(self):", "def running(self):\n return bool(self.proc and self._running())", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def 
running(self) -> bool:\n return self._running", "def running(self):\n return self._state == RUNNING_STATE", "def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def __bool__(self):\n return not self._stopped", "def is_working(self):\n if not self.__th:\n return False\n return self.__th.is_alive()", "def is_alive(self):\n # TODO: This is not 100% correct, but that's the behavior our code relies ON.\n # Eventually we need to fix the implementation on StoppableThread so RunState defaults\n # \"_is_running\" to False and then set it to True inside \"run()\" and only check that value\n # here.\n if six.PY2:\n return super(StoppableThread, self).isAlive()\n\n if (\n not self._run_state.is_running()\n or not super(StoppableThread, self).is_alive()\n ):\n return False\n\n return True", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "def IsRunning(self):\n return self.running", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return process.is_alive()", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def is_running(self):\n\n return self._state == \"RUNNING\"", "def is_alive(self) -> bool:\n if self._thread is None:\n return False\n return self._thread.is_alive()", "def is_running(self):\n qstat = self._grep_qstat('running')\n if qstat:\n return True\n return False", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def _is_running(self):\n # Public interface is given by get_status instead.\n self._update()\n return True if self.running_mode else False", "def isAlive(self):\n return self.is_alive()", "def is_running(self) -> bool:\n return self.executor.is_alive() if self.executor else False", "def is_running(self):\n self.__condition.acquire()\n result = self.__is_running\n self.__condition.release()\n return result", "def running(self):\n if self.done() and self._is_running:\n self._is_running = False\n return self._is_running", "def is_running(self):\n # type: () -> bool\n return self._run_state.is_running()", "def is_alive(self):\n return self.alive", "def is_alive(self):\n return self.alive", "def is_running(self):\n # do we have a job ID to work with?\n if self.jobid == None:\n return False\n else:\n q_status = self.queue.get_status(self.jobid)\n\n if q_status == self.queue.state[\"active\"]:\n self.meta[\"status\"] = 'PENDING'\n return True\n else:\n return False", "def KeepAlive(self) -> bool:", "def isstarted():", "def is_alive(self):\n return self._is_alive", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def is_running(self):\n return self._task.running()", "def is_running(self):\n return self._task.running()", "def running(self):\n with self._done_condition:\n return self._state == RUNNING", "def is_alive(self, pid):\n return pid in self._pids", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def is_alive(self):\n return (self.read_name() != '')", "def is_done(self):\n\n return not self.thread.is_alive()", "def running(self): # type: () -> bool\n return self.state['Running']", "def _is_running(self):\n try:\n # Process is not killed, os.kill(pid, 0) does nothing but raise 
if process does not\n # exist.\n os.kill(self.pid, 0)\n except ProcessLookupError:\n return False\n else:\n return True", "def is_idle(self) -> bool:", "def is_live(self):\r\n return self._thread.isAlive() and self._eventThread.isAlive() and not \\\r\n self._closed", "def is_alive(self) -> bool:\n return any(thread.is_alive() for thread in self.threads)", "def Running(self):\n return self.Timer.IsRunning()", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def is_running(self):\n return all(p.status == 'running' for p in self.values())", "def check(self):\n\n if not self.running:\n return False\n\n # On my machine, os.kill is faster and takes ~0.3usec while os.stat and P.exists take ~1.5usec (using timeit)\n # However, with kill if the process is under a separate UID, PermissionError is raised\n # Could try os.kill and fallback to P.exists and save the choice, but that's just overcomplicated\n\n running = P.exists(self.path)\n if running:\n self.update_status()\n else:\n # Process ended since last check, recond end time\n self.running = False\n self.ended_datetime = datetime.now()\n # TODO duration attribute could have a value while running; update in getter method\n self.duration = self.ended_datetime - self.created_datetime\n # Formats like 3:06:29.873626, so cutoff microseconds\n text = str(self.duration)\n self.duration_text = text[:text.rfind('.')]\n\n return running", "def alive(self):\n return self._process.is_alive()", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. Base directory: {str(self.basedir)}\")", "def is_alive(self):\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))", "def is_running(self):\n return self.style['position'][0] == 'running()'", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def is_running(self):\n data = self._poll()\n return data.get('building', False)", "def _is_running(self, _):\n if self._shutdown_event.is_set():\n raise RequestProcessingError(\n \"Unable to process message - currently shutting down\"\n )", "def is_running(self):\n status = self._call_player_proxy('GetStatus', None).unpack()[0]\n if status[3] == 1:\n return True\n return False", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "def is_playing(self):\n return self.process is not None" ]
[ "0.81097966", "0.79656595", "0.7808091", "0.7757858", "0.77343863", "0.77087873", "0.76777333", "0.76591086", "0.7655416", "0.76325166", "0.76325166", "0.76325166", "0.76198006", "0.76070553", "0.7606519", "0.75803435", "0.75677806", "0.7559066", "0.75572735", "0.7554629", "0.75373185", "0.74642664", "0.7463129", "0.7448027", "0.74440604", "0.74440604", "0.74431306", "0.74144536", "0.7406221", "0.7403612", "0.740317", "0.740317", "0.73951334", "0.7392074", "0.7383646", "0.73806846", "0.73647255", "0.7364578", "0.7360696", "0.7356493", "0.7354658", "0.735163", "0.73295134", "0.7325352", "0.7319562", "0.7317168", "0.73108107", "0.72987074", "0.72920966", "0.72920966", "0.72828835", "0.727902", "0.7273404", "0.72732717", "0.7253186", "0.7250847", "0.7240073", "0.7236352", "0.7228629", "0.72276056", "0.7217122", "0.7211345", "0.72074634", "0.7201248", "0.71807647", "0.71646434", "0.71639204", "0.71615726", "0.71615726", "0.7158996", "0.71576905", "0.7157605", "0.7145105", "0.71448237", "0.7119776", "0.7119776", "0.71023077", "0.7100449", "0.70953554", "0.705907", "0.7050353", "0.70443404", "0.70266664", "0.70216966", "0.7018194", "0.7008483", "0.7004902", "0.70038235", "0.6994393", "0.6987454", "0.69828904", "0.6971226", "0.6968804", "0.6966461", "0.6957639", "0.69547933", "0.6953033", "0.6940187", "0.69395703", "0.69384044" ]
0.7342232
42
wait for our instance to create a logfile
def wait_for_logfile(self):
    counter = 0
    keep_going = True
    logging.info("Looking for log file.\n")
    while keep_going:
        self.check_that_instance_is_alive()
        if counter == 20:
            raise Exception("logfile did not appear: " + str(self.log_file))
        counter += 1
        logging.info("counter = " + str(counter))
        if self.log_file.exists():
            logging.info("Found: " + str(self.log_file) + "\n")
            keep_going = False
        time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not exist before starting\n self.assertFalse(path.exists(log_file))\n\n cli_entry._validate_and_upload(directory_status, False)\n\n # Make sure log file is created\n self.assertTrue(path.exists(log_file))", "def test_creation_logfile(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n manager = execution.LogManager('MainThread', log_file)\n LOGGER.debug('Log me!')\n manager.close()\n self.assertEqual(count_lines(log_file), 1)\n os.remove(log_file)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def wait_for_log(self, hub, success_criteria):\n timeMax = time() + BrowserConstants().CONTAINER_TIMEOUT\n line = 'error'\n while line not in BrowserConstants().CONTAINER_SUCCESS or time() < timeMax:\n for line in hub.logs().decode().split('\\n'):\n if success_criteria in line:\n logging.debug(line)\n return\n\n # TODO handle RemoteDisconnected\n # TODO check for running containers before creation/worker to store running containers", "def log_wait(logfile):\n if xopts['verbose']: print(\"** Watching logfile: %s\" % (logfile))\n with open(logfile, 'r') as tlog:\n stalker = tailer.follow(tlog)\n logline = stalker.next()\n\n return logline", "def init_logfile(self):\n\t\tif os.path.exists(self.logfile):\n\t\t\t# move the logfile aside and compress it\n\t\t\tbz_file = bz2.BZ2File(\"%s.bz2\" % self.logfile,'w')\n\t\t\tlog = open(self.logfile,'r')\n\t\t\tbz_file.writelines(log.readlines())\n\t\t\tlog.close()\n\t\t\tbz_file.close()\n\t\t#print \"Logging output to %s\" % self.logfile\n\t\tdate = dateutil.get_datetime()\n\t\ttime = dateutil.get_datetime(1)\n\t\tnew_file = open(self.logfile,'w')\n\t\tnew_file.write(\"#------------------------- RSYNC LOG -------------------------\\n#\\n\")\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Date',date))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Time',time))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Source',self.source))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Destination',self.destination))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Command',self.command))\n\t\tnew_file.write(\"#%12s: %s\\n\\n\" % ('Logfile',self.logfile))\n\t\tnew_file.close()\n\t\treturn True", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def __init__(self, abs_path_logfile):\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n handler = logging.handlers.TimedRotatingFileHandler(abs_path_logfile, when='D', interval=1)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)", "def 
establish(lvl='INFO', logName=None, logPath=None, backups=0):\n try:\n print 'Script Started. Setting up Logging.'\n\n # Set logging level\n if lvl == 'DEBUG':\n logLevel = logging.DEBUG\n elif lvl == 'INFO':\n logLevel = logging.INFO\n elif lvl == 'WARNING':\n logLevel = logging.WARNING\n elif lvl == 'ERROR':\n logLevel = logging.ERROR\n else:\n print 'Invalid logging level. Choose: ERROR, WARNING, INFO, DEBUG'\n return\n\n # Setup basic logging configuration to standard output stream\n logging.basicConfig(level=logLevel, format=\"%(asctime)s\\t%(levelname)s:\\t%(message)s\")\n \n if logName != None and logName.strip() != '':\n # A logName has been provided so create a log file\n if logPath == None or logPath.strip() == '':\n # If no logPath is provided, use relative path\n logPath = r'.\\\\'\n logPathName = os.path.join(logPath, str(logName).strip())\n # If backups are needed, set the write style (write/append)\n if backups == 0:\n logMode = 'w'\n else:\n logMode = 'a'\n # Setup logging to a file\n fh = logging.handlers.RotatingFileHandler(filename=logPathName, mode=logMode, backupCount=int(backups))\n fh.setLevel(logLevel)\n formatter = logging.Formatter('%(asctime)s\\t%(levelname)s:\\t%(message)s')\n fh.setFormatter(formatter)\n logging.getLogger('').addHandler(fh)\n if os.path.isfile(logPathName):\n fh.doRollover()\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n info('Log file created at: {0}'.format(logPathName))\n else:\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n fh = None\n return fh\n except:\n print 'Error Establishing Log: {0}'.format(traceback.format_exc())", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def on_start(self):\r\n self.log()", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. 
logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def init_log():\n os.system('rm -rf /target/testdriver.log || true')\n os.system('touch /target/testdriver.log')\n os.system(f\"chown {uid_gid_output} /target/testdriver.log\")\n os.system('chmod 664 /target/testdriver.log')", "def __init__(self, logfile):\r\n super(PopenWrapper, self).__init__()\r\n self.logfile = logfile", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def openLogfileConnection(self,):\n \n #\n # Imports\n #\n import sys\n import time\n import os\n \n #\n # for logmessages\n # \n tmpLogMessages = []\n \n #\n # check if logfile present open connection or create\n #\n SEAseqPipeLine.logfile = self.analysisPath + '/logfile.txt'\n if os.path.isfile(SEAseqPipeLine.logfile):\n if self.command == 'initiateAnalysis':\n print 'ERROR: the logfile already exists please use another path to initiate the analysis.\\n'\n sys.exit(1)\n else:\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'a',1)\n SEAseqPipeLine.logfile.write('----------------\\nConnection to logfile '+SEAseqPipeLine.logfile.name+' opened.\\n')\n return 0\n else:\n tmpLogMessage = 'Creating the logfile 
\"'+SEAseqPipeLine.logfile+'\".\\n'\n tmpLogMessages.append(tmpLogMessage)\n print tmpLogMessage\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'w',1)\n \n return tmpLogMessages", "def on_server_start(self):\n self._container = self._docker_client.containers.run(self.docker_image_name, detach=True, **self.docker_params)\n self.signal_ready()\n\n for log_line in self.get_lines():\n try:\n alert_dict = self.parse_line(log_line)\n if alert_dict:\n self.add_alert_to_queue(alert_dict)\n except Exception:\n self.logger.exception(None)", "def wait_for_log(self, regex, timeout=TIMEOUT):\n return self.wait_for_logs([regex], timeout)", "def verify_new_log_created(self, pod_type):\n pod_obj = self.get_pod_obj_based_on_id(pod_type)\n output_cmd = pod_obj.exec_cmd_on_pod(command=\"ls -lh /var/log/ceph\")\n expected_string = (\n self.podtype_id[pod_type][2]\n if pod_type == \"rgw\"\n else f\"{self.podtype_id[pod_type][2]}{self.podtype_id[pod_type][1]}\"\n )\n cnt_logs = len(re.findall(expected_string, output_cmd))\n if cnt_logs != int(self.podtype_id[pod_type][3]) + 1:\n log.info(output_cmd)\n log.error(\n f\"pod_type:{pod_type} cnt_logs_before_fill_log:\"\n f\"{self.podtype_id[pod_type][3]} cnt_logs_after_fill_log:{cnt_logs}\"\n )\n pod_obj.exec_cmd_on_pod(\n command=f\"dd if=/dev/urandom of=/var/log/ceph/{expected_string}.log bs=1M count=560\",\n out_yaml_format=False,\n container_name=\"log-collector\",\n )\n return False\n return True", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def _logging_subprocess(self):\n\n # Setup logging for logging subprocess\n setproctitle('flowbber - logging manager')\n\n # # Level\n level = self.LEVELS.get(self._verbosity, logging.DEBUG)\n\n # # Format\n if level != logging.DEBUG:\n format_tpl = self.FORMAT\n else:\n format_tpl = self.FORMAT_DEBUG\n formatter = ColoredFormatter(fmt=format_tpl, style='{')\n\n # # Handler\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n # # Configure baisc logging\n logging.basicConfig(handlers=[handler], level=level)\n\n # Start listening for logs and prints\n listener = QueueListener(self._log_queue, handler)\n listener.start()", "def _test():\n try:\n print 'Test for Loging'\n # Establish Logging at the beginning of the script\n fh = establish(lvl='DEBUG', logName='TestLog.txt', logPath='', backups=0)\n\n # Supply log functions with message as a STRING\n info('TEST - Info lvl')\n debug('TEST - Debug lvl')\n warning('TEST - Warning lvl')\n error('TEST - Error lvl')\n exception('TEST - Exception. 
See the exception below this line.')\n info('Would any of this be logged to ArcPy: {0}'.format(_logToArcpyMessagingWindow))\n\n except:\n exception('Error in main function of script')\n print 'ERROR WITH SCRIPT: {0}'.format(traceback.format_exc())\n finally:\n # Ensure to Shut-down the Logging\n info('Script Completed')\n shutdown(fh)\n print 'Test Complete'", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def execute_to_log(cmd, logfile, timeout=-1,\n watch_logs=[\n ('[syslog]', '/var/log/syslog'),\n ('[sqlslo]', '/var/log/mysql/slow-queries.log'),\n ('[sqlerr]', '/var/log/mysql/error.log')\n ],\n heartbeat=True, env=None, cwd=None\n ):\n\n if not os.path.isdir(os.path.dirname(logfile)):\n os.makedirs(os.path.dirname(logfile))\n\n logger = logging.getLogger(logfile)\n log_handler = logging.FileHandler(logfile)\n log_formatter = logging.Formatter('%(asctime)s %(message)s')\n log_handler.setFormatter(log_formatter)\n logger.addHandler(log_handler)\n\n descriptors = {}\n\n for watch_file in watch_logs:\n if not os.path.exists(watch_file[1]):\n logger.warning('Failed to monitor log file %s: file not found'\n % watch_file[1])\n continue\n\n try:\n fd = os.open(watch_file[1], os.O_RDONLY)\n os.lseek(fd, 0, os.SEEK_END)\n descriptors[fd] = {'name': watch_file[0],\n 'poll': select.POLLIN,\n 'lines': ''}\n except Exception as e:\n logger.warning('Failed to monitor log file %s: %s'\n % (watch_file[1], e))\n\n cmd += ' 2>&1'\n start_time = time.time()\n p = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env, cwd=cwd)\n\n descriptors[p.stdout.fileno()] = dict(\n name='[output]',\n poll=(select.POLLIN | select.POLLHUP),\n lines=''\n )\n\n poll_obj = select.poll()\n for fd, descriptor in descriptors.items():\n poll_obj.register(fd, descriptor['poll'])\n\n last_heartbeat = time.time()\n\n def process(fd):\n \"\"\" Write the fd to log \"\"\"\n global last_heartbeat\n descriptors[fd]['lines'] += os.read(fd, 1024 * 1024)\n # Avoid partial lines by only processing input with breaks\n if descriptors[fd]['lines'].find('\\n') != -1:\n elems = descriptors[fd]['lines'].split('\\n')\n # Take all but the partial line\n for l in elems[:-1]:\n if len(l) > 0:\n l = '%s %s' % (descriptors[fd]['name'], l)\n logger.info(l)\n last_heartbeat = time.time()\n # Place the partial line back into lines to be processed\n descriptors[fd]['lines'] = elems[-1]\n\n while p.poll() is None:\n if timeout > 0 and time.time() - start_time > timeout:\n # Append to logfile\n logger.info(\"[timeout]\")\n os.kill(p.pid, 9)\n\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n if time.time() - last_heartbeat > 30:\n # Append to logfile\n logger.info(\"[heartbeat]\")\n last_heartbeat = time.time()\n\n # Do one last write to get the remaining lines\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n # Clean up\n for fd, descriptor in descriptors.items():\n poll_obj.unregister(fd)\n os.close(fd)\n try:\n p.kill()\n except OSError:\n pass\n\n logger.info('[script exit code = %d]' % p.returncode)\n 
logger.removeHandler(log_handler)\n log_handler.flush()\n log_handler.close()\n return p.returncode", "def test_creation_no_logfile(self):\n # When we don't give the handler a URI, it creates a NullHandler\n # instance, so we don't save any of the logging messages to the log\n # file.\n manager = execution.LogManager('sample_thread_name')\n manager.close()\n self.assertEqual(manager.logfile_handler.__class__,\n logging.NullHandler)", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def onStartup(self):\n if self.console.config.has_option('server', 'local_game_log'):\n self.locallog = self.console.config.getpath('server', 'local_game_log')\n else:\n # setup ip addresses\n self._publicIp = self.console.config.get('server', 'public_ip')\n self._port = self.console.config.getint('server', 'port')\n\n if self._publicIp[0:1] == '~' or self._publicIp[0:1] == '/':\n # load ip from a file\n f = file(self.console.getAbsolutePath(self._publicIp))\n self._publicIp = f.read().strip()\n f.close()\n\n logext = str(self._publicIp.replace('.', '_'))\n logext = 'games_mp_' + logext + '_' + str(self._port) + '.log'\n self.locallog = os.path.normpath(os.path.expanduser(logext))\n\n self.locallog = b3.getWritableFilePath(self.locallog)\n self.debug('local game log is :%s' % self.locallog)\n\n if self.console.config.has_option('server', 'log_append'):\n self._logAppend =self.console.config.getboolean('server', 'log_append')\n else:\n self._logAppend = False\n\n if self.console.config.has_option('server', 'log_timeout'):\n self.timeout = self.console.config.get('server', 'log_timeout')\n else:\n # get timeout value set by gameservers.com\n try:\n \n req = urllib2.Request(self._timeout_url)\n req.headers['User-Agent'] = user_agent\n f = urllib2.urlopen(req)\n self.timeout = int(f.readlines()[0])\n f.close()\n self.debug('using timeout value of %s seconds' % self.timeout)\n \n except (urllib2.HTTPError, urllib2.URLError, socket.timeout), error: \n self.timeout = self._default_timeout\n self.error('ERROR: %s' % error)\n self.error('ERROR: Couldn\\'t get timeout value. 
Using default %s seconds' % self.timeout)\n\n if self.console.config.get('server','game_log')[0:7] == 'http://' :\n self._url = self.console.config.get('server','game_log')\n self.initThread()\n else:\n self.error('your game log url doesn\\'t seem to be valid: please check your config file')\n self.console.die()", "def os_open_logfile( self, ):\r\n# from subprocess import Popen, PIPE # since infrequently used ??\r\n# try:\r\n# proc = Popen( [ self.parameters.ex_editor, self.parameters.pylogging_fn ] )\r\n#\r\n# except Exception as excpt:\r\n# self.logger.info( \"os_open_logfile exception trying to use >\" + str( self.parameters.ex_editor ) + \"< to open file >\" + str( self.parameters.pylogging_fn ) +\r\n# \"< Exception \" + str( excpt ) )\r\n# #self.logger.info( \"send_receive() timeout -- send_data = >\" + send_data +\"<\", )\r\n AppGlobal.os_open_txt_file( self.parameters.pylogging_fn )", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def _setup_file_logger(self):\n if self._file_log_handler is not None:\n raise RuntimeError(\"{}: File logger already exists\".format(self))\n\n # Note that in unit test driver's runpath might not be set\n if self.cfg.file_logger and self.runpath is not None:\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(message)s\"\n )\n self._file_log_handler = logging.FileHandler(\n os.path.join(self.runpath, self.cfg.file_logger)\n )\n self._file_log_handler.setFormatter(formatter)\n self.logger.addHandler(self._file_log_handler)\n self.logger.propagate = False # No console logs", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. \n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. 
\n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def on_test_begin(self, logs=None):", "def init(directory,name):\n \n import os,time\n fname=os.path.join(directory,'Log_'+name+'_'+time.strftime('%Y-%b-%d')+'.txt')\n if not os.path.isfile(os.path.join(directory,fname)):\n try:\n fhandle = open(fname, 'a')\n fhandle.write('-----------------------------------------------------\\n')\n fhandle.write('Program initialized! on '+time.strftime('%Y-%b-%d')+'\\n')\n fhandle.write('-----------------------------------------------------\\n')\n print('======begin log!======')\n return(fhandle)\n except:\n print('Could not create Log file with name : '+\n ' Log_'+name+'_'+date.today().isoformat()+'.txt')\n else:\n fhandle=open(fname,'a')\n fhandle.write('-----------------------------------------------------\\n')\n fhandle.write('Program initialized! on '+time.strftime('%Y-%b-%d')+'\\n')\n fhandle.write('-----------------------------------------------------\\n')\n print('======begin log!======')\n return(fhandle)", "def __init_logging(self):\n\n logger = logging.getLogger('__name__')\n if os.path.exists(constants.LOG_FILE):\n logger.setLevel(logging.DEBUG)\n logger_file_handler = logging.FileHandler(constants.LOG_FILE)\n logger_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n logger_file_handler.setFormatter(logger_formatter)\n logger.addHandler(logger_file_handler)\n else:\n logger.disabled = True", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def instantiate_logs(self):\n\n # Log file\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n self.log_dir = os.path.join(\"experiment_logs\", timestamp)\n\n # Create Log directory if it does not exist\n try:\n os.makedirs(self.log_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n self.info_file = os.path.join(self.log_dir, \"run_info.txt\")\n self.log_file = os.path.join(self.log_dir, \"data.csv\")\n\n with open(self.info_file, \"w+\") as f:\n f.write(\"Period = {}\\nMaxVel = {}\".format(self.period, self.max_vel))\n\n self.log_file_desc = open(self.log_file, \"w+\")\n self.log_file_desc.write(\"t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw\")", "def test_wait_for_ready(main_container):\n # This could take a while, as we download the application.\n TIMEOUT = 180\n for i in range(TIMEOUT):\n logs = main_container.logs().decode(\"utf-8\")\n if READY_MESSAGE in logs:\n break\n time.sleep(1)\n else:\n raise Exception(\n f\"Container does not seem 
ready. \"\n f'Expected \"{READY_MESSAGE}\" in the log within {TIMEOUT} seconds.'\n f\"\\nLog output follows:\\n{logs}\"\n )", "def Create_log():\r\n \"\"\"And Maintain log file to the current date in MMM_DD_YY format\"\"\"\r\n \r\n name = multiprocessing.current_process().name\r\n config = config_create()\r\n Stream = config.get('Log', 'Log1')\r\n Tweet = config.get('Log', 'Log2')\r\n OverallLog = config.get('Log', 'Log3')\r\n \r\n uscore = '_'\r\n txtn = '.txt'\r\n StreamL = uscore +Stream+ txtn\r\n TweetL = uscore +Tweet+ txtn\r\n OverallLogL = OverallLog+txtn\r\n \r\n \r\n \r\n name = multiprocessing.current_process().name\r\n StreamFileName = time.strftime(\"%b_%d_%y\")+StreamL\r\n TweetFileName = time.strftime(\"%b_%d_%y\")+TweetL\r\n config.set('Latest_Log', 'currentstreamlog',StreamFileName)\r\n config.set('Latest_Log', 'currenttweetlog',TweetFileName)\r\n config.set('Latest_Log', 'overalllog',OverallLogL)\r\n \r\n with open('botconfig.ini', 'w') as x:\r\n config.write(x)\r\n if os.path.isfile(StreamFileName) is False:\r\n open(StreamFileName, 'w')\r\n \r\n if os.path.isfile(OverallLogL) is False:\r\n open(OverallLogL, 'w')\r\n \r\n if os.path.isfile(TweetFileName) is False:\r\n twfile = open(TweetFileName, 'w')\r\n ## Edit this or comment to change first line entered upon\r\n ## File creation\r\n twfile.write('0 ComicTweetBot')\r\n #time.sleep(1)\r\n #Create_log()\r", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def _logger_setup(self, log_file):\r\n # settings\r\n self.log_formatter = logging.Formatter(\"%(asctime)s %(processName)s %(thread)d %(message)s\")\r\n self.default_logging_level = logging.DEBUG\r\n\r\n # set up listener thread for central logging from all processes\r\n queue_manager = multiprocessing.Manager()\r\n self.log_queue = queue_manager.Queue()\r\n self.log_listener = Listener(self.log_queue, self.log_formatter,\r\n self.default_logging_level, log_file)\r\n # note: for debugging, comment out the next line. 
Starting the listener\r\n # will cause pipe breakage in case of a bug elsewhere in the code,\r\n # and the console will be flooded with error messages from the\r\n # listener.\r\n self.log_listener.start()", "def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')", "def logfile():\n\n class Logfile(object):\n def __init__(self, filename, *args, **kwargs):\n super(Logfile, self).__init__(*args, **kwargs)\n self.filename = filename\n self.logs = \"\"\n\n def read(self):\n with open(self.filename) as file:\n for line in file:\n self.logs += line\n return self.logs\n\n yield Logfile(filename=\"gen3tests.logs\")\n\n # cleanup after each use\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")", "def _collect_log(self, log_type, log_dir, log_name, cmd,\n timeout=10, background=True):\n log_tag = self.get_log_tag()\n target_log = posixpath.join('/tmp/', log_name)\n self.logger.info(\"{} Attempting to collect a {} log\".format(\n log_tag, log_type))\n status, _, _ = self.exec_command_ssh(cmd, background=background)\n\n if status != 0:\n raise Exception(\"{} '{}' command did not generate a log \"\n \"on the target\".format(log_tag, cmd))\n\n # Wait for the system to finish writing the log\n time.sleep(timeout)\n self.logger.info(\"{} Attempting to copy generated log from \"\n \"the target to the PC\".format(log_tag))\n dest = os.path.join(log_dir, log_name)\n with self.ssh_client as ssh_client:\n with ssh_client.open_sftp() as sftp:\n sftp.get(target_log, dest)\n\n self.logger.info(\"{} Attempting to delete log from \"\n \"target\".format(log_tag))\n status, _, _ = self.exec_command_ssh(\n \"rm /tmp/{}\".format(log_name))\n\n if status != 0:\n self.logger.error(\"{} Failed to delete log from \"\n \"target\".format(log_tag))\n\n self.logger.info(\"{} Log collection complete!\".format(log_tag))", "def on_test_end(self, logs=None):", "def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False", "def test_file_creation(data, logging_file_name):\n create_instance(data, logging_file_name)\n log_file_name = create_file_path(logging_file_name)\n print(log_file_name)\n if data is None or len(data) == 0:\n assert not os.path.exists(log_file_name)\n else:\n assert os.path.exists(log_file_name)", "def _verify_logging(self):\n log_file = self.device.log_file_name\n self.assertTrue(os.path.exists(log_file),\n f\"{self.device.name}'s log file {log_file} does not exist\")\n self.assertTrue(os.path.getsize(log_file),\n f\"{self.device.name}'s log file {log_file} is empty\")", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def test_init_logger_with_logfile(monkeypatch):\n log_path = f\"{gettempdir()}/{uuid()}.log\"\n assert not Path(log_path).exists()\n 
monkeypatch.setenv(\"LOG_OUTPUT\", log_path)\n logger = helpers.init_logger(uuid())\n msg = \"Write to disk.\"\n logger.warning(msg)\n assert Path(log_path).exists()\n with open(log_path, \"r\") as log:\n assert msg in log.read()", "def logfile(self, logfile):\n self._logfile = logfile", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def __init__(self, logfilename='logfile.log'):\n # Create file handler (output to file)\n # \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n # \"[%(asctime)s %(process)d] %(message)s\"\n # fileFormatter = logging.Formatter(\"%(asctime)s : %(threadName)-12.12s : %(levelname)-5.5s : %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n fileFormatter = logging.Formatter(\"%(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n fileHandler = logging.FileHandler(filename=logfilename)\n fileHandler.setFormatter(fileFormatter)\n fileHandler.setLevel(logging.INFO)\n self.fileHandler = fileHandler\n\n # Create console handler (output to console/terminal)\n # consoleFormatter = logging.Formatter(\"%(name)-12s : %(levelname)-8s : %(message)s\")\n consoleFormatter = logging.Formatter(\"%(message)s\")\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(consoleFormatter)\n consoleHandler.setLevel(logging.INFO)\n self.consoleHandler = consoleHandler\n\n # Create logger and add handlers\n # logger = logging.getLogger(__name__)\n logger = logging.getLogger('')\n logger.setLevel(logging.INFO)\n logger.addHandler(fileHandler)\n logger.addHandler(consoleHandler)\n self.logger = logger\n\n # from combo (when use candle)\n # for log in [logger, uno_data.logger]:\n # log.setLevel(logging.DEBUG)\n # log.addHandler(fh)\n # log.addHandler(sh)\n\n self.logger.info('{}'.format('-' * 90))\n self.logger.info(datetime.now())\n self.logger.info(f'Machine: {platform.node()} ({platform.system()}, {psutil.cpu_count()} CPUs)')\n #return logger", "def _init_logging(self):\n # Setup logging variable\n self.log = logging.getLogger(\"collection-log\")\n self.log.setLevel(logging.INFO)\n self.formatter = logging.Formatter(\"%(asctime)s %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\n # Log to stdout\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(logging.INFO)\n streamhandler.setFormatter(self.formatter)\n self.log.addHandler(streamhandler)", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = 
logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def wait(self):\n self.Popen.wait()", "def startLogger(self):\n #------------------------------------------\n # Initialize logger\n log_level = getattr(logging, str(self.loglevel).upper())\n logging.basicConfig(filename=self.logfile,level=log_level, format=DEFAULT_LOG_FORMAT)\n logging.info(START_STRING)", "async def _record_logs(self, report):\n\t\tif report.action == Frame.Report.PARSE:\n\t\t\t# Collects the tests parsing log for further writing to Test_Parser.log\n\t\t\tif report.success:\n\t\t\t\tself._parse_logs[\"success\"] += [report.log]\n\t\t\telse:\n\t\t\t\tself._parse_logs[\"failure\"] += [report.log]\n\t\telif report.action == Frame.Report.EXECUTE:\n\t\t\t# Writes a test log and dump to the results directory\n\t\t\ttest_log = (\"EXECUTE STATUS: SUCCESS\\n\\n\" if report.success else \"EXECUTE STATUS: FAILURE\\n\\n\") + report.log\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, FileSystem.dump_to, \n\t\t\t\t self._result_directory_name + \"/Log/\" + report.test_name + \".log\", test_log)]):\n\t\t\t\tawait task\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, TestLogger._write_test_dump, \n\t\t\t\t self._result_directory_name + \"/Dump/\" + report.test_name + \".pcap\", report.dump)]):\n\t\t\t\tawait task", "def __init__(self, logfile=None):\n if logfile is None:\n self.__fd = None\n else:\n self.__fd = open(logfile, \"a\")", "def end_logging(self):\n self.append_to_logfile()", "def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)", "def initLogger(filename, dir_name='data/logs/'):\n \n filename = os.path.join(dir_name, filename)\n \n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n \n if os.path.exists(filename):\n os.remove(filename)\n \n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', \n '%m-%d-%Y %H:%M:%S')\n fh = logging.FileHandler(filename)\n fh.setFormatter(formatter)\n \n logger.addHandler(fh)", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def wait(self):\n pass", "def wait(self):\n pass", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)", "def test_attach(self, check_docker, containers, log_worker, time_):\n check_docker.return_value = True\n\n container1 = MagicMock()\n container1.name = f\"{APPNAME}-iknl-user\"\n containers.list.return_value = [container1]\n\n log_worker.return_value = \"\"\n time_.sleep.side_effect = KeyboardInterrupt()\n\n runner = CliRunner()\n result = runner.invoke(cli_node_attach, ['--name', 'iknl'])\n\n self.assertEqual(\n result.output,\n \"[info] - Closing log file. 
Keyboard Interrupt.\\n\"\n )\n self.assertEqual(result.exit_code, 0)", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def do_wait(self):\n pass", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def setup_script_logging():\n #handlers = [logbook.NullHandler()]\n format_str = (\"[{record.time:%Y-%m-%dT%H:%MZ}] \"\n \"{record.level_name}: {record.message}\")\n\n #handler = logbook.StreamHandler(sys.stderr, format_string=format_str,\n # level=\"DEBUG\")\n #handler.push_thread()\n #return handler", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is 
simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def notify_console(self):\n self.logger.error(\"Test %s Event %s OCCURRED on host %s!!!\\nEvent %s\\nMail sent to %s\" % (\n self.test_name, self.event.capitalize(), self.host_name, self.event_details, self.target_mail\n ))", "def logger_initiate():\n logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def wait_for_upgrade_done_in_log(self, timeout=120):\n keep_going = True\n logging.info('Looking for \"Upgrading done\" in the log file.\\n')\n while keep_going:\n text = self.get_log_file()\n pos = text.find(\"Upgrading done.\")\n keep_going = pos == -1\n if keep_going:\n time.sleep(1)\n progress(\".\")\n timeout -= 1\n if timeout <= 0:\n raise TimeoutError(\"upgrade of leader follower not found on time\")\n for instance in self.all_instances:\n instance.wait_for_shutdown()", "def _sync_log_event(self):\n # sync only after first run and if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def check_progress_logs(self, job_id: str, sleep_time: int = 45) -> Dict:\n sys.stdout.write(\n \"\"\"\n @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@.####.@@(.@@/((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@@.####.&(@.,((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@@@*####....((((.@@@@@@@@@@..*@@@@@@@.@@@@@@@@/.@@@@@@@@..@@@@@..@@@@@@..........@@@@@@..@@@@@@@@@@@@@@@@@.../@@@@@@@\n @@@@@@@(###.%@.(((.@@@@@@@@@@@.@*.(@@@@@.@@@@@@@@/.@@@@@@@@@@..@..@@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@@@..@@.,@@@@@@\n @@@@@@@@/#.....,(.@@@@@@@@@@@@.@@@,./@@@.@@@@@@@@/.@@@@@@@@@@@...@@@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@@.*@@@@..@@@@@\n @@@@@@@(###(.%*(((.@@@@@@@@@@@.@@@@@/.(@.@@@@@@@@/.@@@@@@@@@*.@@&.,@@@@@@@@@@@..@@@@@@@@@@..@@@@@@@@@@@@@@..........@@@@\n @@@@@@,####....((((.@@@@@@@@@@.@@@@@@@...@@@@@@@@/.@@@@@@@(./@@@@@..&@@@@@@@@@..@@@@@@@@@@......@@@@@@@@/.@@@@@@@@@,.@@@\n @@@@@.####.@,&.,((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@.####.@@@*@@/((((.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n Have some coffee while you wait...\n ( (\n ) )\n ........\n | |]\n \\ /\n `----'\n \"\"\"\n )\n sys.stdout.write(f'\\nSetting up Nixtla infrastructure (this will take up 5 mins)...\\n')\n\n idx_logs = 0\n in_progress = True\n while in_progress:\n resp = self.get_status(job_id)\n status = resp['status']\n logs = json.loads(resp['logs'])\n\n if status != 'InProgress' and not logs:\n time.sleep(30)\n resp = self.get_status(job_id)\n status = resp['status']\n logs = json.loads(resp['logs'])\n\n if logs:\n #if logs != latest_logs:\n for log in logs[idx_logs:]:\n sys.stdout.write(f'{log}\\n')\n #latest_logs = logs\n idx_logs = len(logs)\n\n 
in_progress = status == 'InProgress'\n\n time.sleep(sleep_time)\n\n return status", "def on_sync(self):\r\n self.log()", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def test_with_logging(self):\n module = imp.load_source('sample', os.path.join(DATA_DIR,\n 'sample_scripts.py'))\n\n temp_file_uri = os.path.join(DATA_DIR, 'test_log.txt')\n executor = execution.Executor(module, {'1':1}, func_name='try_logging',\n log_file=temp_file_uri)\n executor.start()\n executor.join()\n\n # This logging is generated in the main thread, not the worker thread,\n # which only logs 2 lines. The number of lines in the log file should\n # be 5 (2 from the function, 2 from printing arguments, 1 of blank\n # space).\n LOGGER.debug('hello. This should not appear in the log file.')\n\n self.assertEqual(count_lines(temp_file_uri), 10)\n os.remove(temp_file_uri)", "def initialize_logger(self, exp_dir):\n env = EnvSing.get_instance()\n # configure logger\n self.log_file = exp_dir + \"/pruner.log\"\n\n if not env.exists(self.log_file):\n env.dump(\"\", self.log_file)\n self.fd = env.open_file(self.log_file, flags=\"w\")\n self._log(\"Initialized Pruner Logger\")", "def test_passing_env(self):\n\n log_file = \"test.log\"\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n if os.path.exists(whole_log_file):\n os.remove(whole_log_file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_file\n\n logger = pgo_logger.get_logger()\n\n assert logger is not None\n\n logger.info(\"test\")\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True", "def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name", "def setupLogger():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def on_start(self):\n # self.login()\n\n # self.createfiles()", "def test_first_log_post(self):\n commands = self.conveyer.log(\"{message: \\\"test\\\"}\")\n self.assertEquals(len(commands), 2)\n creator, appender = commands\n self.assertEquals(type(creator), CreateLogCmd)\n self.assertEquals(type(appender), AppendLogCmd)\n self.assertEquals(creator.filename, \"testfile.dat\")\n self.assertEquals(appender.event, \"{message: \\\"test\\\"}\")", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = 
self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()" ]
[ "0.6342506", "0.6339533", "0.632533", "0.62805986", "0.61698097", "0.6142417", "0.60563993", "0.5989396", "0.58973587", "0.58555603", "0.5847372", "0.5842015", "0.5833766", "0.58280855", "0.58130676", "0.5812685", "0.5794679", "0.57772213", "0.5774569", "0.57051474", "0.56672895", "0.56639576", "0.5659014", "0.56439865", "0.56435555", "0.5639434", "0.56275976", "0.5625284", "0.5624935", "0.56205183", "0.5616587", "0.56136113", "0.56103534", "0.5569889", "0.5569889", "0.5569889", "0.5569889", "0.5567085", "0.5564427", "0.5547499", "0.55197346", "0.5513994", "0.5513823", "0.5504043", "0.550232", "0.5501879", "0.54956096", "0.5491616", "0.5484874", "0.5478824", "0.54627484", "0.5459786", "0.54552877", "0.54547197", "0.5447296", "0.5424716", "0.54240227", "0.539382", "0.539324", "0.5388688", "0.53867424", "0.5382472", "0.538232", "0.53765047", "0.5371576", "0.5368943", "0.53621745", "0.535998", "0.5355623", "0.5340445", "0.5339527", "0.5334021", "0.5334021", "0.5332888", "0.53322697", "0.5325968", "0.5321416", "0.5320357", "0.5318883", "0.53150177", "0.53106487", "0.53081346", "0.53005654", "0.5280766", "0.5278624", "0.5275077", "0.52745986", "0.5271479", "0.52642584", "0.5255778", "0.5253342", "0.5247937", "0.524484", "0.52338916", "0.52333367", "0.5225987", "0.5225456", "0.52245986", "0.52224994", "0.52189875" ]
0.7679139
0
wait for our instance to bind its TCP ports
def wait_for_port_bind(self):
    if self.starter_port is not None:
        count = 0
        while count < 10:
            for socket in self.instance.connections():
                if socket.status == "LISTEN" and socket.laddr.port == self.starter_port:
                    print("socket found!")
                    return
            count += 1
            time.sleep(1)
        raise Exception(f"starter didn't bind {self.starter_port} on time!")
    print("dont know port")
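A minimal standalone sketch of the same polling technique shown in the document above, assuming the instance handle is a psutil.Process; the pid, port, and wait_for_listen_port names are hypothetical placeholders introduced here, not taken from the record:

import time
import psutil

def wait_for_listen_port(pid, port, retries=10):
    # Poll the process's TCP sockets until one is listening on the given port.
    proc = psutil.Process(pid)
    for _ in range(retries):
        for conn in proc.connections(kind="tcp"):
            if conn.status == psutil.CONN_LISTEN and conn.laddr.port == port:
                return
        time.sleep(1)
    raise TimeoutError(f"process {pid} did not bind port {port} in time")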
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_open_ports(self, instance_name=\"\"):\n ports = None\n if instance_name in wellknownports:\n ports = wellknownports[instance_name]\n else:\n elements = self.systemd_name.split(\"@\")\n if elements[0] in wellknownports:\n ports = wellknownports[elements[0]]\n if ports:\n ipautil.wait_for_open_ports('localhost', ports,\n self.api.env.startup_timeout)", "def socket_bind(self):\n try:\n self.socket.bind((self.host, self.port))\n self.socket.listen(5)\n except socket.error as e:\n print(\"Socket binding error: \" + str(e))\n time.sleep(5)\n self.socket_bind()\n return", "def _wait_for_port(self, delay=0.1, attempts=20):\n while attempts > 0:\n s = socket.socket()\n try:\n s.connect((self.host, self.port))\n except Exception:\n time.sleep(delay)\n attempts -= 1\n else:\n return\n finally:\n s.close()\n raise RuntimeError(\"Port %d is not open\" % self.port)", "def bind(self):\n self._conn = socket.socket(socket.AF_INET, self.protocol.value)\n try:\n self._conn.bind((self.host, self.port))\n except OSError as e:\n self.close()\n raise BindError(str(e))\n self._conn.setblocking(False)\n self._conn.listen(100)\n self._selector.register(self._conn, selectors.EVENT_READ, self.accept)\n\n # Event callback.\n self.event_callback[ConnectionEvent.ON_BIND](self._conn)\n\n self._mainloop()", "def __bind(self, args = []):\n \n try: \n\n # Start the local chat server and be ready to receive incoming requests\n localServerPort = self.__agent.startLocalServer()\n\n # Sleep a little bit to allow the new thread to open the listening port\n sleep(0.3)\n \n serverIp, serverPort = self.__cm.getConnectionInfo()\n\n self.__cm.send(p.T_BIND, [serverIp, localServerPort])\n reply = self.__cm.receive()\n \n if (reply.type == p.T_ERR):\n raise Exception, \"Port binding was not succussful!\"\n\n except Exception,e:\n self.__handleError('Bind', e)", "def _wait_for_connection(self, port, *args):\n getLogger(__name__).info(\"Waiting for connection on port {}...\"\n .format(port))\n listener = self._create_new_socket()\n listener.bind((\"\", port))\n listener.listen(1)\n conn, addr = listener.accept()\n\n self._set_socket(conn)\n getLogger(__name__).info(\"Connected to peer at {}:{}\"\n .format(addr[0], addr[1]))", "def test_get_unused_port() -> None:\n available_port = get_unused_port()\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.bind((\"\", available_port))\n assert int(sock.getsockname()[1]) == available_port", "def checkPort(self, port, servicename, hint):\n print (\"Checking remote port %s/tcp (%s)\" % (port, servicename)).ljust(65, '.'),\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect( (self._target,int(port)) )\n s.close()\n print \"[ OK ]\"\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR: %s\" % err\n print \"Port %s/tcp seems to be closed\" % port\n print hint\n sys.exit(0)", "def bind_server(self):\n self.MAIN_CONNECTION.bind((self.HOST, self.PORT))", "def tcp_listening(port):\n return (\n subprocess.call(\n tcp_listening_cmd(port).split(),\n stdin=DEVNULL,\n stdout=DEVNULL,\n stderr=DEVNULL,\n close_fds=True,\n )\n == 0\n )", "def go_online(self, start=50000, tries=10):\n flag = False\n for listen_port in xrange(start, start+tries-1):\n if bind_to_port(self.s, listen_port):\n flag = True\n break\n\n if not flag:\n print \"Couldn't bind to connection port. 
Aborting...\"\n sys.exit()\n\n self.s.listen(25)\n print 'Server is listening at', listen_port\n self.port = listen_port", "def wait_for_port(port: int, host: Text = \"127.0.0.1\", timeout: float = 5.0):\n\n start_time = time.perf_counter()\n\n while True:\n try:\n with socket.create_connection((host, port), timeout=timeout):\n break\n except OSError as ex:\n time.sleep(0.01)\n if time.perf_counter() - start_time >= timeout:\n raise TimeoutError(\n \"Waited too long for the port {} on host {} to start accepting \"\n \"connections.\".format(port, host)\n ) from ex", "def port_in_use(port_num):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port_num))\n except OSError:\n return True\n else:\n return False", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")", "def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for listening \")", "def startListening(self, port=-1, findFreePort=False):\n res = self.listen(self.__address, port)\n if findFreePort and Preferences.getCooperation(\"TryOtherPorts\"):\n endPort = port + Preferences.getCooperation(\"MaxPortsToTry\")\n while not res and port < endPort:\n port += 1\n res = self.listen(self.__address, port)\n return res, port", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "def test_connection(self):\n self._bind_to_service()", "def wait_for_port(port, host=\"localhost\", interval=30):\n print('Waiting for database connections to be available...')\n good = False\n while not good:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n good = True\n except socket.error:\n pass\n finally:\n sock.close()\n time.sleep(interval)", "def listen(self):\r\n self.theSocket.listen(0)\r\n print('Socket now listening')\r\n \r\n #wait to accept a connection - blocking call\r\n self.conn, self.addr = self.theSocket.accept()\r\n\r\n print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))\r\n return self.conn", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()", "def setup(self):\n # Bind socket to local host and port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
self.socket.settimeout(2)\n \n try:\n self.socket.bind((HOST, PORT))\n except socket.error:\n return False\n\n if self.running:\n # Start listening on socket\n self.socket.listen(2)\n print \"MapServer: Socket now listening.\"\n\n # Wait to accept a connection - blocking call\n try:\n self.connection, address = self.socket.accept()\n print \"MapServer: Socket connected with \" + address[0] + \":\" + str(address[1])\n self.connection.sendall(str(self.MAP_SIZE_PIXELS)+\"\\n\")\n return True\n except socket.error:\n return False", "def open_listener(self):\n\n try:\n self.listener = Listener((self.host, self.port))\n self.startup_success = True\n log.info(\"listening on '%s', %s\", self.host, self.port)\n except:\n self.startup_success = False\n log.exception(\"Could not bind socket '%s', %s\", self.host, self.port)\n\n self.startup.set()\n return self.startup_success", "def bind_ports(self, ip, ports): #{\n if isinstance(ports, int):\n ports = [ports]\n for p in ports:\n try:\n if p==0:\n port = self.socket.bind_to_random_port(\"tcp://%s\" % ip)\n else:\n self.socket.bind(\"tcp://%s:%i\" % (ip, p))\n port = p\n except zmq.ZMQError:\n # bind raises this if the port is not free\n continue\n except zmq.ZMQBindError:\n # bind_to_random_port raises this if no port could be found\n continue\n else:\n break\n else:\n raise zmq.ZMQBindError('Could not find an available port')\n\n url = 'tcp://%s:%i' % (ip, port)\n self.bound.add(url)\n self._ready = True\n\n return port", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def WaitUntilServing(self, timeout=30.0):\n assert self._process, 'server was not started'\n finish_time = time.time() + timeout\n while time.time() < finish_time:\n if self._process.poll() is not None:\n raise Error('server has already exited with return: %r',\n self._process.returncode)\n if self._CanConnect():\n return\n time.sleep(0.2)\n raise Error('server did not start after %f seconds', timeout)", "def check_port_availability(self, hostname, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.logger.debug(\"Attempting to connect to {}:{}\\\n \".format(hostname, port))\n num_retries = 1000\n retry_count = 0\n port_up = False\n while(retry_count < num_retries):\n if(sock.connect_ex((hostname, int(port)))):\n self.logger.debug(\"{} port is up on {}\".format(port, hostname))\n port_up = True\n break\n retry_count += 1\n time.sleep(0.1)\n return port_up", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def check_free_port(host, port, verbose=True):\n sock = socket.socket()\n try:\n sock.bind((host, port))\n sock.close()\n print(\"host {} on port {} is AVAIL\".format(host, port))\n return(True)\n except:\n print(\"host {} on port {} is BUSY\".format(host, port))\n sock.close()\n return(False)", "def server_bind(self):\n # SO_REUSEADDR: reuse the socket in TIME_WAIT state without\n # waiting for its natural timeout to expire\n # Allows local address reuse\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # If no timeout is set then recv() will block forever if\n # the connection is kept alive with no data sent\n # SO_RCVTIMEO: the timeout on receive calls in seconds\n # set using a packed binary string containing two uint32s as\n # (seconds, microseconds)\n if 
self.ae.network_timeout is not None:\n timeout_seconds = int(self.ae.network_timeout)\n timeout_microsec = int(self.ae.network_timeout % 1 * 1000)\n self.socket.setsockopt(\n socket.SOL_SOCKET,\n socket.SO_RCVTIMEO,\n pack('ll', timeout_seconds, timeout_microsec)\n )\n\n # Bind the socket to an (address, port)\n # If address is '' then the socket is reachable by any\n # address the machine may have, otherwise is visible only on that\n # address\n self.socket.bind(self.server_address)\n self.server_address = self.socket.getsockname()", "def port_is_alive(target, port):\n a_socket = socket(AF_INET, SOCK_STREAM)\n a_socket.settimeout(5)\n\n location = (target, port)\n try:\n result_of_check = a_socket.connect_ex(location)\n except gaierror:\n return False\n a_socket.close()\n\n if result_of_check == 0:\n return True\n else:\n return False", "def listen(self):\n self.socket.listen(6)", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n # Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def listeningPort(self):\n listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listening_socket.bind((self.ip, 9090))\n listening_socket.listen(1)\n while self.running == True:\n new_conn_sock, (new_conn_ip, new_conn_port) = listening_socket.accept()\n message = self.parseMessage(new_conn_sock)\n if (message.type == 'NHST'):\n print('Got NHST message from ' + message.origin)\n new_thread = Thread(target=lambda: self.listenToHost(new_conn_sock))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(new_conn_ip, new_conn_sock, new_thread)\n self.connections.append(new_connection)\n host_area = str(self.x_min) + ':' + str(self.x_max)\n #send current host area to the newly connected host\n area_message = Message('AREA', self.ip, host_area)\n new_conn_sock.sendall(area_message.generateByteMessage())\n print('Sent AREA message to ' + new_conn_ip)\n else:\n print('Invalid Message Type received from ' + message.origin)\n new_conn_sock.close()\n return", "def find_unbound_port():\n while True:\n port = random.randint(*PORT_RANGE)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.bind((\"127.0.0.1\", port))\n return port\n except socket.error:\n print(\"randomly generated port %d is bound. 
Trying again.\" % port)", "def status(self):\n pid = self.pid()\n if pid is None or not pid_exists(pid):\n return False\n\n process = Process(pid)\n try:\n for connection in process.connections():\n if connection.status == 'LISTEN' and \\\n connection.laddr[1] == self.port:\n return True\n except AccessDenied:\n return False\n\n return False", "def test_execute_check_tcp(delay):\n port = port_for.select_random()\n check = check_tcp(port)\n\n assert check() is False\n process = execute(\n [SERVICE, '--delay', str(delay), 'tcp', '--port', str(port)],\n [check_tcp(port)],\n timeout=1 + delay)\n assert check() is True\n assert process.poll() is None # Still running.\n process.kill()", "def assert_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((\"\", port))\n except socket.error:\n raise exceptions.SpotifyError(\n \"Port {} is not available. If you are currently running a server, \" \"please halt it for a min.\".format(port)\n )\n finally:\n s.close()", "def _server_started(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((self.host, self.port))\n s.shutdown(2)\n return True\n except:\n return False", "def start(self, tcp_port, udp_port):\n self.running = True\n\n try:\n self.sock.bind(('', tcp_port))\n self.sock_udp.bind(('', udp_port))\n self.sock.listen(2)\n\n start_new_thread(self.udp_listener, ())\n\n out('Server listen on port '+str(tcp_port)+'/tcp')\n out('Server listen on port '+str(udp_port)+'/udp')\n\n except socket.error, msg:\n self.sock.close()\n self.sock = None\n self.running = False\n print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]\n\n if self.sock is not None:\n gt = None\n while self.running:\n conn, addr = self.sock.accept()\n\n if self.p1 is None:\n self.p1 = conn\n out('Player 1 is connected')\n elif self.p2 is None:\n self.p2 = conn\n out('Player 2 is connected')\n else:\n conn.close()\n out('rps is already running')\n\n if gt is None and self.p1 is not None and self.p2 is not None:\n # If both players are connected, start the game\n\n gt = RPSThread(self.p1, self.p2)\n gt.start()", "def nat_waitforconn_alive():\r\n return NAT_STATE_DATA[\"mux\"] != None and NAT_STATE_DATA[\"mux\"].isAlive()", "def is_port_taken(host, port):\n socket = socketserver.socket\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((host, port))\n s.shutdown(1)\n time.sleep(2)\n return True\n except:\n return False", "def is_port_listening(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n result = s.connect_ex((\"127.0.0.1\", port))\n return result == 0", "def bind(self):\n if self.allow_reuse_address:\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n self.socket.bind(self.server_address)\n self.server_address = self.socket.getsockname()", "def is_port_available(port):\n port = int(port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n available = sock.connect_ex(('localhost', port))\n sock.close()\n return available", "def socket_port(ip, port):\n socket.setdefaulttimeout(3) \n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = s.connect_ex((ip, port))\n if result == 0:\n print(ip, u':', port, u'port is occupied')\n return False\n return True\n except Exception as error:\n print('error:', error)\n return False", "def get_free_port():\n max_tries = 0\n while max_tries < MITM_MAX_TRIES:\n max_tries += 1\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n port = 
s.getsockname()[1]\n except Exception:\n sleep(1)\n else:\n return port\n return None", "def detect_used_ports():\n MAX_PORT = 1025\n DEFAULT_HOST = '127.0.0.1'\n open_ports = []\n socket.setdefaulttimeout(1)\n for port in range(0, MAX_PORT):\n res = port_scan(DEFAULT_HOST, port)\n if res:\n open_ports.append(port)\n # debugging purpose to see if program is running\n if port % 5000 == 0 and port != 0:\n sys.stderr.write('.')\n return open_ports", "def __init__(self, host, port):\n super(TcpThreadedListeningServer, self).__init__()\n\n self.socket = TcpListeningSocket(host, port)\n\n # if there is a problem with closing, enable the timeout\n # self.socket.timeout = 3", "def server_bind(self):\n\t\tif self.allow_reuse_address:\n\t\t\tself.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\tself.socket.bind(self.server_address)", "def new_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n for i in range(12042, 16042):\n try:\n s.bind(('127.0.0.1', i))\n s.close()\n return i\n except socket.error, e:\n pass\n raise Exception('No local port available')", "def initialize_and_run(self, port, host=''):\n port = int(port)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((host, port))\n while True:\n self.sock.listen(5) # TODO: make this configurable\n conn, addr = self.sock.accept()\n raw_command = conn.recv(1024)\n splitted = raw_command.split()\n if splitted:\n command, args = splitted[0], splitted[1:]\n else:\n command, args = b'', b''\n command = command.decode()\n args = [x.decode() for x in args]\n\n try:\n result = self.handler(command, args)\n except Exception:\n logger.info(traceback.format_exc())\n # kill all the child processes\n self.handle_killall()\n result = 'Error occured. Please check log at /tmp/assistant.log.' 
# noqa\n\n out = '{}\\n'.format(result)\n conn.send(try_encode(out))\n conn.close()", "def open_rtp_port(self):\n self.rtp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.rtp_socket.settimeout(0.005)\n try:\n self.rtp_socket.bind((\"127.0.0.1\", self.rtp_port))\n except:\n QMessageBox.critical(self, 'Unable to Bind', 'Unable to bind PORT=%d' % self.rtp_port,\n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)", "def _reserve_port():\n sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:\n raise RuntimeError(\"Failed to set SO_REUSEPORT.\")\n sock.bind(('', int(SERVICE_PORT)))\n try:\n yield sock.getsockname()[1]\n finally:\n sock.close()", "def __wait_for_master_ssh( self ):\n for _ in itertools.count( ):\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n try:\n s.settimeout( 5 )\n s.connect( ('mesos-master', 22) )\n return\n except socket.error:\n pass\n finally:\n s.close( )", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def wait_for_servers(self, timeout):\n for user, host, port in self.server_addresses:\n if not self.wait_for_server(user, host, port, timeout):\n logging.warn(\"could not start server %s:%s:%s\", user, host, port)\n return False\n return True", "def setup_socket(self):\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.config['HOST_NAME'], self.config['BIND_PORT']))\n self.server_socket.listen(10)", "def _check_port_available(hostname, port):\n for config_file in config_files:\n network_config = networkConfig(config_file)\n for name, host in network_config.hostDict.items():\n if port == host.port:\n return False\n\n return _check_socket_is_free(hostname, port)", "def test_startedOnce(self):\n port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)\n port._listen = self.listen\n port.privilegedStartService()\n self.assertEqual(len(self.ports), 1)\n self.checkPort(self.ports[0])\n port.startService()\n self.assertEqual(len(self.ports), 1)", "def wait_for_connections(sumo_command, shlex, sumo_port, bind_address, do_daemonize, do_kill, pidfile, keep_temp):\n \n if do_kill:\n check_kill_daemon(pidfile) \n \n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listener.bind((bind_address, sumo_port))\n listener.listen(5)\n logging.info(\"Listening on port %d\" % sumo_port)\n\n if do_daemonize:\n logging.info(\"Detaching to run as daemon\")\n daemonize(pidfile)\n\n try:\n while True:\n conn, addr = listener.accept()\n logging.debug(\"Connection from %s on port %d\" % addr)\n thread.start_new_thread(handle_connection, (sumo_command, shlex, conn, addr, keep_temp))\n \n except exceptions.SystemExit:\n logging.warning(\"Killed.\")\n \n except exceptions.KeyboardInterrupt:\n logging.warning(\"Keyboard interrupt.\")\n \n except:\n raise\n \n finally:\n # clean up\n logging.info(\"Shutting down.\")\n listener.close()", "def wait(self, retries=30):\n running = check_server(self.adj.host, self.adj.port,\n '/__application__', retries=retries)\n if running:\n return True\n try:\n 
self.shutdown()\n finally:\n return False", "def start(self) -> None:\n try:\n self._socket.bind((self.ip, self.port))\n\n except socket.error as e:\n print(e)\n\n else:\n self._socket.listen()\n logger.info('Server is online!')\n\n run = True\n while run:\n conn_data = ConnectionData()\n self._accept_conn(conn_data)\n\n # Makes the server stoppable\n while conn_data.conn is None or conn_data.addr is None:\n try:\n time.sleep(0.1)\n except KeyboardInterrupt:\n run = False\n break\n\n conn, addr = conn_data.conn, conn_data.addr\n logger.info(f'Connection established to {addr}')\n\n if self.func is not None:\n self.func(conn, addr)", "def wait_for_ssh(self):\n self.wait_for_status(16)\n printy(\"The instance is now running ...\")\n # The instance is running, but we give it 60 more seconds for running\n # SSHD\n printy(\"Waiting 60 seconds for SSH server to start ...\")\n time.sleep(60)", "def bind_sockets(port, address=..., family=..., backlog=..., flags=..., reuse_port=...):\n ...", "def start(self):\n retries=0\n while retries<=self.RETRIES and self._running==False:\n try:\n print(\"Trying to start server\")\n self._s.bind((self.address, self._listenToPort))\n #self._s.listen(5) #not required for UDP\n self._running = True\n print(\"Running\")\n\n except Exception as e:\n #self.stop()\n if retries<self.RETRIES:\n print(\"starting server failed, retrying...\",e)\n sleep(1)\n else:\n print(\"Server Failed \",e)\n return False\n retries=retries+1\n\n\n self._listen=True\n self._t1.start() #_listenForDataThread\n return True", "def start(self):\n logging.info(\"! start udp %s:%s\" % (self._cfg.host, self._cfg.port))\n self._sock.bind((self._cfg.host, self._cfg.port))\n self.ready()\n self._status = \"ok\"\n while self._status:\n (txt, addr) = self._sock.recvfrom(64000)\n if not self._status: break\n data = str(txt.rstrip(), \"utf-8\")\n if not data: break\n self.output(data, addr)\n logging.info(\"! 
stop udp %s:%s\" % (self._cfg.host, self._cfg.port))", "def __init__(self, port: int, backlog: int = 0):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((\"localhost\", port))\n self.socket.listen(backlog)", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def wait_for_simulation():\n MGMT_PORT = 54322\n SIMULATION_END_CMD_RC = 80\n SDP_HEADER_FORMAT = '<HBBBBHHIiii'\n SDP_HEADER_SIZE = struct.calcsize(SDP_HEADER_FORMAT)\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(1) \n sock.bind(('0.0.0.0', MGMT_PORT)) # Bind all addresses on given port\n \n while True:\n try:\n data = sock.recv(1024) \n unpack = struct.unpack(SDP_HEADER_FORMAT, data[:SDP_HEADER_SIZE])\n command = unpack[7] \n if command == SIMULATION_END_CMD_RC: break\n except socket.timeout:\n pass", "def find_free_port(ports_socket, name):\n request_name = \"-\".join((name, str(os.getpid())))\n while True:\n port = test_server_request(ports_socket, request_name, GETPORT)\n if not tcp_listening(port):\n return port\n error(\"port %u is busy, try another\" % port)", "def test_setup_server(self):\r\n\r\n self.server_socket = setup_server()\r\n host, port = self.server_socket.getsockname()\r\n # Not much to test here, if we get something back it should work\r\n self.assertGreater(port, 1023)\r\n # binding to a known port should fail\r\n self.assertRaises(error, setup_server, 80)", "def run_server(port, create):\r\n host = '' # all available network interfaces\r\n # create an internet socket for TCP protocol\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind((host, port))\r\n s.listen(True) # listen for incoming connections\r\n print 'listening on port', port\r\n while True:\r\n conn, addr = s.accept() # accept a connection\r\n new_connection = HandleConnection(conn, addr, create)\r\n new_connection.start()\r\n # handle_connection(conn, addr, create)\r\n s.close() # can't get here\r", "def run(self):\n self._socket = _get_socket(self.opts)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n _set_tcp_keepalive(self._socket, self.opts)\n self._socket.setblocking(1)\n self._socket.bind(_get_bind_addr(self.opts, \"ret_port\"))\n self._socket.listen(self.backlog)\n\n while True:\n try:\n # Wait for a connection to occur since the socket is\n # blocking.\n connection, address = self._socket.accept()\n # Wait for a free slot to be available to put\n # the connection into.\n # Sockets are picklable on Windows in Python 3.\n self.socket_queue.put((connection, address), True, None)\n except OSError as e:\n # ECONNABORTED indicates that there was a connection\n # but it was closed while still in the accept queue.\n # (observed on FreeBSD).\n if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:\n continue\n raise", "def serve_ports(ports_socket, start_free_ports, min_free_ports):\n ports_q = collections.deque()\n free_ports = set()\n port_age = {}\n serialno = 0\n\n def get_port():\n while True:\n free_socket = socket.socket()\n free_socket.bind((\"\", 0))\n free_port = free_socket.getsockname()[1]\n free_socket.close()\n if free_port < 1024:\n continue\n if free_port in RESERVED_FOR_TESTS_PORTS:\n continue\n if free_port in free_ports:\n continue\n break\n free_ports.add(free_port)\n port_age[free_port] = time.time()\n return free_port\n\n def queue_free_ports(min_queue_size):\n while len(ports_q) < min_queue_size:\n port = get_port()\n ports_q.append(port)\n port_age[port] = time.time()\n\n 
queue_free_ports(start_free_ports)\n ports_by_name = collections.defaultdict(set)\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.bind(ports_socket)\n sock.listen(1)\n cold_start = True\n\n while True:\n connection, _ = sock.accept()\n command, name = receive_sock_line(connection).split(\",\")\n response = None\n if command == GETSERIAL:\n serialno += 1\n response = serialno\n elif command == PUTPORTS:\n ports_returned = 0\n for port in ports_by_name[name]:\n ports_returned += 1\n ports_q.append(port)\n port_age[port] = time.time()\n del ports_by_name[name]\n response = ports_returned\n if ports_returned:\n cold_start = False\n elif command == GETPORT:\n while True:\n port = ports_q.popleft()\n if time.time() - port_age[port] > MIN_PORT_AGE or cold_start:\n break\n ports_q.append(port)\n time.sleep(1)\n ports_by_name[name].add(port)\n response = port\n queue_free_ports(min_free_ports)\n elif command == LISTPORTS:\n response = list(ports_by_name[name])\n if response is not None:\n response_str = \"\"\n if isinstance(response, int):\n response = [response]\n response_str = \"\".join([\"%u\\n\" % i for i in response])\n connection.sendall(response_str.encode()) # pylint: disable=no-member\n connection.close()", "def _wait_until_up(self, address, port, timeout):\n\n def check_up(addr, p):\n \"\"\"\n Find out if a port at an address is occupied\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((addr, p))\n sock.close()\n if result == 0:\n ans = True\n else:\n ans = False\n return ans\n\n max_time = time() + timeout\n up = False\n while not up and time() < max_time:\n self.log.debug(\"Checking if Galaxy is up...\")\n up = check_up(address, port)\n\n # If we query Galaxy immediately it may reset the connection:\n sleep(10)\n\n if not up:\n raise Exception('There was no response at {} on port {} for {} seconds'\n .format(address, port, timeout))", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def begin_listening(self):\n print(responses.STATUS_STARTING_TO_LISTEN)\n sockets = [self.english_sc, self.maori_sc, self.german_sc]\n\n try:\n incoming, outgoing, exceptions = select.select(sockets, [], [])\n\n if incoming[0] == self.english_sc:\n self.process_incoming(\n incoming[0], self.english_sc, self.ports['English'])\n return True\n\n elif incoming[0] == self.maori_sc:\n self.process_incoming(\n incoming[0], self.maori_sc, self.ports['Te reo Maori'])\n return True\n\n elif incoming[0] == self.german_sc:\n self.process_incoming(\n incoming[0], self.german_sc, self.ports['German'])\n return True\n\n else:\n print(responses.ERROR_FOREIGN_PORT)\n return False\n\n except:\n print(responses.ERROR_NO_SOCKET)\n return False", "def findFreePort(start=8000, end=1<<16):\n for port in range(start, end+1):\n try:\n sock = socket.socket()\n sock.bind(('', port))\n return port\n finally:\n sock.close()\n raise ValueError('Impossible to find a free port in %s-%s' % (start, end))", "async def listen(self, maddr: Multiaddr) -> bool:", "def tcp_listening_cmd(port, ipv=4, state=\"LISTEN\", terse=True, pid=None):\n return lsof_tcp_listening_cmd(port, ipv, state, terse, pid)", "def tryconnect(name, port):\n return port_talker.TCPTalk(name, port, 2, '', None, 0, 1) # use ext. 
resolver", "def bind(self, server_name: str, port: int) -> None:\n self.socket.bind((server_name, port))", "def get_free_port() -> int:\n not_free = True\n while not_free:\n port = np.random.randint(7000, 7999)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n res = sock.connect_ex((\"localhost\", port))\n if res != 0:\n not_free = False\n return port", "def is_listening(port):\n return not listening(port)", "def _wait_for_management(self, ip, timeout, port=80):\n validation_url = 'http://{0}:{1}/blueprints'.format(ip, port)\n\n end = time.time() + timeout\n\n while end - time.time() >= 0:\n try:\n status = urllib.urlopen(validation_url).getcode()\n if status == 200:\n return True\n except IOError:\n time.sleep(5)\n\n return False", "def checkAlive(self, timeout = 1500 , port = 3389):\n time_retry = 90\n # ugly c-style loop \n while 1:\n try:\n ip = self.getIp()\n if not ip:\n logging.warning(\"!Failed to obtain ip address\")\n else:\n logging.info(\"Probing \" + str(ip) + \":\" + str(port) + \" for connectivity\")\n sock = socket.create_connection((ip,port) , timeout)\n sock.close()\n logging.info(\"Server \" + str(ip) + \":\" + str(port) + \" successfully responded\")\n return True\n except Exception as e:\n logging.error(\"!: Failed to probe the remote server for a connection!\")\n logging.error(\"!:\" + str(e))\n logging.error(traceback.format_exc())\n timeout = timeout - time_retry\n if timeout > 0:\n logging.info(\"--- Waiting more \" + str(timeout) + \" for it to respond\");\n time.sleep(time_retry)\n else:\n break\n\n return False", "def _startListening(self, port=None):\n if not port:\n port = self.registryValue('port')\n self.listener = reactor.listenTCP(port, self.site)", "def waitForConnection(self):\n logging.debug(\"Establishing connection\")\n if self.tcpsocket is not None:\n result = True\n if not self.isConnected() and not self._hasError:\n # spy = QSignalSpy(self.tcpsocket.connected)\n spy = QSignalSpy(self._stopWaiting)\n result = spy.wait(5000) # Asynchronous wait, Timeout 5 s\n if not result:\n # it is bad if the socket needs longer than the timeout and connects\n # after the connection is deemed dead\n self.close()\n logging.debug(\"Connection not established, manually closing socket.\")\n return result and not self._hasError\n return False", "def port_test(self):\n self._rpc_version_warning(5)\n result = self._request('port-test')\n if 'port-is-open' in result:\n return result['port-is-open']\n return None", "def try_connection_start_stop_writing():\n global start_stop_video_socket\n\n start_stop_video_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n start_stop_video_socket.bind((IP, PORT_START_STOP))\n start_stop_video_socket.listen(1)\n print \"Listening\"\n start_stop_video_socket = start_stop_video_socket.accept()[0]\n\n video_writer_listen()", "def server_bind(self):\n basehttp.WSGIServer.server_bind(self)\n self.socket.settimeout(1)", "def test_open_with_retries(self):\n # Bind port\n port_blocker = InverterFinder()\n port_blocker.open()\n\n # Try binding port using retry function in separate thread\n def try_bind(q: Queue):\n finder = InverterFinder()\n finder.open_with_retries(retries=10, period=0.01)\n finder.close()\n # If bind failed, an exception should've been thrown by now\n # I assume the bind has succeeded here\n q.put(True)\n\n queue = Queue()\n thread = Thread(target=try_bind, args=(queue,))\n thread.start()\n\n # Unbind port\n sleep(0.01)\n port_blocker.close()\n\n # Check if bind succeeded\n thread.join()\n succeeded 
= queue.get(timeout=1.0)\n self.assertTrue(succeeded)", "async def test_port_is_available(hass):\n next_port = await hass.async_add_executor_job(\n find_next_available_port, DEFAULT_CONFIG_FLOW_PORT\n )\n assert next_port\n\n assert await hass.async_add_executor_job(port_is_available, next_port)", "def make_data_port(self):\n err = None\n sock = None\n for res in socket.getaddrinfo(None, 0, socket.AF_INET, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):\n af, socktype, proto, canonname, sa = res\n try:\n sock = socket.socket(af, socktype, proto)\n sock.bind(sa)\n except OSError as _:\n err = _\n if sock:\n sock.close()\n sock = None\n continue\n break\n if sock is None:\n if err is not None:\n raise err\n else:\n raise OSError(\"getaddrinfo returns an empty list\")\n sock.listen(1)\n port = sock.getsockname()[1]\n host = self.sock.getsockname()[0]\n response = self._send_port_command(host, port)\n return sock, response", "def local_bind_ports(self):\n self._check_is_started()\n return [_server.local_port for _server in self._server_list if\n _server.local_port is not None]", "def get_available_port() -> int:\n with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.bind(('', 0))\n _, port = sock.getsockname()\n return int(port)", "def _find_open_port(worker_ip: str, local_listen_port: int, ports_to_skip: Iterable[int]) -> int:\n max_tries = 1000\n found_port = False\n for i in range(max_tries):\n out_port = local_listen_port + i\n if out_port in ports_to_skip:\n continue\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((worker_ip, out_port))\n found_port = True\n break\n # if unavailable, you'll get OSError: Address already in use\n except OSError:\n continue\n if not found_port:\n msg = \"LightGBM tried %s:%d-%d and could not create a connection. Try setting local_listen_port to a different value.\"\n raise RuntimeError(msg % (worker_ip, local_listen_port, out_port))\n return out_port" ]
[ "0.7288536", "0.70136964", "0.699281", "0.669659", "0.663772", "0.66184366", "0.6514401", "0.6453551", "0.6413905", "0.63941497", "0.63894254", "0.6319579", "0.6308585", "0.6289045", "0.6239562", "0.6228363", "0.6223435", "0.62091696", "0.62067133", "0.6188757", "0.6175939", "0.6159813", "0.61548287", "0.6138284", "0.6120944", "0.6082442", "0.6073268", "0.6066105", "0.60513693", "0.60339737", "0.60218674", "0.60213965", "0.60071975", "0.599776", "0.598107", "0.5960159", "0.5954804", "0.59508395", "0.594757", "0.59170276", "0.5913102", "0.59068257", "0.5875694", "0.5871548", "0.5869457", "0.5867774", "0.5857002", "0.58507645", "0.58498627", "0.5827556", "0.5824093", "0.5822572", "0.58191603", "0.5805281", "0.58035433", "0.58028233", "0.57989043", "0.57985324", "0.5798237", "0.57963127", "0.5791742", "0.57713956", "0.57672465", "0.57613647", "0.5756483", "0.57485247", "0.5746421", "0.5745841", "0.5743874", "0.57276124", "0.5725615", "0.57188773", "0.5708661", "0.57029545", "0.57021815", "0.5699434", "0.56961167", "0.5695592", "0.56902206", "0.5687931", "0.568636", "0.5682193", "0.56796926", "0.5671023", "0.56700826", "0.5668579", "0.56641537", "0.56590855", "0.56583875", "0.5655411", "0.56502205", "0.56464493", "0.5644977", "0.56401426", "0.5636083", "0.5635936", "0.5632604", "0.5631296", "0.56237775", "0.56213427" ]
0.8042169
0
In single server mode the 'upgrade' commander exits before the actual upgrade is finished. Hence we need to look into the log file of the managing starter to see whether it thinks the upgrade is finished.
def wait_for_upgrade_done_in_log(self, timeout=120):
    keep_going = True
    logging.info('Looking for "Upgrading done" in the log file.\n')
    while keep_going:
        text = self.get_log_file()
        pos = text.find("Upgrading done.")
        keep_going = pos == -1
        if keep_going:
            time.sleep(1)
            progress(".")
            timeout -= 1
            if timeout <= 0:
                raise TimeoutError("upgrade of leader follower not found on time")
    for instance in self.all_instances:
        instance.wait_for_shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def upgrade(self):", "def upgrade(self):", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()", "def full_upgrade(self):\n return self.upgrade(\"full-upgrade\")", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n 
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! 
\")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. 
Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def upgrade_server():\n log('Atualizando programas', yellow)\n 
sudo('apt-get -y upgrade')", "def upgrade_script():\n if postgres.db_exists(env.db):\n with cd(path()):\n sudo('bin/upgrade_{odoo} -d {db} '.format(**env), user=env.account)", "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def test_nothing_to_upgrade(self, mock_click_echo):\n agent_config = self.load_agent_config(self.agent_name)\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Checking if there is a newer remote version of agent package '{agent_config.public_id}'...\"\n )\n mock_click_echo.assert_any_call(\n \"Package not found, continuing with normal upgrade.\"\n )\n mock_click_echo.assert_any_call(\"Everything is already up to date!\")", "def run_migration(env, upgrade_type):\n pass", "def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )", "def _upgradeDB():\n\n # Set current DB name\n currentDbName = basedefs.DB_NAME\n\n # Before db upgrade we want to make a backup of existing db in case we fail\n # The backup is performed on local system, even for remote DB.\n dbBackupFile = tempfile.mkstemp(suffix=\".sql\", dir=basedefs.DIR_DB_BACKUPS)[1]\n logging.debug(\"backing up %s db to file %s\"%(basedefs.DB_NAME, dbBackupFile))\n\n # Run db backup\n utils.backupDB(basedefs.DB_NAME, getDbUser(), dbBackupFile, getDbHostName(), getDbPort())\n\n # Rename DB first. 
If it fails - stop with \"active connections\" error.\n # if upgrade passes fine, rename the DB back.\n DB_NAME_TEMP = \"%s_%s\" % (basedefs.DB_NAME, utils.getCurrentDateTime())\n utils.renameDB(basedefs.DB_NAME, DB_NAME_TEMP)\n currentDbName = DB_NAME_TEMP\n\n # if we're here, DB was renamed.\n # upgrade script must run from dbscripts dir\n currentDir = os.getcwd()\n os.chdir(basedefs.DIR_DB_SCRIPTS)\n\n try:\n\n logging.debug(\"upgrading db schema\")\n cmd = [\n os.path.join(basedefs.DIR_DB_SCRIPTS, basedefs.FILE_DB_UPGRADE_SCRIPT),\n \"-u\", getDbUser(),\n \"-d\", DB_NAME_TEMP,\n \"-s\", getDbHostName(),\n \"-p\", getDbPort(),\n ]\n\n # Run upgrade.sh script to update existing db\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_UPGRADE_FAILED)\n\n # Log the successful upgrade\n logging.debug('Successfully upgraded %s DB'%(basedefs.DB_NAME))\n controller.MESSAGES.append(\"DB was upgraded to latest version. previous DB backup can be found at %s\"%(dbBackupFile))\n\n # Go back to previous dir\n os.chdir(currentDir)\n\n # Upgrade was successful, so rename the DB back.\n utils.renameDB(DB_NAME_TEMP, basedefs.DB_NAME)\n currentDbName = basedefs.DB_NAME\n\n # Update rpm version in vdc options\n utils.updateVDCOption(\"ProductRPMVersion\", utils.getRpmVersion(basedefs.ENGINE_RPM_NAME))\n except:\n # Upgrade failed! we need to restore the old db\n logging.debug(\"DB upgrade failed, restoring it to a previous state. DB was backed up to %s\", dbBackupFile)\n\n # Delete the original DB.\n # TODO: handle the case of failure - it should not stop the flow, but should write to the log\n sqlQuery=\"DROP DATABASE %s\" % currentDbName\n utils.execRemoteSqlCommand(getDbUser(), \\\n getDbHostName(), \\\n getDbPort(), \\\n basedefs.DB_POSTGRES, \\\n sqlQuery, False, \\\n output_messages.ERR_DB_DROP)\n\n # Restore the DB\n utils.restoreDB(getDbUser(), getDbHostName(), getDbPort(), dbBackupFile)\n\n raise Exception(output_messages.ERR_DB_UPGRADE_FAILED)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! You are already at the latest version available\"\n ) != -1", "def test_serviceRunsMigrations(self):\n m1 = TestMigration(store=self.store)\n m2 = TestMigration(store=self.store)\n self.store.powerUp(m1)\n self.store.powerUp(m2)\n self.assertEquals(m1.ran, 0)\n self.assertEquals(m2.ran, 0)\n self.manager.startService()\n self.assertEquals(m1.ran, 1)\n self.assertEquals(m2.ran, 1)", "def upgrade(self):\n # The workaround we need in order to fix [1]. 
In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def mep_260(ctx):\n click.secho(\"Start migration v2.6\", fg=\"cyan\")\n click.secho(\"Nothing\", fg=\"cyan\")", "def null_upgrade_step(setup_tool):\n pass", "def update_self(self, command):\n yield from command.reply('Starting full self update...')\n yield from self.git_pull(command)\n yield from self.migrate(command)\n yield from self.sysinfo(command)\n yield from self.restart(command)", "def db_upgrade():\n generate_migration_file()\n dbu_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n for time_step in [_.strip('.sql') for _ in migration_files()]:\n decide = MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': time_step}})\n if not decide:\n MySQLScheme.commit(getattr(dbu_query, f\"upgrade_{time_step}\").sql)\n LOGGER.info(f\"successful migration: {time_step}\")\n else:\n LOGGER.info(f'migration already exists: {time_step}')", "def test_nothing_to_upgrade(self, mock_click_echo):\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Updating AEA version specifier from ==0.1.0 to {compute_specifier_from_version(get_current_aea_version())}.\"\n )\n\n # test 'aea_version' of agent configuration is upgraded\n expected_aea_version_specifier = compute_specifier_from_version(\n get_current_aea_version()\n )\n agent_config = self.load_agent_config(self.current_agent_context)\n assert agent_config.aea_version == expected_aea_version_specifier\n assert agent_config.author == self.author\n assert agent_config.version == DEFAULT_VERSION", "def continue_server():\n update_server_status({'ready': True})", "def IntrumentHook(self):\n #if iserver is not running\n # return fail\n pass", "def run_migration(self):\n step = \"Migrating Database\"\n try:\n self.slacker.send_thread_reply(step)\n self.kuber.run_migration(tag=self.tag, source=config.APP_MIGRATOR_SOURCE)\n self.migration_completed = True\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def default_upgrade_charm():\n reactive.set_state('upgraded')", "def upgradedb(ctx):\n path = Path(__file__).resolve().parent.parent\n conf = Config(str(path / \"migrations\" / \"alembic.ini\"))\n conf.set_main_option(\"script_location\", str(path / \"migrations\"))\n command.upgrade(conf, \"heads\")", "def main():\n updater = VersionUpdater('PowerDNS-Admin')\n updater.run()", "def setup_complete():\n\n async def predicate(ctx:vbu.Context):\n if await fetch_guild_settings(ctx):\n return True\n raise CheckFailure(f'Your server hasn\\'t yet been set up. 
Use {ctx.prefix}setup')\n return commands.check(predicate)", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))", "def upgrade(ctx):\n tf_cmds = [\n [\"terraform\", \"init\", \"--upgrade\"],\n [\"terraform\", \"refresh\"],\n [\"terraform\", \"apply\", \"-auto-approve\"],\n ]\n\n if ctx.invoked_subcommand is None:\n if click.confirm('Do you want to run upgrade prechecks?'):\n ctx.invoke(precheck)\n else:\n print_warning_msg(f\"Skipping upgrade prechecks\")\n\n click.echo(\n \"Following commands will be run during upgrade\\n%s\" % (\n \"\\n\".join((map(\" \".join, tf_cmds)))\n ),\n )\n for cmd in tf_cmds:\n if click.confirm(\n 'Do you want to continue with %s?' 
%\n \" \".join(cmd),\n ):\n rc = execute_command(cmd)\n if rc != 0:\n print_error_msg(\"Upgrade Failed!!!\")\n return", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def StatusUpdate(msg):\r\n if verbosity > 0:\r\n print msg", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def run_management_command_or_exit(command_name, *args, required_commit=None, custom_message=None, **kwargs):\n try:\n call_command(command_name, *args, **kwargs)\n except Exception:\n traceback.print_exc()\n print(f\"\"\"\n A migration must be performed before this environment can be upgraded to the latest version\n of CommCareHQ. This migration is run using the management command {command_name}.\n \"\"\")\n if required_commit or custom_message:\n print(\"\")\n print(custom_message or f\"\"\"\n Run the following commands to run the migration and get up to date:\n\n commcare-cloud <env> fab setup_limited_release --set code_branch={required_commit}\n\n commcare-cloud <env> django-manage --release <release created by previous command> migrate_multi\n\n commcare-cloud <env> deploy commcare\n \"\"\")\n sys.exit(1)", "def do_upgrade(self, url):\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0", "def diff_versions_agent_server(self):\n # (1)Setup\n self.log.info(\"==(1)Setup, create pool and container.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server | hosts_client)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n cmd = \"dmg system query\"\n positive_test = True\n negative_test = False\n agent_server_ver = \"2.0 agent to 2.0 server\"\n self.verify_daos_libdaos(\"1.1\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (2)dmg system stop\n self.log.info(\"==(2)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (3)Upgrade 1 server-host to new\n self.log.info(\"==(3)Upgrade 1 server to 2.2.\")\n server = hosts_server[0:1]\n self.upgrade(server, [])\n self.log.info(\"==(3.1)server %s Upgrade to 2.2 completed.\", server)\n\n # (4)Negative test - dmg pool query on 
mix-version servers\n self.log.info(\"==(4)Negative test - dmg pool query on mix-version servers.\")\n agent_server_ver = \"2.0 agent, mix-version server-hosts\"\n cmd = \"dmg pool list\"\n exp_err = \"unable to contact the DAOS Management Service\"\n self.verify_daos_libdaos(\n \"4.1\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (5)Upgrade rest server-hosts to 2.2\n server = hosts_server[1:len(hosts_server)]\n self.log.info(\"==(5) Upgrade rest server %s to 2.2.\", server)\n self.upgrade(server, [])\n self.log.info(\"==(5.1) server %s Upgrade to 2.2 completed.\", server)\n\n # (6)Restart 2.0 agent\n self.log.info(\"==(6)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (7)Verify 2.0 agent connect to 2.2 server\n self.log.info(\"==(7)Verify 2.0 agent connect to 2.2 server\")\n agent_server_ver = \"2.0 agent to 2.2 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"admin:0.0.0 are not compatible\"\n self.verify_daos_libdaos(\n \"7.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"7.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} --type POSIX --properties 'rf:2'\".format(pool_id)\n self.verify_daos_libdaos(\"7.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\"7.5\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (8)Upgrade agent to 2.2\n self.log.info(\"==(8)Upgrade agent to 2.2, now 2.2 servers 2.2 agent.\")\n self.upgrade([], hosts_client)\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (9)Pool and containers create on 2.2 agent and server\n self.log.info(\"==(9)Create new pools and containers on 2.2 agent to 2.2 server\")\n agent_server_ver = \"2.2 agent to 2.2 server\"\n cmd = \"dmg pool create --size 5G New_pool1\"\n self.verify_daos_libdaos(\"9.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool list\"\n self.verify_daos_libdaos(\"9.2\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C21 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 C22 --type POSIX --properties 'rf:2'\"\n self.verify_daos_libdaos(\"9.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos container list New_pool1\"\n self.verify_daos_libdaos(\"9.5\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"9.6\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos pool autotest --pool New_pool1\"\n self.verify_daos_libdaos(\"9.7\", hosts_client, cmd, positive_test, agent_server_ver)\n\n # (10)Downgrade server to 2.0\n self.log.info(\"==(10)Downgrade server to 2.0, now 2.2 agent to 2.0 server.\")\n self.log.info(\"==(10.1)Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"==(10.2)Downgrade server to 2.0\")\n 
self.downgrade(hosts_server, [])\n self.log.info(\"==(10.3)Restart 2.0 agent\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n # (11)Verify 2.2 agent to 2.0 server\n agent_server_ver = \"2.2 agent to 2.0 server\"\n cmd = \"daos pool query {0}\".format(pool_id)\n self.verify_daos_libdaos(\"11.1\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"dmg pool query {0}\".format(pool_id)\n exp_err = \"does not match\"\n self.verify_daos_libdaos(\n \"11.2\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n cmd = \"sudo daos_agent dump-attachinfo\"\n self.verify_daos_libdaos(\"11.3\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create {0} 'C_oldP' --type POSIX --properties 'rf:2'\".format(\n pool_id)\n self.verify_daos_libdaos(\"11.4\", hosts_client, cmd, positive_test, agent_server_ver)\n cmd = \"daos cont create New_pool1 'C_newP' --type POSIX --properties 'rf:2'\"\n exp_err = \"DER_NO_SERVICE(-2039)\"\n self.verify_daos_libdaos(\n \"11.5\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n exp_err = \"common ERR\"\n cmd = \"daos pool autotest --pool {0}\".format(pool_id)\n self.verify_daos_libdaos(\n \"11.6\", hosts_client, cmd, negative_test, agent_server_ver, exp_err)\n\n # (12)Downgrade agent to 2.0\n self.log.info(\"==(12)Agent %s Downgrade started.\", hosts_client)\n self.downgrade([], hosts_client)\n self.log.info(\"==Test passed\")", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def StatusUpdate(msg):\n if verbosity > 0:\n print msg", "def this_needs_work_test_hook_upgrade(self):\n self.do_test_hook_install(testee.upgrade_setup, True)", "def test_relaunch_deployment_run(self):\n pass", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def auto_upgrade(self) -> bool:\n return pulumi.get(self, \"auto_upgrade\")", "def at_server_reload(self):\n self.db.started = True", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "async def update(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are updating\n self.add_to_output(f\"Updating...\")\n # create ssh connection to miner\n try:\n conn = await self.get_connection(\"root\", \"admin\")\n # tell the user we are sending the update file\n self.add_to_output(\"Sending upgrade file...\")\n # send the update file\n 
await self.send_file(UPDATE_FILE_S9, \"/tmp/firmware.tar\")\n # install the update and collect the result\n result = await conn.run(f'sysupgrade /tmp/firmware.tar')\n self.add_to_output(result.stdout.strip())\n # tell the user the update completed\n self.add_to_output(f\"Update completed...\")\n except OSError:\n self.add_to_output(f\"Unknown error...\")", "def install_or_upgrade():\n global action_url\n global cgi_executable_path\n global documentation_path\n global full_python_path\n global private_data_directory\n if get_script_mode() == \"install\":\n output(\"\\nInstalling...\")\n elif get_script_mode == \"upgrade\":\n output(\"\\nUpgrading...\")\n if get_script_mode() == \"install\":\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"mkdir -p \" + private_data_directory)\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"cp -r data \" + private_data_directory)\n os.system(\"chmod 755 \" + private_data_directory + \"/*\")\n os.system(\"chmod 1777 \" + private_data_directory)\n\tsubstitute(private_data_directory + \"/footer.html\", \"ACTION_URL\", \\\n\t action_url)\n elif get_script_mode() == \"upgrade\":\n\tpass\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"mkdir -p \" + cgi_executable_path)\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"cp bin/mobile \" + cgi_executable_path)\n substitute(cgi_executable_path, \"DOCUMENT_ROOT_PATH\", \\\n private_data_directory)\n substitute(cgi_executable_path, \"FULL_ACTION_URL\", action_url)\n substitute(cgi_executable_path, \"FULL_PYTHON_PATH\", full_python_path)\n os.system(\"chmod 0755 \" + cgi_executable_path)\n if get_script_mode() == \"install\":\n substitute(\"doc/README\", \"<the private Mobile Web Proxy data directory>\", \\\n private_data_directory)\n os.system(\"mkdir -p \" + documentation_path)\n os.system(\"rm -rf \" + documentation_path)\n os.system(\"mkdir -p \" + documentation_path)\n os.system(\"cd doc; cp -rp * \" + documentation_path)\n if get_script_mode() == \"install\":\n output(\\\n\"\"\"\n\nInstallation is complete. Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")\n if get_script_mode() == \"upgrade\":\n output(\\\n\"\"\"\n\nThe upgrade is complete. 
Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README file.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")", "def main(cli_args, debug):\n lp_client = LpReleaseMigrator(debug, cli_args)\n lp_client.process()", "def safe_upgrade():\n goviewbe.upgrade_db(current_app)", "def command(self) -> None:\n plug.echo(f\"Upgrading RepoBee from v{_installed_version()}...\")\n repobee_requirement = f\"repobee{self.version_spec or ''}\"\n\n upgrade = disthelpers.pip(\n \"install\",\n repobee_requirement,\n upgrade=True,\n no_cache=True,\n force_reinstall=True,\n )\n if upgrade.returncode != 0:\n raise plug.PlugError(\"failed to upgrade RepoBee\")\n\n plug.echo(f\"RepoBee succesfully upgraded to v{_installed_version()}!\")", "def check_for_migrations(worker: bool = True):\n # Test if auto-updates are enabled\n if not get_setting('INVENTREE_AUTO_UPDATE', 'auto_update'):\n return\n\n from plugin import registry\n\n plan = get_migration_plan()\n\n # Check if there are any open migrations\n if not plan:\n logger.info('There are no open migrations')\n return\n\n logger.info('There are open migrations')\n\n # Log open migrations\n for migration in plan:\n logger.info(migration[0])\n\n # Set the application to maintenance mode - no access from now on.\n logger.info('Going into maintenance')\n set_maintenance_mode(True)\n logger.info('Mainentance mode is on now')\n\n # Check if we are worker - go kill all other workers then.\n # Only the frontend workers run updates.\n if worker:\n logger.info('Current process is a worker - shutting down cluster')\n\n # Ok now we are ready to go ahead!\n # To be sure we are in maintenance this is wrapped\n with maintenance_mode_on():\n logger.info('Starting migrations')\n print('Starting migrations')\n\n try:\n call_command('migrate', interactive=False)\n except NotSupportedError as e: # pragma: no cover\n if settings.DATABASES['default']['ENGINE'] != 'django.db.backends.sqlite3':\n raise e\n logger.error(f'Error during migrations: {e}')\n\n print('Migrations done')\n logger.info('Ran migrations')\n\n # Make sure we are out of maintenance again\n logger.info('Checking InvenTree left maintenance mode')\n if get_maintenance_mode():\n\n logger.warning('Mainentance was still on - releasing now')\n set_maintenance_mode(False)\n logger.info('Released out of maintenance')\n\n # We should be current now - triggering full reload to make sure all models\n # are loaded fully in their new state.\n registry.reload_plugins(full_reload=True, force_reload=True)", "def fix_replication_from_slow_query_log_after_upgrade():\n run_mysql_command(\"STOP SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'OFF';\")\n run_mysql_command(\"START SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'ON';\")\n run_mysql_command(\"show slave status\\G;\")", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def startRunHook(self):\n #Verify iserver is running\n if not self.msw.isServerRunning():\n raise EnvironmentError,'Server is down'\n\n #start sdebug 3\n self.localShell.runCommand('localShelljava GetiServerConfig mymsw 
LoggingConfig')", "def __init__(self, store):\n super(UpgradeHelperProcess, self).__init__()\n self.store = store\n self.store.setMigrating(True)", "def auto_upgrade_v1(cfg):\n v1 = V1Status(cfg)\n if v1.installed:\n # On first auto-upgrade pickley (ran in background by wrapper)\n setup_audit_log(cfg)\n inform(\"Auto-upgrading %s packages with pickley v2\" % len(v1.installed))\n for prev in v1.installed:\n pspec = PackageSpec(cfg, prev.name)\n try:\n manifest = perform_install(pspec, is_upgrade=False, quiet=False)\n if manifest and manifest.entrypoints and prev.entrypoints:\n for old_ep in prev.entrypoints:\n if old_ep not in manifest.entrypoints:\n runez.delete(os.path.join(cfg.base.path, old_ep))\n\n except BaseException:\n inform(\"%s could not be upgraded, please reinstall it\" % runez.red(prev.name))\n if prev.entrypoints:\n for old_ep in prev.entrypoints:\n runez.delete(os.path.join(cfg.base.path, old_ep))\n\n inform(\"----\")\n\n v1.clean_old_files()\n inform(\"Done\")", "def pre_upgrade(self, upgrade_specs):\n pass", "def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, Path(self.current_agent_context)),\n )\n assert OefSearchMessage.protocol_id in agent_config.protocols\n assert SOEF_PUBLIC_ID in agent_config.connections\n assert OEF_PUBLIC_ID in agent_config.connections", "def __on_backup_created(self, logger, *args):", "def upgrade(self):\n # replace '\\' with '/' (For NT system compatability)\n filesplit = upgrade_file.replace('\\\\', '/')\n filesplit = filesplit.split('/')\n shortfilename = filesplit[-1]\n\n #define the JSON data for the multipart\n upgradejsondata = {media: {\"image\": partition, \"image-file\": shortfilename, \"reboot-after-upgrade\": 0}}\n url = self.base_url + 'upgrade/hd'\n\n #define the headers that have your auth token\n headers = {'Authorization': \"A10 \" + self.token}\n try:\n print(self.device + ' Performing upgrade, this may take a few minutes depending on your connection, please wait...')\n response = requests.post(url, headers=headers, files={'file': (shortfilename, open(upgrade_file, 'rb'), 'application/octet-stream'), 'json': (None, json.dumps(upgradejsondata), 'application/json'),}, verify=False)\n if response.status_code == 204:\n print(self.device + ' The device successfully upgraded')\n except Exception as e:\n print(' ERROR: Upgrade failed on ' + self.device + ' - ' + str(e))\n return 'FAIL'", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current_migration_version < latest_migration_version:\n next_migration_version = current_migration_version + 1\n logger.info(\n f\"Migrating the database from v{current_migration_version} to v{next_migration_version}...\",\n )\n\n migration = importlib.import_module(f\".migrations.{str(next_migration_version).rjust(3, '0')}\", \"middleman\")\n # noinspection PyUnresolvedReferences\n migration.migrate(self)\n\n # Update the stored migration version\n self._execute(\"UPDATE migration_version SET version = ?\", (next_migration_version,))\n\n logger.info(f\"Database migrated to v{next_migration_version}\")\n current_migration_version += 1", "def test_redeploy(self):\n pass", "def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n 
stop_slave_web()\n start_master_worker()\n print(\"DONE!\")", "def check_first_write(plugin, data_version):\n backend = plugin.backend\n\n logging.info(\"Comparing backup version {} versus first write version {}\".format(\n backend.version, data_version\n ))\n\n if backend.version == data_version - 1:\n logging.info(\"Versions match up\")\n return True\n\n elif backend.prev_version == data_version - 1 and plugin.backend.rewind():\n logging.info(\"Last changes not applied, rewinding non-committed transaction\")\n return True\n\n elif backend.prev_version > data_version - 1:\n kill(\"Core-Lightning seems to have lost some state (failed restore?). Emergency shutdown.\")\n\n else:\n kill(\"Backup is out of date, we cannot continue safely. Emergency shutdown.\")", "def test_retest_deployment_run(self):\n pass", "def data_upgrades():\n pass", "def data_upgrades():\n pass", "def test_first_run(dbbackup, plugin, update, version_file=None, orig_version=None):\n\n if version_file:\n os.unlink(version_file)\n\n cli.initialize()\n update.assert_called_once()\n dbbackup.assert_not_called()\n\n # Check that it got called for each default plugin\n from kolibri.core.settings import DEFAULT_PLUGINS\n\n assert plugin.call_count == len(DEFAULT_PLUGINS)", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def test_normalUpgrade(self):\n\n self.setUpInitialStates()\n\n config.DocumentRoot = self.olddocroot\n config.DataRoot = self.newdataroot\n\n # Check pre-conditions\n self.assertTrue(os.path.exists(os.path.join(config.DocumentRoot, \"principals\")))\n self.assertTrue(os.path.isdir(os.path.join(config.DocumentRoot, \"principals\")))\n self.assertTrue(os.path.exists(os.path.join(config.DocumentRoot, \"principals\", OLDPROXYFILE)))\n self.assertFalse(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))\n\n (yield self.doUpgrade(config))\n\n # Check post-conditions\n self.assertFalse(os.path.exists(os.path.join(config.DocumentRoot, \"principals\",)))\n self.assertTrue(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))", "def test_upgrade(longhorn_upgrade_type,\n upgrade_longhorn_repo_url,\n upgrade_longhorn_repo_branch,\n upgrade_longhorn_manager_image,\n upgrade_longhorn_engine_image,\n 
upgrade_longhorn_instance_manager_image,\n upgrade_longhorn_share_manager_image,\n upgrade_longhorn_backing_image_manager_image,\n client, core_api, volume_name, csi_pv, # NOQA\n pvc, pod_make, statefulset, storage_class): # NOQA\n longhorn_repo_url = upgrade_longhorn_repo_url\n longhorn_repo_branch = upgrade_longhorn_repo_branch\n longhorn_manager_image = upgrade_longhorn_manager_image\n longhorn_engine_image = upgrade_longhorn_engine_image\n longhorn_instance_manager_image = upgrade_longhorn_instance_manager_image\n longhorn_share_manager_image = upgrade_longhorn_share_manager_image\n longhorn_backing_image_manager_image = \\\n upgrade_longhorn_backing_image_manager_image\n\n host_id = get_self_host_id()\n pod_data_path = \"/data/test\"\n\n # Disable Auto Salvage Setting\n update_setting(client, SETTING_AUTO_SALVAGE, \"false\")\n\n # 2-1 Create vol_revision_enabled with revision counter enabled\n # attached to a node\n update_setting(client, SETTING_DISABLE_REVISION_COUNTER, \"false\")\n vol_revision_enabled_name = 'vol-revision-enabled'\n vol_revision_enabled, vol_revision_enabled_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_revision_enabled_name)\n\n # 2-2 Create vol_revision_disabled with revision counter disable\n # attached to a node\n update_setting(client, SETTING_DISABLE_REVISION_COUNTER, \"true\")\n vol_revision_disabled_name = 'vol-revision-disabled'\n vol_revision_disabled, vol_revision_disabled_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_revision_disabled_name)\n\n # 2-3 Create vol_rebuild for replica rebuilding after system upgrade\n # & engine live upgrade\n vol_rebuild_name = 'vol-rebuild'\n vol_rebuild, vol_rebuild_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_rebuild_name)\n\n # Create Volume used by Pod\n pod_volume_name = 'lh-vol-pod-test'\n pod_name, pv_name, pvc_name, pod_md5sum = \\\n prepare_pod_with_data_in_mb(client, core_api, csi_pv, pvc,\n pod_make, pod_volume_name,\n data_path=pod_data_path,\n add_liveness_probe=False)\n\n # Create multiple volumes used by StatefulSet\n statefulset_name = 'statefulset-upgrade-test'\n update_statefulset_manifests(statefulset,\n storage_class,\n statefulset_name)\n create_storage_class(storage_class)\n create_and_wait_statefulset(statefulset)\n statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)\n\n for sspod_info in statefulset_pod_info:\n sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api,\n sspod_info['pod_name'],\n sspod_info['data'])\n # upgrade Longhorn manager\n assert longhorn_upgrade(longhorn_repo_url,\n longhorn_repo_branch,\n longhorn_manager_image,\n longhorn_engine_image,\n longhorn_instance_manager_image,\n longhorn_share_manager_image,\n longhorn_backing_image_manager_image)\n\n client = get_longhorn_api_client()\n\n # wait for 1 minute before checking pod restarts\n time.sleep(60)\n\n # Check Pod and StatefulSet didn't restart after upgrade\n pod = core_api.read_namespaced_pod(name=pod_name,\n namespace='default')\n assert pod.status.container_statuses[0].restart_count == 0\n\n for sspod_info in statefulset_pod_info:\n sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],\n namespace='default')\n assert \\\n sspod.status.container_statuses[0].restart_count == 0\n\n # Check all volumes data after system upgrade\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_before_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n 
vol_revision_disabled_data_before_sys_upgrade)\n check_volume_data(vol_rebuild,\n vol_rebuild_data_before_sys_upgrade)\n\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)\n assert res_pod_md5sum == pod_md5sum\n\n # Write data to all volumes after system upgrade\n for sspod_info in statefulset_pod_info:\n sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api,\n sspod_info['pod_name'],\n sspod_info['data'])\n\n vol_revision_enabled_data_after_sys_upgrade = \\\n write_volume_random_data(vol_revision_enabled)\n vol_revision_disabled_data_after_sys_upgrade = \\\n write_volume_random_data(vol_revision_disabled)\n vol_rebuild_data_after_sys_upgrade = \\\n write_volume_random_data(vol_rebuild)\n\n # Check data written to all volumes\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_after_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n vol_revision_disabled_data_after_sys_upgrade)\n check_volume_data(vol_rebuild,\n vol_rebuild_data_after_sys_upgrade)\n\n # Detach the vol_revision_enabled & vol_revision_disabled,\n # and Delete Pod, and StatefulSet to detach theirvolumes\n\n statefulset['spec']['replicas'] = replicas = 0\n apps_api = get_apps_api_client()\n\n apps_api.patch_namespaced_stateful_set(\n name=statefulset_name,\n namespace='default',\n body={\n 'spec': {\n 'replicas': replicas\n }\n })\n\n delete_and_wait_pod(core_api, pod_name)\n\n # Upgrade all volumes engine images\n volumes = client.list_volume()\n for v in volumes:\n if v.name != vol_rebuild_name:\n volume = client.by_id_volume(v.name)\n volume.detach(hostId=\"\")\n wait_for_volume_detached(client, v.name)\n\n engineimages = client.list_engine_image()\n for ei in engineimages:\n if ei.image == longhorn_engine_image:\n new_ei = ei\n\n for v in volumes:\n volume = client.by_id_volume(v.name)\n volume.engineUpgrade(image=new_ei.image)\n\n # Recreate Pod, and StatefulSet\n statefulset['spec']['replicas'] = replicas = 2\n apps_api = get_apps_api_client()\n\n apps_api.patch_namespaced_stateful_set(\n name=statefulset_name,\n namespace='default',\n body={\n 'spec': {\n 'replicas': replicas\n }\n })\n\n wait_statefulset(statefulset)\n\n pod = pod_make(name=pod_name)\n pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]\n create_and_wait_pod(core_api, pod)\n\n # Attach the volume\n for v in volumes:\n if v.name == vol_revision_enabled_name or \\\n v.name == vol_revision_disabled_name:\n volume = client.by_id_volume(v.name)\n volume.attach(hostId=host_id)\n wait_for_volume_healthy(client, v.name)\n\n # Verify volume's engine image has been upgraded\n for v in volumes:\n volume = client.by_id_volume(v.name)\n engine = get_volume_engine(volume)\n assert engine.engineImage == new_ei.image\n assert engine.currentImage == new_ei.image\n\n # Check All volumes data\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)\n assert res_pod_md5sum == pod_md5sum\n\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_after_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n 
vol_revision_disabled_data_after_sys_upgrade)\n check_volume_data(vol_rebuild,\n vol_rebuild_data_after_sys_upgrade)\n\n # Delete one healthy replica for vol_rebuild to trigger the rebuilding\n delete_replica_on_test_node(client, vol_rebuild_name)\n # Make sure vol_rebuild replica is deleted\n replica_count = 2\n vol_rebuild = wait_for_volume_replica_count(client, vol_rebuild_name,\n replica_count)\n # vol_rebuild will become degraded and start replica rebuilding\n # Wait for replica rebuilding to complete\n # Verify the vol_rebuild is still healthy\n vol_rebuild = wait_for_volume_degraded(client, vol_rebuild_name)\n assert vol_rebuild.robustness == \"degraded\"\n vol_rebuild = wait_for_volume_healthy(client, vol_rebuild_name)\n assert vol_rebuild.robustness == \"healthy\"\n assert len(vol_rebuild.replicas) == 3", "def migration():", "def level_upgrade(self, lvl):\n\t\tpass", "def run_upgrade(args):\n upgrader = Upgrade(\n args.src,\n args.dst,\n PuppetUpgrader(args.src),\n disable_rollback=args.disable_rollback)\n\n upgrader.run()", "def upgrade_cmd(jail, release):\n lgr = ioc_logger.Logger('ioc_cli_upgrade')\n lgr = lgr.getLogger()\n\n jails, paths = IOCList(\"uuid\").list_datasets()\n _jail = {tag: uuid for (tag, uuid) in jails.items() if\n uuid.startswith(jail) or tag == jail}\n\n if len(_jail) == 1:\n tag, uuid = next(iter(_jail.items()))\n path = paths[tag]\n root_path = \"{}/root\".format(path)\n elif len(_jail) > 1:\n lgr.error(\"Multiple jails found for\"\n \" {}:\".format(jail))\n for t, u in sorted(_jail.items()):\n lgr.critical(\" {} ({})\".format(u, t))\n exit(1)\n else:\n lgr.critical(\"{} not found!\".format(jail))\n exit(1)\n\n pool = IOCJson().json_get_value(\"pool\")\n iocroot = IOCJson(pool).json_get_value(\"iocroot\")\n freebsd_version = checkoutput([\"freebsd-version\"])\n status, jid = IOCList.list_get_jid(uuid)\n conf = IOCJson(path).json_load()\n host_release = os.uname()[2]\n jail_release = conf[\"release\"]\n started = False\n\n if conf[\"release\"] == \"EMPTY\":\n lgr.critical(\"Upgrading is not supported for empty jails.\")\n exit(1)\n\n if conf[\"type\"] == \"jail\":\n if not status:\n IOCStart(uuid, tag, path, conf, silent=True)\n status, jid = IOCList.list_get_jid(uuid)\n started = True\n elif conf[\"type\"] == \"basejail\":\n lgr.critical(\"Please run \\\"iocage migrate\\\" before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n elif conf[\"type\"] == \"template\":\n lgr.critical(\"Please convert back to a jail before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n else:\n lgr.critical(\"{} is not a supported jail type.\".format(conf[\"type\"]))\n exit(1)\n\n _freebsd_version = \"{}/releases/{}/root/bin/freebsd-version\".format(\n iocroot, release)\n\n if \"HBSD\" in freebsd_version:\n Popen([\"hbsd-upgrade\", \"-j\", jid]).communicate()\n else:\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(root_path)):\n # 10.3-RELEASE and under lack this flag\n if float(host_release.partition(\"-\")[0][:5]) <= 10.3:\n lgr.critical(\"Host: {} is too old, please upgrade to \"\n \"10.3-RELEASE or above\".format(host_release))\n exit(1)\n\n os.environ[\"PAGER\"] = \"/bin/cat\"\n fetch = Popen([\"freebsd-update\", \"-b\", root_path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(root_path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(root_path),\n \"--currently-running {}\".format(jail_release), \"-r\",\n release, \"upgrade\"], stdin=PIPE)\n fetch.communicate(b\"y\")\n\n while not __upgrade_install__(root_path, release):\n 
pass\n\n if release[:4].endswith(\"-\"):\n # 9.3-RELEASE and under don't actually have this binary.\n new_release = release\n else:\n with open(_freebsd_version, \"r\") as r:\n for line in r:\n if line.startswith(\"USERLAND_VERSION\"):\n new_release = line.rstrip().partition(\"=\")[\n 2].strip(\n '\"')\n\n IOCJson(path, silent=True).json_set_value(\"release={}\".format(\n new_release))\n\n if started:\n IOCStop(uuid, tag, path, conf, silent=True)\n\n lgr.info(\"\\n{} ({}) successfully upgraded from {} to {}!\".format(\n uuid, tag, jail_release, new_release))", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_upgrade_apply_all_fine(setup, platform, skuba):\n\n setup_kubernetes_version(skuba)\n\n # node upgrade apply\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\n \"Node my-master-0 is up to date\"\n ) != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\n \"Node my-worker-0 is up to date\"\n ) != -1", "def final_run_hook(self, instance, status, private_data_dir):\n instance.log_lifecycle(\"finalize_run\")\n artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))\n collections_info = os.path.join(artifact_dir, 'collections.json')\n ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')\n\n if os.path.exists(collections_info):\n with open(collections_info) as ee_json_info:\n ee_collections_info = json.loads(ee_json_info.read())\n instance.installed_collections = ee_collections_info\n instance.save(update_fields=['installed_collections'])\n if os.path.exists(ansible_version_file):\n with open(ansible_version_file) as ee_ansible_info:\n ansible_version_info = ee_ansible_info.readline()\n instance.ansible_version = ansible_version_info\n instance.save(update_fields=['ansible_version'])", "def wait_for_upgrade(self, timeout=60):\n ret = None\n try:\n ret = self.upgradeprocess.wait(timeout=timeout)\n except psutil.TimeoutExpired as timeout_ex:\n msg = \"StarterManager: Upgrade command [%s] didn't finish in time: %d\" % (\n str(self.basedir),\n timeout,\n )\n raise TimeoutError(msg) from timeout_ex\n logging.info(\n \"StarterManager: Upgrade command [%s] exited: %s\",\n str(self.basedir),\n str(ret),\n )\n if ret != 0:\n raise Exception(\"Upgrade process exited with non-zero reply\")", "def environment_needs_upgrade(self, db):\n\n return False", "def do_migrate(self, arg): \n arg = str(arg).split(' ')\n arg = [i for i in arg if i != '']\n\n if len(arg) >= 2:\n if arg[0] == 'server':\n if len(arg) == 3:\n migrateServer(arg[0], arg[1], arg[2], arg[3], 'None')\n elif len(arg) == 2:\n migrateServer(arg[0], 'None', 'None', arg[1] , 'None')\n else:\n print \" Please enter \\'help migrate\\' for valid commands!\" \n \n elif arg[0] == 'client':\n if '@' in arg[1]:\n if len(arg) == 5:\n migrateServer(arg[0], arg[1], arg[2], arg[3], arg[4])\n elif len(arg) == 4:\n migrateServer(arg[0], arg[1], 'None', arg[2], arg[3]) \n else:\n print \" Please enter \\'help migrate\\' for valid commands!\" \n else:\n print \" Please enter \\'help migrate\\' for valid commands!\" \n else:\n print \" Please enter \\'help migrate\\' for valid commands!\"\n else:\n print \" Please enter \\'help migrate\\' for valid commands!\"", "def _sync_log_event(self):\n # sync only after first run and 
if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def _do_upgrade(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def _do_upgrade(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def handle_updates(self, update):\r\n self.__manage_pump()", "def test_update_deployment(self):\n pass", "def handle_adminbootthreaded(bot, ievent):\n ievent.untildone = True\n ievent.reply(\"reloading all plugins\")\n if 'saveperms' in ievent.rest: boot(force=True, saveperms=True, clear=True)\n else: boot(force=True, saveperms=False, clear=True)\n ievent.done()", "def setUp(self):\n self.wes_server_process = subprocess.Popen('python {} --backend=wes_service.toil_wes --opt=\"extra=--logLevel=CRITICAL\"'\n ''.format(os.path.abspath('wes_service/wes_service_main.py')),\n shell=True)\n time.sleep(5)", "def test_new_upgrade_already_notified(\n mocker, state, slack, ouw_oc_map, ouw_ocm_map, upgrade_config, dt\n):\n state.exists.return_value = True\n state.get.return_value = None\n dt.utcnow.return_value = upgrade_at + timedelta(hours=1)\n gso = mocker.patch(\n \"reconcile.openshift_upgrade_watcher._get_start_osd\", autospec=True\n )\n gso.return_value = upgrade_at.strftime(\"%Y-%m-%dT%H:%M:%SZ\"), upgrade_version\n ouw.notify_upgrades_start(\n ocm_map=ouw_ocm_map,\n oc_map=ouw_oc_map,\n clusters=[load_cluster(\"cluster1.yml\")],\n state=state,\n slack=slack,\n )\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0", "def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. 
'\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return", "async def wrap_up(self):\n if Config().is_central_server():\n await super().wrap_up()", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)" ]
[ "0.69765425", "0.66025215", "0.64390016", "0.5890137", "0.5890137", "0.5887266", "0.5863103", "0.5858503", "0.57888055", "0.5738239", "0.572192", "0.5695196", "0.5680773", "0.5659436", "0.5635215", "0.5601957", "0.5586011", "0.5567595", "0.5544726", "0.5493787", "0.5493098", "0.5425805", "0.5408277", "0.5391213", "0.5383084", "0.5370809", "0.53705597", "0.5367161", "0.5344926", "0.53347164", "0.53327364", "0.5332555", "0.53313905", "0.5317456", "0.53079975", "0.52975595", "0.5285857", "0.5283144", "0.5276441", "0.5266051", "0.5246471", "0.5243849", "0.52390134", "0.52254647", "0.52204674", "0.52188873", "0.5205639", "0.5196437", "0.5181447", "0.5179966", "0.517186", "0.5159829", "0.51536983", "0.5148614", "0.5143196", "0.5138235", "0.5133367", "0.51238394", "0.512145", "0.5120333", "0.5115836", "0.5109448", "0.5100977", "0.509514", "0.50939244", "0.5091456", "0.50866294", "0.50851417", "0.50828713", "0.5081328", "0.507677", "0.5075652", "0.5060159", "0.5060159", "0.5055305", "0.50545436", "0.5048769", "0.50387716", "0.50381917", "0.50373185", "0.5032234", "0.5028001", "0.5024932", "0.5024932", "0.5023454", "0.5011921", "0.50088", "0.5008498", "0.50080484", "0.50070393", "0.49969673", "0.49969673", "0.49928123", "0.4991649", "0.498605", "0.49849698", "0.49770287", "0.49719474", "0.49706733", "0.49634215" ]
0.53627825
28
check whether all spawned arangods are fully booted
def is_instance_up(self): logging.debug("checking if starter instance booted: " + str(self.basedir)) if not self.instance.is_running(): message = "Starter Instance {0.name} is gone!".format(self) logging.error(message) raise Exception(message) # if the logfile contains up and running we are fine lfs = self.get_log_file() regx = re.compile(r"(\w*) up and running ") for line in lfs.splitlines(): match = regx.search(line) if match: groups = match.groups() if len(groups) == 1 and groups[0] == "agent": continue return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n for r in self.robots:\n r.joints = None\n while r.joints is None and not rospy.is_shutdown():\n try:\n r.joints = rospy.wait_for_message(\n r.ns + '/joint_states', JointState, timeout=3.0)\n except:\n rospy.logerr(\"Current /joint_states not ready yet.\\n\\\n Do you spawn the robot and launch ros_control?\")\n try:\n r.model_index = rospy.wait_for_message('/gazebo/model_states', ModelStates, 3).name.index(r.ns[1:])\n except rospy.exceptions.ROSException:\n rospy.logerr(\"Robot model does not exist.\")\n\n # rospy.logdebug(\"ALL SYSTEMS READY\")\n return True", "def allready(antReady) :\n return numNotready(antReady) == 0", "def _check_all_systems_ready(self):\n \n self._check_all_sensors_ready()\n #self._check_joint_states_ready()\n self._check_cmd_vel_pub()\n \n return True", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def _check_all_sensors_ready(self):\n \n self._check_dist_ready()\n self._check_angle_ready()\n self._check_odom_ready()\n self._check_distsb_ready()\n self._check_anglesb_ready()\n \n return True", "def check_all_systems_ready(self):\n joint_states_msg = None\n while joint_states_msg is None and not rospy.is_shutdown():\n try:\n joint_states_msg = rospy.wait_for_message(\"/joint_states\", JointState, timeout=0.1)\n self.joints_state = joint_states_msg\n rospy.logdebug(\"Current joint_states READY\")\n except Exception as e:\n self._ctrl_conn.start_controllers(controllers_on=\"joint_state_controller\") \n rospy.logdebug(\"Current joint_states not ready yet, retrying==>\"+str(e))\n\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0", "def _check_awaiting(self):\r\n # TODO: check for wait loops\r\n for w in list(self._awaiting.values()):\r\n self._try_register_platform(w[\"instance\"], w[\"kind\"], w[\"parent\"], w[\"wait\"], awaiting=True)", "def is_bootable(self):\n return self.bootable_flag == 0x80", "def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. 
To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()", "def all_enter(self):\n return self.num_enters == self.num_workers", "def needs_bootstrap(self):\n return (\n not self.bootstrapped\n or (\n datetime.utcnow() - self.last_bootstrapped > timedelta(seconds=self.bootstrap_interval)\n and self.run_migrations\n )\n )", "def is_ready() -> bool:\n return True", "def pilotIsBootValid (self):\n return self.isBootValid()", "def all_is_running(self):\r\n return all(p.running for p in self._platforms.values())", "def all_pods_are_ready(self, app_name):\n app_pod_exists = False\n for pod in self.list_pods(namespace=self.project):\n if app_name in pod.name and 'build' not in pod.name and 'deploy' not in pod.name:\n app_pod_exists = True\n if not pod.is_ready():\n return False\n if app_pod_exists:\n logger.info(\"All pods are ready!\")\n return True\n\n return False", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def _on_thats_all(self) -> None:\n self._should_start_ringing_rounds = True", "def check_full(self):\n switch = False\n for battery in self.batteries.values():\n if battery.full() is True:\n switch = True\n\n return switch", "def is_full(self):\n core_full = self.drone.complete() and self.subject.complete()\n if self.peds is None:\n return core_full\n else:\n return core_full and all([p.complete() for p in self.peds.values()])", "def check_ready(self):\r\n print \"Checking ready\"\r\n\t\tif self.game.trough.is_full():\r\n print \"Ready\"\r\n\t\t\tself.ready()\r\n\t\t\treturn True\r\n\t\tprint \"Not Ready\"\r\n\t\treturn False", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def is_done(self):\n\n # Robosuite envs always rollout to fixed horizon.\n return False", "def at_least_one_alive(self, containers):\n for container in self.get_standard_containers(containers):\n # Update container variables so that status is accurate.\n container.container.reload()\n if container.container.status != 'exited':\n return True\n return False", "def check_services_ready(self, services):\n for ser in services:\n services[ser] = False\n response = self.bus.wait_for_response(Message(\n 'mycroft.{}.is_ready'.format(ser)))\n if response and response.data['status']:\n services[ser] = True\n return all([services[ser] for ser in services])", "def wait_for_everyone():\n PartialState().wait_for_everyone()", "def is_ringing(self) -> bool:", "def is_full(self):\n return len(self.walls) == 4", "async def should_handle(self):\n local_controller = self.controller\n cavern = local_controller.caverns\n if local_controller.hives and not cavern:\n return False\n if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):\n return False\n if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):\n return False\n if cavern.ready:\n return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)\n return not local_controller.floating_buildings_bm", "def is_ready(self) -> bool:\n pass", "def exitPreMolecule(self):\n if self.molecules == 0 and max(self.departed.values()) > 0:\n print(\"these atoms exited before first molecule created: \", end=\"\")\n print([atom.name for atom,count in self.departed.items() if count>0])\n return False\n return True", "def is_ready(cls):\n\n return False", "def health_ok(self):\n for client 
in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def is_ready(self):\n if not self.is_accessible:\n return False\n\n is_ready_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no stat /var/lib/cloud/instance/boot-finished > /dev/null'\n rc = subprocess.call(is_ready_cmd.format(ip=self._ip), shell=True)\n\n logger.info(\"Checking if {} is ready\".format(self._ip))\n if rc != 0:\n return False\n\n return True", "def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True", "def is_ready(self):\n return self.parent_depencencies_left == 0", "def _is_valid(self):\n if len(self.slots) == 0:\n print(\"Parking Lot not created\")\n return False\n return True", "def isstarted():", "def _wait_initialized(client, instance_id_list):\n logging.info('Waiting for instances to be initialized.')\n while True:\n res = client.describe_instance_status(InstanceIds=instance_id_list)\n if len(res['InstanceStatuses']) == 0:\n time.sleep(10)\n continue\n if all([ s['InstanceStatus']['Status'] == 'ok' for s in res['InstanceStatuses'] ]):\n logging.info('Instances are initialized now.')\n return\n time.sleep(10)", "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")", "def all_healthy(self):\n for attempt_number in range(360):\n\n try:\n\n if all([\n self.rabbitmq_resource_healthy(),\n self.galera_resource_healthy(),\n self.redis_resource_healthy(),\n self.vips_resource_healthy(),\n self.ha_proxy_cinder_healthy(),\n self.ovn_resource_healthy()\n ]):\n LOG.info(\"pcs status checks: all resources are\"\n \" in healthy state\")\n return True\n else:\n\n LOG.info(\"pcs status check: not all resources are \"\n \"in healthy \"\n \"state\")\n raise PcsResourceException()\n except PcsResourceException:\n # reread pcs status\n LOG.info('Retrying pacemaker resource checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.pcs_df = get_pcs_resources_table()\n # exhausted all retries\n tobiko.fail('pcs cluster is not in a healthy state')", "def pilotValidateBoot (self):\n return self.validateBoot()", "def race_condition():\n if len(allocated_pids) != len(set(allocated_pids)):\n return True\n else:\n return False", "def is_ready(self):\n for dependency in self.dependencies:\n if not dependency.is_finished:\n return False\n # If all dependencies are finished we're ready.\n return True", "def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def 
is_booted(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def other_threads_are_active():\n return len(fake_threads) >= 2", "async def _async_has_devices(opp: OpenPeerPower) -> bool:\n # TODO Check if there are any devices that can be discovered in the network.\n devices = await opp.async_add_executor_job(my_pypi_dependency.discover)\n return len(devices) > 0", "def check_apps_ready(self):\n if not self.apps_ready:\n raise RuntimeError(\"Apps aren't loaded yet.\")", "def is_full(self):\r\n return self.num_checkers == self.width * self.height", "def is_ready(self) -> bool:\n is_ready = True\n if self.head_pod is not None:\n is_ready = self.head_pod.is_ready.is_set()\n if is_ready:\n for shard_id in self.shards:\n is_ready = self.shards[shard_id].is_ready\n if is_ready and self.uses_before_pod is not None:\n is_ready = self.uses_before_pod.is_ready.is_set()\n if is_ready and self.uses_after_pod is not None:\n is_ready = self.uses_after_pod.is_ready.is_set()\n if is_ready and self.gateway_pod is not None:\n is_ready = self.gateway_pod.is_ready.is_set()\n return is_ready", "def is_ready_to_reap(self):\n self.calc_progress()\n return self._num_results > 0 and (\n self._num_results == self.num_sown_batches\n )", "async def check(self):\n if await self.is_water_level_critical():\n _LOGGER.debug(\"Water level critical - pump should be off\")\n else:\n for run in self._runs:\n if run.run_now(self._now):\n _LOGGER.debug(\"Pool pump should be on now: %s\", run)\n await self._switch_pool_pump(STATE_ON)\n return\n # If we arrive here, the pool pump should be off.\n _LOGGER.debug(\"Pool pump should be off\")\n await self._switch_pool_pump(STATE_OFF)", "def cluster_ready(self,cluster_name,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n cluster = self.cluster(project_id,cluster_name)\n pprint.pprint(cluster)\n return cluster['stateName'] == 'IDLE'", "def has_initial_states(self):\n return len(self.initial_states()) > 0", "def _is_initialized(self) -> bool:\n return len(self) > 0", "def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()", "def check_device_state(self):", "def _wait_for_ready(self):\n while not self._ready_to_evict():\n if self._ready_waited > self._ready_timeout:\n raise ClusterTimeout()\n\n time.sleep(self.POLL_PERIOD)\n self._ready_waited += self.POLL_PERIOD\n\n self._mds_map = self._volume_client._rados_command(\"mds dump\", {})", "def ready(self):\n return len(self.player1.ships) == len(\n self.player2.ships) == len(self.SHIP_INFO)", "def test_ready_true(self):\n\n mock_player = Mock()\n mock_stage = Mock()\n mock_stage._ready_list = []\n mock_stage.game.players = [mock_player]\n\n JobStage.ready(mock_stage, mock_player)\n mock_stage.can_end.assert_called_once_with()", "def _ensure_gazebo_startup(self):\n model_base_name = \"__start_up_box__\"\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)\n i = 0\n while True:\n # Get all models currently listed in Gazebo's models\n # that contain the model_base_name in their name.\n boxes = list(b for b in self.model_names if model_base_name in b)\n\n if len(boxes) != 0:\n for b in boxes:\n self._remove_model(b)\n return\n i += 1\n self._spawn_model(f\"\"\"<model name=\"{model_base_name}{i}\"></model>\"\"\")\n rospy.sleep(0.1)\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)", "def slaves_found(self):\n return not (len(self.topology) and self.topology[0][1] == [])", "def is_setup(self):\n return 
self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()", "def probe(self):\n return False", "def has_root_lanes(self):\n return flask.request.library.has_root_lanes", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def should_keep_running(self):\n return len(self.party.active_users())", "def is_ready(self):\n return (self.is_calibrated() and not self.has_error()\n and not self.is_moving())", "def ready(self):\n return self.shader is not None and self.texturesReady()", "def CheckBoot(self, instance):\n try:\n serial_out = self.GetSerialPortOutput(instance=instance, port=1)\n self.CheckBootFailure(serial_out, instance)\n return ((self.BOOT_COMPLETED_MSG in serial_out)\n or (self.BOOT_STARTED_MSG in serial_out))\n except errors.HttpError as e:\n if e.code == 400:\n logger.debug(\"CheckBoot: Instance is not ready yet %s\", str(e))\n return False\n raise", "def wait_for_restore(self):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.detect_restore_restart()", "def check_any_light_on(bridge):\n for i,group in bridge.get_group().items():\n if group['state']['any_on']:\n return True\n return False", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def is_done(self):\n return not any((agent.is_alive() for agent in self.agents))", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False", "def check_powerups(self):\n for powerup in self.pjs.powerups:\n block = powerup.rects[0]\n if block.overlap(self.rects[0]):\n self.eat(powerup)", "def has_running_builders(self):\n # Thread counting may not work\n if len(self.builders) <= 0:\n return False\n\n for b in self.builders:\n if b.is_alive():\n return True\n else:\n self.builders.remove(b)\n\n return False", "def _IsReady(self):\n return self._GetPod()['status']['phase'] != 'Pending'", "def allPreExited(self, curAtom):\n if self.departed[curAtom] == curAtom.value:\n print(\"No %s atoms left in molecule to exit\" % curAtom.name)\n return False\n return True", "def is_network_appbase_ready(props):\n if \"HIVEIT_BLOCKCHAIN_VERSION\" in props:\n return False\n elif \"HIVE_BLOCKCHAIN_VERSION\" in props:\n return True", "def available(self) -> bool:\n return len(self._state) > 0", "def needs_rebuild(self):\n for p in self.sys.particles:\n dr = p.r - self.old_pos[p.id]\n dr.apply_periodic(self.sys.box)\n 
if dr.length() >= 0.5*self.pad:\n return True \n return False", "def wait_start_success(self) -> None:\n try:\n if self.uses_before_pod is not None:\n self.uses_before_pod.wait_start_success()\n if self.uses_after_pod is not None:\n self.uses_after_pod.wait_start_success()\n if self.head_pod is not None:\n self.head_pod.wait_start_success()\n if self.gateway_pod is not None:\n self.gateway_pod.wait_start_success()\n for shard_id in self.shards:\n self.shards[shard_id].wait_start_success()\n except:\n self.close()\n raise", "def fusable(self) -> bool:\n if not self._pre_check() or not self.has_crossing_len2_ob():\n return False\n new_tiling = self._tiling.add_obstructions(self.obstructions_to_add())\n\n return (\n self._tiling == new_tiling\n and self._check_isolation_level()\n and all(\n self._can_component_fuse_assumption(assumption)\n for assumption in self._tiling.assumptions\n )\n )", "def test_ready_false(self):\n\n mock_player = Mock()\n mock_stage = Mock()\n mock_stage._ready_list = []\n mock_stage.game.players = [mock_player, Mock()]\n\n JobStage.ready(mock_stage, mock_player)\n self.assertFalse(mock_stage.can_end.called)", "def running(self) -> bool:", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())", "def is_ready(self):\n ready = True\n for browser in self.browsers:\n if 'exe' in self.browsers[browser]:\n exe = self.browsers[browser]['exe']\n if not os.path.isfile(exe):\n logging.critical(\"Browser executable is missing for %s: '%s'\", browser, exe)\n ready = False\n return ready", "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def is_ready_to_run(self, at_time):\n return (self.next_time - at_time) <= 0", "def ready(self):\n return True", "def check_if_full(self):\n pass", "def alive(units):\n res = False\n for i in units:\n if i.get_health > 0:\n res = True\n break\n return res", "async def _async_has_devices(hass) -> bool:\n gree_discovery = Discovery(DISCOVERY_TIMEOUT)\n devices = await gree_discovery.scan(wait_for=DISCOVERY_TIMEOUT)\n return len(devices) > 0" ]
[ "0.6885012", "0.6798123", "0.6798123", "0.67851114", "0.6722148", "0.6583287", "0.6581712", "0.63905674", "0.62930745", "0.6158512", "0.61108637", "0.6062468", "0.6051119", "0.59692967", "0.59578604", "0.5939921", "0.59331906", "0.5890832", "0.5845868", "0.5824541", "0.5808965", "0.5783782", "0.5770312", "0.57315373", "0.5730855", "0.57266945", "0.5724709", "0.5714111", "0.57105076", "0.57096696", "0.5705998", "0.56724596", "0.56649077", "0.56531626", "0.56474435", "0.5646728", "0.56392473", "0.56362545", "0.56267124", "0.5621953", "0.5602304", "0.55915356", "0.5587181", "0.55819505", "0.55672044", "0.5563339", "0.55485535", "0.55441797", "0.55414927", "0.5534931", "0.5532262", "0.5524656", "0.5524645", "0.55201143", "0.55091774", "0.55083346", "0.5494758", "0.5494634", "0.5490117", "0.54883635", "0.54855525", "0.54782265", "0.5475617", "0.547475", "0.5473971", "0.54705137", "0.5468734", "0.54640925", "0.5460647", "0.54495734", "0.5444971", "0.54393905", "0.542236", "0.54203326", "0.5418448", "0.5415621", "0.54131025", "0.5406912", "0.5397293", "0.5396209", "0.53961736", "0.53951347", "0.53902334", "0.5385911", "0.53717", "0.5364959", "0.53628224", "0.5362459", "0.5361758", "0.53564066", "0.53563833", "0.5352701", "0.5352235", "0.53520775", "0.53513545", "0.5347861", "0.53455836", "0.5344008", "0.534195", "0.5337726" ]
0.55383885
49
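The positive document in the record above decides readiness by scanning the starter's log for lines of the form `<role> up and running`, skipping the agent. Below is a minimal standalone sketch of that log-scan pattern, assuming a plain-text log file; the function name, the example path, and the ignored-role tuple are illustrative assumptions, not part of the dataset, and unlike the original this sketch does not also verify that the process is still alive.

```python
import re
from pathlib import Path

# Same marker the record's positive document searches for: a role name
# followed by "up and running " (the trailing space matches the original).
UP_RE = re.compile(r"(\w+) up and running ")


def instance_looks_booted(log_path: Path, ignore_roles=("agent",)) -> bool:
    """Return True once any non-ignored role reports 'up and running'.

    Illustrative re-implementation of the readiness check in the positive
    document; process liveness is intentionally out of scope here.
    """
    if not log_path.exists():
        return False
    for line in log_path.read_text(errors="replace").splitlines():
        match = UP_RE.search(line)
        if match and match.group(1) not in ignore_roles:
            return True
    return False


# Hypothetical usage:
#   instance_looks_booted(Path("/tmp/starter/arangodb.log"))
```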
terminate the instance of this starter (it should kill all its managed services)
def terminate_instance(self, keep_instances=False): lh.subsubsection("terminating instances for: " + str(self.name)) logging.info( "StarterManager: Terminating starter instance: %s", str(self.default_starter_args + self.arguments) ) logging.info("This should terminate all child processes") self.instance.terminate() logging.info("StarterManager: waiting for process to exit") exit_code = self.instance.wait() self.add_logfile_to_report() # workaround BTS-815: starter exits 15 on the wintendo: if IS_WINDOWS and exit_code == 15: exit_code = 0 if exit_code != 0: raise Exception("Starter %s exited with %d" % (self.basedir, exit_code)) old_log = self.basedir / "arangodb.log.old" logging.info( "StarterManager: done - moving logfile from %s to %s", str(self.log_file), str(old_log), ) if old_log.exists(): old_log.unlink() self.log_file.rename(old_log) for instance in self.all_instances: instance.rename_logfile() if not instance.detect_gone(): print("Manually terminating instance!") instance.terminate_instance(False) if keep_instances: for i in self.all_instances: i.pid = None i.ppid = None return False # Clear instances as they have been stopped and the logfiles # have been moved. ret = False for instance in self.all_instances: print("u" * 80) if instance.search_for_warnings(True): ret = True self.is_leader = False self.all_instances = [] return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? \" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def terminate(self):\n self._running = False", "def terminate(self):\n self._worker.kill()", "def close(self):\n subprocess.call([\"pkill\", \"controller\"])", "def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')", "def terminate(self) -> None:\n self.robot.terminate_all()", "def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None", "def terminate(self):\n self.unregister()\n self.zk.stop()\n self.zk.close()", "def terminate(self):\n self._running = False", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "async def terminate(self, restart=False) -> None:\n pass", "def terminate(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.terminate()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def gracefully_terminate(self):\n self.running = False", "def terminate(self, hostname):\n # unique names\n matches = list(filter(lambda x: x.name == hostname, self.instances))\n\n if len(matches) == 0:\n # already terminated\n return\n elif len(matches) == 1:\n instance = matches[0]\n # terminate it\n self.names.append(instance.name)\n self.instances.remove(instance)\n # actual deletion from openstack\n status = self.nova.servers.get(instance.id).status\n\n while status == 'BUILD':\n time.sleep(5)\n status = self.nova.servers.get(instance.id).status\n print(\"Waiting for VM to finish BUILD before terminating.\")\n instance.delete()\n print(\"Worker VM \" + hostname + \" deleted.\")\n else:\n # inconsistency in the system\n raise ValueError('More than one of same name in self.instances')", "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Change time.sleep to wait until process of same service but different PID is up and running\n time.sleep(0.5)", "def terminate(self):\n self._proc.terminate()", "def terminate(self):\n if self.is_connected:\n self.close()\n\n self.factory.manager.terminate()", "def terminate_all(self):\n self._stop_all('terminate')", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def stop(self):\n self.scion_sh('stop')", "def stop(params) -> None:\n check_root()\n stop_streamer(params)\n unload_kernel_module(params)\n stop_microservice(params)", "def stop():\n if env.latest and not env.python3:\n sudo('/bin/systemctl stop demo-latest.service', shell=False)\n elif env.latest and env.python3:\n sudo('/bin/systemctl stop 
demo-latest-py3.service', shell=False)\n else:\n # demo site is multi instance, cant do supervisor for now\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def destroy(self):\n\n dcgm_agent.dcgmShutdown()\n self._thread_pool.terminate()\n self._thread_pool.close()", "def test_terminate_run(self):\n pass", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def stop():\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def terminate_controller(cls, args, config):\n logging.debug(\"MOLNSController.terminate_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n instance_list = config.get_all_instances(controller_id=controller_obj.id)\n logging.debug(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n print(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n # Check if they are running or stopped\n if len(instance_list) > 0:\n for i in instance_list:\n if i.worker_group_id is None:\n status = controller_obj.get_instance_status(i)\n if status == controller_obj.STATUS_RUNNING or status == controller_obj.STATUS_STOPPED:\n print \"Terminating controller running at {0}\".format(i.ip_address)\n controller_obj.terminate_instance(i)\n else:\n worker_name = config.get_object_by_id(i.worker_group_id, 'WorkerGroup').name\n worker_obj = cls._get_workerobj([worker_name], config)\n status = worker_obj.get_instance_status(i)\n if status == worker_obj.STATUS_RUNNING or status == worker_obj.STATUS_STOPPED:\n print \"Terminating worker '{1}' running at {0}\".format(i.ip_address, worker_name)\n worker_obj.terminate_instance(i)\n else:\n print \"No instance running for this controller\"", "def shutdown(self):\n\n self.log.debug(\"Shutting down %s application\" % self.app_name)\n cmd_output = admin_tasks.manage_service(self.app_name, 'stop')\n if cmd_output:\n self.log.debug('Getting application process data')\n self.log.info('Application service has been shutdown')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.error('Application service shutdown failed')\n sys.exit(1)", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def shutdown(self):\n self.exit_app()", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "def terminate():\n sys.exit()", "def terminate():\n leds.on()\n time.sleep(1)\n leds.off()\n\n GPIO.cleanup()", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except 
psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def cleanupAtExit():\n \n global client\n \n client.stop()", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def terminate(self):\n if self.proc:\n logging.info(\"Terminating Proxy Server...\")\n self.proc.terminate()\n self.proc = None", "def force_stop(self):\n\n # Stopping thread\n self.quit()\n\n # Killing all running processes\n ProcessManager(self.cf_process).close_all_child()\n ProcessManager(self.server_process).close_all_child()", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def terminate(self):\n return", "def terminate(self):\n return", "def teardown(self):\n # Only terminate if instance is running\n if self.instance:\n instance_status = aws.check_instance_status(self.config, self.instance.id)\n if instance_status == \"running\":\n aws.terminate_ec2_instance(self.config, self.instance.id)\n super().teardown()", "def stop(self):\n self.api.stop()", "def tear_down(self):\n self.destroy_env()\n self.dut.kill_all()", "def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)", "def shutdown():\n # attempt to call cleanup() for the running service\n try:\n service_class.cleanup()\n except Exception:\n sys.stderr.write(\"Error in %s.cleanup():\\n\" % service_class.__name__)\n traceback.print_exc(file=sys.stderr)\n sys.stderr.flush()\n\n # \"yarn dev\" doesn't pass SIGTERM to its children - to be safe, kill all\n # subprocesses of the child process first\n try:\n # children() returns parent processes first - start with children\n # instead to make killing \"yarn dev\" more reliable\n for subchild in reversed(child.children(recursive=True)):\n try:\n subchild.terminate()\n except psutil.NoSuchProcess:\n # we may have already caused it to exit by killing its parent\n pass\n child.terminate()\n except psutil.NoSuchProcess:\n # child already exited\n pass\n\n child.wait()\n if exit_mode == ExitMode.CHILD and child.returncode != 0:\n sys.stdout.buffer.write(child_stdout.to_bytes())\n sys.stdout.flush()\n sys.stderr.write(\n \"Subprocess %r exited with error %i:\\n\"\n % (command, child.returncode)\n )\n sys.stderr.buffer.write(child_stderr.to_bytes())\n sys.stderr.flush()", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def destroy():\n # instance first\n old_vm = _existing.vm\n _destroy_resource('vm')\n if not dry and old_vm is not None:\n # Wait for instance to be fully terminated before carrying on or we will have\n # dependency issues.\n print('Waiting for instance to be terminated before deleting other resources...')\n old_vm.wait_until_terminated()\n time.sleep(1) # One would think that wait for terminated should be enough...\n\n _destroy_resource('disk')\n\n # detach before destroy\n _detach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n _destroy_resource('igw')\n\n # sg and sub before vpc\n _destroy_resource('sg')\n _destroy_resource('sub')\n\n _destroy_resource('vpc')", "def 
terminate(self):", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stopService(self):\n self.world.stop()", "def stop(self):\n if self.p.is_alive():\n self.p.terminate()", "async def stop(self):\n debug(\"stop() called\")\n\n if self.n2vc and self._running and not self._stopping:\n self._running = False\n self._stopping = True\n\n # Destroy the network service\n try:\n await self.n2vc.DestroyNetworkService(self.ns_name)\n except Exception as e:\n debug(\n \"Error Destroying Network Service \\\"{}\\\": {}\".format(\n self.ns_name,\n e,\n )\n )\n\n # Wait for the applications to be removed and delete the containers\n for application in self.charms:\n try:\n\n while True:\n # Wait for the application to be removed\n await asyncio.sleep(10)\n if not await self.n2vc.HasApplication(\n self.ns_name,\n application,\n ):\n break\n\n # Need to wait for the charm to finish, because native charms\n if self.state[application]['container']:\n debug(\"Deleting LXD container...\")\n destroy_lxd_container(\n self.state[application]['container']\n )\n self.state[application]['container'] = None\n debug(\"Deleting LXD container...done.\")\n else:\n debug(\"No container found for {}\".format(application))\n except Exception as e:\n debug(\"Error while deleting container: {}\".format(e))\n\n # Logout of N2VC\n try:\n debug(\"stop(): Logging out of N2VC...\")\n await self.n2vc.logout()\n self.n2vc = None\n debug(\"stop(): Logging out of N2VC...Done.\")\n except Exception as ex:\n debug(ex)\n\n # Let the test know we're finished.\n debug(\"Marking test as finished.\")\n # self._running = False\n else:\n debug(\"Skipping stop()\")", "def on_shutdown(self):\n self.factory.core.master_local_token.cleanup()\n self.api.stop()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.server.stop()\n self.driver.quit()", "def terminate(self):\n self._proxy.ibroadcast(\n component_type=\"actor\", tag=MessageTag.EXIT, session_type=SessionType.NOTIFICATION\n )\n self.logger.info(\"Exiting...\")", "def __del__(self):\n self._proc.kill()", "def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def terminate(self):\n if self._process and self._process.is_alive():\n self.log.info(\"Sending termination message to manager.\")\n try:\n self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)\n except ConnectionError:\n pass", "def stop(self):\n self.killed = True", "def stop(self):\n # Cleanup platform first.\n self.cleanup()\n\n if self.init_lhost:\n self._lhost.stop()\n\n self.status = False # pylint: disable=attribute-defined-outside-init", "def stop():\n app = get_vistrails_application()\n app.finishSession()\n app.save_configuration()\n app.destroy()", "def __exit__(self, exc_type, exc_val, exc_tb):\n printy(\"Cleaning after myself...\")\n 
self.key.delete()\n if self.instance:\n self.instance.terminate()\n # wait for the machine to terminate\n self.wait_for_status(48)\n\n self.sec_grp.delete()\n os.remove(self.key.name + \".pem\")\n printy(\"Builder teardown complete\")", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def kill(self):\n\n self.proc.kill()", "def cleanup(self):\n\t\tprint('ServiceRegister cleanup started.')\n\t\tself.process.terminate()\n\t\tself.process.wait()\n\t\tprint('ServiceRegister cleanup done.')", "def stopzmq(self):\n\n self.context.destroy()", "def stop(self):\n self.microblaze.reset()", "def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()", "def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)", "def stop(self):\n for c in self.openstack_endpoints.values():\n c.stop()\n #for c in self.openstack_endpoints.values():\n # if c.server_thread:\n # print(\"Waiting for WSGIServers to be stopped ...\")\n # c.server_thread.join()", "def stop(self):\n logger.info('Orchestrator is stopping')\n self._stop = True\n if self.settings.PARALLEL_EXECUTION:\n self.pool.terminate()\n self.pool.join()", "def _destruct(self, should_close=False):\n if (self.running or should_close) and not self.existing:\n self.running = False\n self._starting = False\n\n\n # Wait for the process to start.\n time.sleep(1)\n # kill the minecraft process and its subprocesses\n try:\n shutil.rmtree(self.instance_dir)\n except:\n print(\"Failed to delete the temporary minecraft directory.\")\n\n if self._kill_minecraft_via_malmoenv(self.host, self.port):\n # Let the minecraft process term on its own terms.\n time.sleep(2)\n\n # Now lets try and end the process if anything is laying around\n try:\n InstanceManager._reap_process_and_children(psutil.Process(self.minecraft_process.pid))\n except psutil.NoSuchProcess: \n pass\n\n self.watcher_process.terminate()\n\n if self in InstanceManager._instance_pool:\n InstanceManager._instance_pool.remove(self)\n self.release_lock()\n pass", "def stop(self):\n self.__logger__.info(\"Stopping Ozone Cluster\")\n call([Command.docker_compose, \"-f\", self.docker_compose_file, \"down\"])\n Blockade.blockade_destroy()", "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False\n self._terminate()", "def tearDownClass(cls):\n TestDataspaceWrapper.SERVER_STARTED.terminate()\n os.remove(DB)", "def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)", "def stop(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('stop')\n _validate_force(force, 'stop')\n\n logger.notice('Stopping Cloudify Manager services...')\n for component in components:\n if not component.skip_installation:\n component.stop()\n logger.notice('Cloudify Manager services successfully stopped!')\n _print_time()", "def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], 
ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue", "def __exit__(self):\n self._stop_all()", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def teardown(self):\n\n\t\tself.shutdown = True\n\t\tself.terminate_process()", "def terminate(self):\n self.sock.close()\n try:\n self.process.terminate()\n self.process.wait(timeout=self.STOP_TIMEOUT)\n except TimeoutExpired:\n self.process.kill()\n shutil.rmtree(self.rundir)", "def stop_instance(InstanceId=None, Force=None):\n pass", "def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services", "def kill(self):\n self._destruct()\n pass", "def __del__(self):\n if self.running:\n self.stop()", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def stop_app():\n try:\n get_gui_controller(create_if_missing=False).stop()\n except threadprop.NoControllerThreadError:\n stop_all_controllers(sync=False)", "def __del__(self):\n self.shutdown()" ]
[ "0.7309345", "0.71649116", "0.7040128", "0.6952882", "0.6948429", "0.69379115", "0.69061065", "0.68995476", "0.6895416", "0.6874266", "0.6873975", "0.6823828", "0.68103665", "0.6805319", "0.6787299", "0.6752569", "0.6720022", "0.66706365", "0.6654536", "0.6638244", "0.6632896", "0.6629232", "0.6606192", "0.6605709", "0.6582731", "0.6575973", "0.65611106", "0.65441686", "0.6541875", "0.6537609", "0.6534146", "0.6529632", "0.6503729", "0.65023744", "0.6491055", "0.6471047", "0.6469066", "0.6464074", "0.6453714", "0.6447117", "0.64468247", "0.6446169", "0.64461607", "0.643726", "0.64354604", "0.64121443", "0.64050806", "0.64050806", "0.6397654", "0.63953173", "0.63946503", "0.63906235", "0.6388346", "0.638489", "0.6383549", "0.6383472", "0.63763714", "0.63756", "0.6368878", "0.63687974", "0.63678694", "0.63677263", "0.63644934", "0.63604486", "0.63603514", "0.63599116", "0.6351323", "0.6345067", "0.6340096", "0.63356197", "0.63320106", "0.6322064", "0.6313122", "0.6313122", "0.63130033", "0.631082", "0.631077", "0.63072693", "0.63061464", "0.63028824", "0.6296882", "0.62937164", "0.62925094", "0.62900865", "0.6280134", "0.62796015", "0.6279356", "0.62791675", "0.62776333", "0.6275885", "0.6274596", "0.6271459", "0.62671876", "0.62591803", "0.6256472", "0.6254473", "0.6252307", "0.6251882", "0.6251005", "0.6246376" ]
0.6931023
6
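The positive document in the record above shuts a starter down with a terminate → wait → exit-code check → logfile-rotation sequence. Below is a minimal sketch of that same sequence for a generic child process, assuming a `subprocess.Popen` handle; the function name, the `RuntimeError`, and the `.old` rotation suffix are illustrative assumptions, while the Windows exit-code-15 workaround is taken from the document itself.

```python
import platform
import subprocess
from pathlib import Path


def shutdown_starter(proc: subprocess.Popen, log_file: Path) -> None:
    """Terminate a managed process, verify its exit code, rotate its log.

    Illustrative sketch of the terminate/wait/check/rename sequence in the
    positive document; error handling is deliberately minimal.
    """
    proc.terminate()         # ask the process to stop
    exit_code = proc.wait()  # block until it is really gone

    # The document treats exit code 15 on Windows as a clean shutdown
    # (its BTS-815 workaround); any other non-zero code is an error.
    if platform.system() == "Windows" and exit_code == 15:
        exit_code = 0
    if exit_code != 0:
        raise RuntimeError(f"starter exited with {exit_code}")

    # Rotate the logfile so a restarted instance writes a fresh one.
    old_log = log_file.with_name(log_file.name + ".old")
    if old_log.exists():
        old_log.unlink()
    log_file.rename(old_log)
```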
kill the instance of this starter (it won't kill its managed services)
def kill_instance(self): logging.info("StarterManager: Killing: %s", str(self.default_starter_args + self.arguments)) self.instance.kill() try: logging.info(str(self.instance.wait(timeout=45))) self.add_logfile_to_report() except Exception as ex: raise Exception("Failed to KILL the starter instance? " + repr(self)) from ex logging.info("StarterManager: Instance now dead.") self.instance = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Change time.sleep to wait until process of same service but different PID is up and running\n time.sleep(0.5)", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n\n self.proc.kill()", "def close(self):\n subprocess.call([\"pkill\", \"controller\"])", "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def stop(self):\n self.scion_sh('stop')", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def stop():\n if env.latest and not env.python3:\n sudo('/bin/systemctl stop demo-latest.service', shell=False)\n elif env.latest and env.python3:\n sudo('/bin/systemctl stop demo-latest-py3.service', shell=False)\n else:\n # demo site is multi instance, cant do supervisor for now\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def stop(self):\n self.killed = True", "def stop_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"stop\", service_name])", "def kill(self):\n self.send_signal(signal.SIGKILL)", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == 
\"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def stop():\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def kill(self):\n self._process.kill()", "def terminate(self):\n self._running = False", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def stop(params) -> None:\n check_root()\n stop_streamer(params)\n unload_kernel_module(params)\n stop_microservice(params)", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "async def kill(self, restart: bool = False) -> None:\n pass", "def kill(self):\n self.error_code = 'KILLED'\n self.running = False", "def kill(self):\n \n self.killSlavePids()", "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def remote_kill():", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False", "def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()", "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)", "def kill(self):\n self.child.kill()", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def stop(self):\n\n # immediate is necessary if it's in recovery (for now).\n # we don't care the result.\n master = gp.MasterStop(\"Stopping Master Standby\",\n self.datadir, mode='immediate')\n master.run()", "def stop(self):\n self.api.stop()", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def stop():\n server = current_server()\n server.stop()", "def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True", "def stop(self):\n if self._process is not None:\n self._process.terminate()", "def stop_server(self):\n 
p = pexpect.spawn('/usr/bin/pkill', ['-f', self.wallet_bin_path])\n p.wait()\n if p.status is not 0:\n raise ValueError('Error pkilling ETH:\\n{}'.format(p.read()))", "def __del__(self):\n self._proc.kill()", "def kill(self, id):", "def kill(self, id):", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def kill(self):\n #overridden for documentation purposes\n stackless.tasklet.kill(self)", "def stopService(self):\n self.world.stop()", "def stop_app():\n try:\n get_gui_controller(create_if_missing=False).stop()\n except threadprop.NoControllerThreadError:\n stop_all_controllers(sync=False)", "def stop(self):\n self.microblaze.reset()", "def stop(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"stop\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def kill(self):\n self._destruct()\n pass", "def terminate(self):\n self._worker.kill()", "def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def force_stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGINT)", "def stop(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n\n try:\n file_ipmi_console_pid = \"{}/{}/.ipmi_console.pid\".\\\n format(config.infrasim_home, instance)\n with open(file_ipmi_console_pid, \"r\") as f:\n pid = 
f.readline().strip()\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n os.remove(file_ipmi_console_pid)\n except IOError:\n # When pid file is missing, by e.g., node destroy,\n # find process id by instance name\n if instance == \"default\":\n process_name = \"ipmi-console start$\"\n else:\n process_name = \"ipmi-console start {}\".format(instance)\n\n ps_cmd = r\"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1\".format(process_name)\n logger_ic.warning(\"Fail to find ipmi console pid file, check by:\")\n logger_ic.warning(\"> {}\".format(ps_cmd))\n _, pid = run_command(cmd=ps_cmd)\n logger_ic.warning(\"ipmi console pid got: {}\".format(pid))\n if not pid:\n logger_ic.warning(\"ipmi console for instance {} is not running\".format(instance))\n return\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n except Exception:\n logger_ic.warning(traceback.format_exc())\n pass", "def kill(self):\n self._exit = True", "def cli(ctx):\n with process_manager.process_manager(**ctx.parent.cm_kwargs) as pm:\n pm.shutdown()", "def terminate(self):\n self._running = False", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def _destruct(self, should_close=False):\n if (self.running or should_close) and not self.existing:\n self.running = False\n self._starting = False\n\n\n # Wait for the process to start.\n time.sleep(1)\n # kill the minecraft process and its subprocesses\n try:\n shutil.rmtree(self.instance_dir)\n except:\n print(\"Failed to delete the temporary minecraft directory.\")\n\n if self._kill_minecraft_via_malmoenv(self.host, self.port):\n # Let the minecraft process term on its own terms.\n time.sleep(2)\n\n # Now lets try and end the process if anything is laying around\n try:\n InstanceManager._reap_process_and_children(psutil.Process(self.minecraft_process.pid))\n except psutil.NoSuchProcess: \n pass\n\n self.watcher_process.terminate()\n\n if self in InstanceManager._instance_pool:\n InstanceManager._instance_pool.remove(self)\n self.release_lock()\n pass", "def power_off(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.stop()\n self.instance_waiter.wait(instance, self.instance_waiter.STOPPED)\n return True", "def terminate(self):\n self.unregister()\n self.zk.stop()\n self.zk.close()", "def GET_kill(self):\n sys.exit(0)", "def platform_stop(self):\n self.platform.stop()", "def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()", "def kill(self):\n self.status = Modem.Status.KILL", "def kill(self):\n if self.process is not None:\n LOGGER.info('Killing command...')\n self.process.kill()\n self.process = None", "def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')", "async def terminate(self, restart=False) -> None:\n pass", "def kill(self):\n self.rpc.call(MsfRpcMethod.SessionMeterpreterSessionKill, [self.sid])", "def kill(ctx, analytic_host, analytic_port):\n client = aceclient.ConfigClient(host=analytic_host, port=analytic_port)\n client.kill()", "def stop_framework (driver):\n status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1\n driver.stop();\n sys.exit(status)", "def stop():\n app = get_vistrails_application()\n app.finishSession()\n 
app.save_configuration()\n app.destroy()", "def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')", "def kill(self):\n if self.client is None:\n # never started, can't stop - should be warning or exception?\n return False\n try:\n self.client.kill()\n except Py4JError:\n logger.debug(\"Error while attempting to kill\", exc_info=1)\n # fallback\n self.yarn_api.kill(self.app_id)\n if self.proc is not None:\n self.client_gateway.shutdown()\n if on_windows:\n call([\"cmd\", \"/c\", \"taskkill\", \"/f\", \"/t\", \"/pid\",\n str(self.proc.pid)])\n self.proc.terminate()\n self.proc.communicate()\n self.proc = None\n self.client = None\n out = self.runtime_status() == 'KILLED'\n return out", "def kill(self):\n if self.g is not None:\n self.g.kill()", "def daemonControlStop (self):\n self.stop()", "def killExperiment(self, **kwargs):\n if kwargs['kill']=='YES':\n killRobot.sshKill()", "def terminate(self, hostname):\n # unique names\n matches = list(filter(lambda x: x.name == hostname, self.instances))\n\n if len(matches) == 0:\n # already terminated\n return\n elif len(matches) == 1:\n instance = matches[0]\n # terminate it\n self.names.append(instance.name)\n self.instances.remove(instance)\n # actual deletion from openstack\n status = self.nova.servers.get(instance.id).status\n\n while status == 'BUILD':\n time.sleep(5)\n status = self.nova.servers.get(instance.id).status\n print(\"Waiting for VM to finish BUILD before terminating.\")\n instance.delete()\n print(\"Worker VM \" + hostname + \" deleted.\")\n else:\n # inconsistency in the system\n raise ValueError('More than one of same name in self.instances')", "def stop(self):\n if self.p.is_alive():\n self.p.terminate()", "def stop( self ):\n log.info( \"Stopping mesosbox\" )\n self.__patch_etc_hosts( { 'mesos-master': None } )", "def kill(self):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if not currentApplication in self.__appsThatCantBeKilled:\r\n self.phone.comment('exit.kill()')\r\n self.phone.sx(self.__killCommand)\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.warn('Not allowed to kill \"%s\" application using SX' % currentApplication)", "def kill(self):\n\t\tself.kill_subcomponents()\n\t\tself._subcomponents.clear()\n\t\tself.bug_world = None\n\n\t\ttry:\n\t\t\tself.ci.deregister_all()\n\t\texcept:\n\t\t\tpass", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def gracefully_terminate(self):\n self.running = False", "def _kill_kernel(self):", "def _kill():\n\n messagebox.showerror(\n title=const.TSSD_ERROR_TITLE,\n message=const.TSSD_ERROR_MSG\n )\n\n sys.exit(3)", "def stop(self, bGraceful = False):\n\t\treturn Job(SDK.PrlVm_Stop(self.handle, bGraceful)[0])" ]
[ "0.7303505", "0.7108762", "0.70873225", "0.70490146", "0.69961184", "0.6964912", "0.6952806", "0.69508827", "0.6916327", "0.688939", "0.686323", "0.681936", "0.6806005", "0.6758173", "0.6745181", "0.6741397", "0.6677145", "0.6674688", "0.6668985", "0.6658009", "0.6651729", "0.66455394", "0.6630754", "0.66288483", "0.6611984", "0.66058433", "0.6586204", "0.6570232", "0.65641314", "0.6548713", "0.654383", "0.6542271", "0.65409946", "0.65334624", "0.6494666", "0.64941245", "0.6493329", "0.6473647", "0.6471612", "0.64496976", "0.64360094", "0.6420235", "0.6413615", "0.6412743", "0.6407743", "0.64011997", "0.6400608", "0.63966334", "0.63884026", "0.63831156", "0.63831156", "0.6375844", "0.63741684", "0.637043", "0.6367443", "0.6365705", "0.6347196", "0.6345836", "0.63408154", "0.63364685", "0.6335955", "0.6321923", "0.63189334", "0.6318522", "0.6317023", "0.63124174", "0.6304137", "0.6291569", "0.6280326", "0.6275224", "0.62739635", "0.6269066", "0.62675196", "0.62665087", "0.6265895", "0.62635654", "0.6263396", "0.6253893", "0.6243941", "0.6237992", "0.62341505", "0.6226847", "0.6224762", "0.6221743", "0.62195706", "0.6219132", "0.6214339", "0.62125874", "0.62095565", "0.620679", "0.6205588", "0.6201305", "0.61968225", "0.61964595", "0.6195696", "0.6190823", "0.61890054", "0.6183554", "0.6183461", "0.6176019" ]
0.7906417
0
Replace the parts of the installation with information after an upgrade: kill the starter processes of the old version, revalidate that the old arangods are still running and alive, and replace the starter binary with a new one; this has not yet spawned any children.
def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True): # On windows the install prefix may change, # since we can't overwrite open files: old_version = self.cfg.version self.default_starter_args = new_install_cfg.default_starter_args.copy() self.enterprise = new_install_cfg.enterprise self.replace_binary_setup_for_upgrade(new_install_cfg) with step("kill the starter processes of the old version"): logging.info("StarterManager: Killing my instance [%s]", str(self.instance.pid)) self.kill_instance() with step("revalidate that the old arangods are still running and alive"): self.detect_instance_pids_still_alive() if relaunch: with step("replace the starter binary with a new one," + " this has not yet spawned any children"): self.respawn_instance(new_install_cfg.version) logging.info("StarterManager: respawned instance as [%s]", str(self.instance.pid)) self.arangosh = None self.detect_arangosh_instances(new_install_cfg, old_version)
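A minimal usage sketch for the function above, assuming a StarterManager-like object that exposes the methods referenced in its body (kill_instance, detect_instance_pids_still_alive, respawn_instance); the mgr and new_cfg names are hypothetical, and pairing it with the command_upgrade helper (which appears only among the negatives below) is an assumption, not something the record itself states:

def run_starter_upgrade(mgr, new_cfg):
    # swap binaries: kills the old starter, revalidates that the old
    # arangods are still alive, and respawns the starter from the new
    # installation (relaunch=True)
    mgr.replace_binary_for_upgrade(new_cfg, relaunch=True)
    # the respawned starter has not spawned children yet, so explicitly
    # command it to perform the rolling upgrade of its arangod instances
    mgr.command_upgrade()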
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def upgrade(self):", "def upgrade(self):", "def replace_binary_setup_for_upgrade(self, new_install_cfg):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n self.cfg.set_directories(new_install_cfg)\n if self.cfg.hot_backup_supported:\n self.hotbackup_args = [\n \"--all.rclone.executable\",\n self.cfg.real_sbin_dir / \"rclone-arangodb\",\n ]", "def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error 
message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def _prepare_self_update(settings):\n\n\t# sanity check: ensure that that this routine only runs once\n\tif portage._bin_path != portage.const.PORTAGE_BIN_PATH:\n\t\treturn\n\n\t# Load lazily referenced portage submodules into memory,\n\t# so imports won't fail during portage upgrade/downgrade.\n\t_preload_elog_modules(settings)\n\tportage.proxy.lazyimport._preload_portage_submodules()\n\n\t# Make the temp directory inside $PORTAGE_TMPDIR/portage, since\n\t# it's common for /tmp and /var/tmp to be mounted with the\n\t# \"noexec\" option (see bug #346899).\n\tbuild_prefix = os.path.join(settings[\"PORTAGE_TMPDIR\"], \"portage\")\n\tportage.util.ensure_dirs(build_prefix)\n\tbase_path_tmp = tempfile.mkdtemp(\n\t\t\"\", \"._portage_reinstall_.\", build_prefix)\n\tportage.process.atexit_register(shutil.rmtree, base_path_tmp)\n\n\torig_bin_path = portage._bin_path\n\tportage._bin_path = os.path.join(base_path_tmp, \"bin\")\n\tshutil.copytree(orig_bin_path, portage._bin_path, symlinks=True)\n\n\torig_pym_path = portage._pym_path\n\tportage._pym_path = os.path.join(base_path_tmp, \"pym\")\n\tshutil.copytree(orig_pym_path, portage._pym_path, symlinks=True)\n\n\tfor dir_path in (base_path_tmp, portage._bin_path, portage._pym_path):\n\t\tos.chmod(dir_path, 0o755)", "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))", "def main():\n\n # a quick way to verify the version\n if getscriptversion:\n print('This script is running version: ' + scriptversion)\n exit(0)\n\n # verify that environmental and script requirements are met\n 
requirements()\n\n # pretty the screen up\n clear()\n # do the MD5 checksum\n checkmd5sum()\n if not user or not password:\n getcreds()\n\n # if device_file is provided parse the lines into a list of devices\n if device_file:\n with open(device_file) as line:\n devices = line.readlines()\n devices = [x.strip() for x in devices]\n else:\n devices = args.devices.split(\",\")\n\n for device in devices:\n\n device = Acos(device)\n print('')\n print('')\n print(dev_addr + ' ' + '{:*^100}'.format('Begin upgrade log for ' + dev_addr))\n print(dev_addr + ' ' + '{:*^100}'.format('Performing pre-upgrade checks'))\n\n # check if the device is online before running\n status = device.checkstatus()\n if status == 'FAIL':\n continue\n\n # authenticate to the device\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n # get the device hostname\n device.get_hostname()\n\n # get the currently running version\n version = device.get_running_ver()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing upgrade'))\n\n # if we are running 4.1.0 we have to use a different upgrade method\n if '4.1.0' in version:\n response = device.gui_upgrade(user, password)\n if response == 'FAIL':\n continue\n # for other versions just use the normal method\n else:\n response = device.upgrade()\n if response == 'FAIL':\n continue\n bootvar = device.get_bootvar()\n\n # if the user has specified they'd like to update the boot variable\n if updatebootvar:\n # why do work that we don't have to\n if partition in bootvar:\n print(dev_addr + ' Bootvar update requested, but not necessary, device already set to boot from ' + partition)\n # if you're not already set to boot from the partition we installed to, update the bootvar\n else:\n device.update_bootvar()\n # if the user wants to reboot to initialize the new code reboot the box\n if reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n if not reboot:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + 'You have requested the device not reboot, in order to initialize the new code you will need to reboot the device')\n # if you install to a partition the device won't reboot to, we probably want to stop you from shooting yourself in the foot\n elif not partition in bootvar:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + ' You have chosen to install to the partition that the device does not currently boot from.')\n print(dev_addr + ' If you wish for the device to run the new code upon reboot you need to update the boot variable manually.')\n if reboot:\n print(dev_addr + ' You have also requested a reboot which will not invoke the new code, SKIPPING REBOOT')\n elif reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n # technically we could still use the old AXAPI token, however for sake of code clarity we're going to do a quick log off then back on\n # the alternative would be having to shove the remaining steps below into each of the appropriate loops making this a 
bit more\n # spaghettish than it already is\n else:\n device.axapi_logoff()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing post-upgrade checks'))\n\n # since it is very likely the box has rebooted, and our old token is gone, lets get a new one\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n\n # find out where the device was booted from\n bootdefault = device.get_bootvar()\n\n # get the version of the currently booted partition\n device.get_ver(bootdefault)\n\n # get the current boot variable\n device.get_bootvar()\n\n # get the current running version\n device.get_running_ver()\n\n # log off\n device.axapi_logoff()\n print(dev_addr + ' ' + '{:*^100}'.format(' End upgrade log for ' + dev_addr))", "def tweak_new_filesystem(root_dir):\n\n # create a symlink for insserv\n force_symlink('../usr/lib/insserv/insserv',\n os.path.join(root_dir, 'sbin/insserv'))\n\n # create a symlink for awk\n force_symlink('mawk', os.path.join(root_dir, 'usr/bin/awk'))\n\n # Nvidia keeps packaging up a broken post-install script for their cudnn\n # deb. Freaking nvidia\n cudnn_postinst_path = 'var/lib/dpkg/info/libcudnn6-dev.postinst'\n cudnn_postinst_path = os.path.join(root_dir, cudnn_postinst_path)\n\n if os.path.exists(cudnn_postinst_path):\n with open(cudnn_postinst_path, 'r') as infile:\n content = infile.read()\n if not content.startswith(\"#!\"):\n with open(cudnn_postinst_path, 'w') as outfile:\n outfile.write('#! /bin/sh\\n')\n outfile.write(content)\n\n # NOTE(josh): patch the base-packages post-install hook so it doesn't\n # complain about files in /var/run\n basefiles_path = os.path.join(root_dir,\n 'var/lib/dpkg/info/base-files.postinst')\n if os.path.exists(basefiles_path):\n apply_patch_text(BASE_FILES_PATCH, root_dir)\n\n # NOTE(josh): ifupdown should depend on initscripts, but it doesn't\n status_path = os.path.join(root_dir, 'var/lib/dpkg/status')\n tempfile_path = status_path + '.tmp'\n with open(tempfile_path, 'wb') as outfile:\n with open(status_path, 'rb') as infile:\n for line in infile:\n outfile.write(line)\n if line.strip() == 'Package: ifupdown':\n break\n\n for line in infile:\n if line.startswith('Depends: '):\n line = ', '.join(line.strip().split(', ') + ['initscripts']) + '\\n'\n outfile.write(line)\n break\n else:\n outfile.write(line)\n\n for line in infile:\n outfile.write(line)\n os.rename(tempfile_path, status_path)\n\n # NOTE(josh): resolvconf tries to a write a file in this directory\n try:\n target_path = os.path.join(root_dir, 'run/resolvconf/interface')\n os.makedirs(target_path)\n except OSError:\n if not os.path.isdir(target_path):\n raise\n\n # NOTE(josh): Can't postinst makedev without CAP_MKNOD\n if os.getuid() != 0:\n makedev_postinst = os.path.join(root_dir,\n 'var/lib/dpkg/info/makedev.postinst')\n if os.path.exists(makedev_postinst):\n os.rename(makedev_postinst, makedev_postinst + '.bak')\n\n # remove temporary/boostrap files\n files_to_remove = ['etc/apt/sources.list.d/bootstrap.list']\n\n for filename in files_to_remove:\n file_path = os.path.join(root_dir, filename)\n if os.path.exists(file_path):\n os.remove(file_path)", "def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n i.terminate_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n 
moreargs,\n True,\n )\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n [],\n False,\n )", "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def deploy():\n update_treesheets()\n restart_treesheets()", "def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def pre_stop_backup_cores(self, env):\n import params\n env.set_params(params)\n\n if compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:\n solr_home_dir=params.solr_data_dir\n else: #4.1.0.0\n solr_home_dir=params.old_lib_dir + \"/data\"\n\n unique = get_unique_id_and_date()\n backup_solr_dir=\"/tmp/upgrades/{0}/solr_{1}\".format(params.version, unique)\n backup_solr_cores=\"/tmp/solr/cores\"\n\n if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):\n os.makedirs(backup_solr_dir)\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_dir),\n sudo=True\n )\n\n if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:\n Directory(backup_solr_cores,\n action=\"delete\",\n create_parents=True)\n\n Directory(backup_solr_cores,\n mode=0755,\n cd_access='a',\n owner=params.solr_user,\n create_parents=True,\n group=params.user_group\n )\n\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_cores),\n user=params.solr_user\n )", "def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = 
re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)", "def _update(self):\n candidates = _find_running_exe(path.join(self.run_dir, \"osiris\"))\n\n try:\n if not candidates: # No process running found\n self.processes = None\n # Try to find a job in queue\n jobs = _get_grid_jobs()\n if not jobs: # Either no qstat or empty list\n self.running_mode = \"\"\n else:\n script_path = path.abspath(path.join(self.run_dir, \"start.sh\"))\n valid_jobs = list(filter(lambda j: j[\"script\"] == script_path, jobs))\n if valid_jobs:\n if len(valid_jobs) > 1:\n logger.warning(\"More than one grid job was found for the run.\")\n self.job = valid_jobs[0]\n self.running_mode = \"grid\"\n else: # No queued job\n self.running_mode = \"\"\n\n else:\n self.processes = list(map(psutil.Process, candidates))\n self.running_mode = \"local\"\n\n except psutil.NoSuchProcess:\n # If the processes have died before processing was completed.\n self.processes = None\n self.running_mode = \"\"", "def update():\n with cd(env.directory):\n\n # update plone\n result = sudo('git pull', user=env.deploy_user)\n quick_update = 'Already up-to-date.' 
in result\n\n if quick_update:\n # Plonesite Recipe replaces site on the fly\n print 'UPDATE: No full Buildout required: {0:s}'.format(result)\n # buildout\n stop()\n sudo('./bin/buildout install plonesite', user=env.deploy_user)\n start()\n\n else:\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n\n sudo('rm -rf ./var/blobstorage', user=env.deploy_user)\n sudo('rm -rf ./var/filestorage', user=env.deploy_user)\n sudo('rm .installed.cfg', user=env.deploy_user)\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo('./bin/zeoclient_debug adduser admin admin', user=env.deploy_user) # noqa: E501\n\n # load page twice to fill cache and prevent a bug showing raw html\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501", "def replaceAll(folderCurrent):\n folder7zip = os.path.abspath(os.path.join(folderCurrent, '_thirdparty',\n '7zip'))\n folderRelease = os.path.abspath(os.path.join(folderCurrent,\n '../', '_RELEASE'))\n folderRoot = os.path.abspath(os.path.join(folderCurrent,\n '../'))\n norelease = 0\n os.chdir(folderRelease)\n releaseNumber = []\n for file in glob.glob(\"*.zip\"):\n releaseName = file.replace('.zip', '')\n LatestVersion = re.compile(r'(\\d+)$').search(releaseName).group(1)\n releaseNumber.append(LatestVersion)\n if not releaseNumber:\n releaseNumber = ['0']\n norelease = 1\n versionNumberString = ':'.join(releaseNumber)\n versionNumber = findLargestNumber(versionNumberString)\n if (norelease == 1):\n versionNumber = versionNumber\n else:\n versionNumber = versionNumber + 1\n versionNumber = str(versionNumber)\n print(versionNumber)\n fileReleaseName = 'PC2DedicatedServerWrapper_1.' 
+ versionNumber + '.zip'\n\n fileRelase = os.path.abspath(os.path.join(folderRelease,\n fileReleaseName))\n\n os.chdir(folder7zip)\n os.system('7za.exe a -t7z \"' + fileRelase + '\" \"' + folderRoot + '\" -xr!_SRC -xr!.git* -xr!_RELEASE -xr!DedicatedServerWrapperGUI*')", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501", "def test_update(executable):\n from tempfile import mkdtemp\n from shutil import rmtree\n from pylada.jobfolder.jobfolder import JobFolder\n from pylada.process.jobfolder import JobFolderProcess\n from pylada import default_comm\n from functional import Functional\n\n root = JobFolder()\n for n in xrange(3):\n job = root / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n supp = JobFolder()\n for n in xrange(3, 6):\n job = supp / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n\n comm = default_comm.copy()\n comm['n'] = 4\n\n dir = mkdtemp()\n try: \n program = 
JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n assert program.keepalive \n\n # compute current jobs.\n program.start(comm)\n program.wait()\n assert hasattr(program, '_comm')\n\n # compute second set of updated jobs\n program.update(supp)\n program.wait()\n\n finally:\n try: rmtree(dir)\n except: pass\n\n # check with deleteold=True\n dir = mkdtemp()\n try: \n program = JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n assert program.keepalive \n\n # compute current jobs.\n program.start(comm)\n program.wait()\n assert hasattr(program, '_comm')\n\n # compute second set of updated jobs\n program.update(supp, deleteold=True)\n assert hasattr(program, '_comm')\n program.wait()\n\n finally:\n try: rmtree(dir)\n except: pass", "def upgrade(self, command=\"upgrade\"):\n # execute pkgin\n popen = Popen([self.pkgin_bin, \"-y\", command], stdout=PIPE, stdin=PIPE,\n stderr=PIPE)\n # retrieve output streams\n (stdoutdata, stderrdata) = popen.communicate()\n # if pkgin error\n if(stderrdata):\n # remove the line feed\n error = stderrdata[0:-1]\n raise PykginError(error)\n # retrieve output\n output_raw = stdoutdata\n # create a list which contain each packages\n output_whole_list = output_raw.split('\\n')\n # add infos to a dict\n output = {}\n for line in output_whole_list:\n if line.find(\"to be upgraded:\") != -1:\n # extract usefull string\n infos = line.split(':')[1].split('(')\n # extract string which contains packages list\n packages_list = infos[0].strip().split(' ')\n # extract version of each packages\n packages_upgraded = []\n for pkg in packages_list:\n packages_upgraded.append(\\\n self.__extract_package_version(pkg))\n output['packages_upgraded'] = packages_upgraded\n if line.find(\"to be installed:\") != -1:\n # extract usefull string\n infos = line.split(':')[1].split('(')\n # extract string which contains packages list\n packages_list = infos[0].strip().split(' ')\n # extract download and install size from the rest of the\n # string\n download_size = infos[1].split(' ')[0]\n install_size = infos[1].split(' ')[3]\n # extract version of each packages\n packages_installed = []\n for pkg in packages_list:\n packages_installed.append(\\\n self.__extract_package_version(pkg))\n output['packages_installed'] = packages_installed\n output['download_size'] = download_size\n output['install_size'] = install_size\n\n return output", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def onPreFork(self):", "def main():\n parser = setup_parser()\n args = parser.parse_args()\n\n global LOG\n if args.debug:\n LOG.setLevel(logging.DEBUG)\n\n changes = retrieve_pkgdb_change()\n LOG.debug('%s changes retrieved' % len(changes))\n orphaned = {}\n unorphaned = {}\n changed = {}\n for change in changes:\n pkg_name = change['msg']['package_listing']['package']['name']\n owner = change['msg']['package_listing']['owner']\n branch = change['msg']['package_listing']['collection']['branchname']\n user = change['msg']['agent']\n LOG.debug('%s changed to %s by %s on %s' % (\n pkg_name, owner, user, branch))\n pkg = PkgChange(\n name=pkg_name,\n 
summary=change['msg']['package_listing']['package']['summary'],\n branch=branch,\n new_owner=owner,\n user=user,\n )\n\n if owner == 'orphan':\n if pkg_name in orphaned:\n orphaned[pkg_name].add_branch(branch)\n else:\n orphaned[pkg_name] = pkg\n elif owner == user:\n if pkg_name in orphaned:\n del(orphaned[pkg_name])\n\n if pkg_name in unorphaned:\n unorphaned[pkg_name].add_branch(branch)\n else:\n unorphaned[pkg_name] = pkg\n else:\n if pkg_name in orphaned:\n del(orphaned[pkg_name])\n\n if pkg_name in changed:\n changed[pkg_name].add_branch(branch)\n else:\n changed[pkg_name] = pkg\n\n # Orphaned packages might have been deprecated:\n retired_info = retrieve_pkgdb_retired()\n retired = {}\n for pkg in retired_info:\n pkg_name = pkg['msg']['package_listing']['package']['name']\n LOG.debug('Retired: %s' % (pkg_name))\n if pkg_name in orphaned:\n pkg = orphaned[pkg_name]\n del(orphaned[pkg_name])\n pkg.new_owner = 'retired'\n retired[pkg_name] = pkg\n\n hours = int(DELTA) / 3600\n report = 'Change in ownership over the last %s hours\\n' % hours\n report += '=' * (40 + len(str(hours))) + '\\n'\n\n report += '\\n%s packages were orphaned\\n' % len(orphaned)\n report += '-' * (len(str(len(orphaned))) + 23) + '\\n'\n for pkg in orphaned:\n report += orphaned[pkg].to_string() + '\\n'\n report += ' ' * 5 + orphaned[pkg].summary + '\\n'\n report += ' ' * 5 + 'https://admin.fedoraproject.org/pkgdb/'\\\n 'acls/name/%s\\n' % orphaned[pkg].name\n\n report += '\\n%s packages unorphaned\\n' % len(unorphaned)\n report += '-' * (len(str(len(unorphaned))) + 20) + '\\n'\n for pkg in unorphaned:\n if unorphaned[pkg].unorphaned():\n report += unorphaned[pkg].to_string() + '\\n'\n\n report += '\\n%s packages were retired\\n' % len(retired)\n report += '-' * (len(str(len(retired))) + 23) + '\\n'\n for pkg in retired:\n report += retired[pkg].to_string() + '\\n'\n report += ' ' * 5 + retired[pkg].summary + '\\n'\n report += ' ' * 5 + 'https://admin.fedoraproject.org/pkgdb/'\\\n 'acls/name/%s\\n' % retired[pkg].name\n\n report += '\\n%s packages changed owner\\n' % len(changed)\n report += '-' * (len(str(len(changed))) + 23) + '\\n'\n for pkg in changed:\n if not changed[pkg].unorphaned():\n report += changed[pkg].to_string() + '\\n'\n\n report += '\\n\\nSources: https://github.com/pypingou/fedora-owner-change'\n\n if args.nomail:\n print report\n else:\n send_report(report)", "def _restart(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def test_update_resumed_install(self):\n ctx, graph = self._make_ctx_and_graph()\n\n node = self._make_node(\n operations={\n 'cloudify.interfaces.lifecycle.create':\n self._make_operation(operation='plugin1.op1'),\n 'cloudify.interfaces.lifecycle.delete':\n self._make_operation(operation='plugin1.op2')\n },\n plugins=[{'name': 'plugin1', 'package_name': 'plugin1'}]\n )\n instance = self._make_instance()\n pr = self._make_lifecycle_processor(\n ctx, graph,\n nodes=[node],\n instances=[instance]\n )\n with current_workflow_ctx.push(ctx):\n pr.install()\n\n # after creating the install graph, resume it - it should first\n # delete the instance, before re-installing it\n ctx.resume = True\n instance['state'] = 'creating'\n pr._update_resumed_install(graph)\n\n delete_task_index = None\n install_task_index = None\n for ix, task in enumerate(graph.linearize()):\n if task.name == 'plugin1.op1':\n install_task_index = ix\n elif task.name == 'plugin1.op2':\n delete_task_index = ix\n\n assert install_task_index is not None\n assert delete_task_index is not None\n 
assert delete_task_index < install_task_index", "def _prepareNewChild(self):\n\n # Register exit listener. We cannot immediately spawn new child when we\n # get a modified event. Must wait that child has closed database etc.\n atexit.register(self._childExitHandler)\n\n # Make sure that PID files and locks stay here, because dying child\n # will clear them.\n self.makeLockFile()\n self.makePidFile()\n\n self.storage_index.restore()\n\n notify(NewChildForked(self))\n\n autoinclude.include_deferred()\n fiveconfigure.install_deferred()", "def clean_master():", "def _restartProcessNormal(self) -> None:\n\n if IS_WIN_SVC in sys.argv:\n reactor.callFromThread(reactor.stop)\n return\n\n python = sys.executable\n argv = list(sys.argv)\n\n def addExe(val):\n if not \"run_peek_\" in val:\n return val\n if isWindows and not val.lower().endswith(\".exe\"):\n return val + \".exe\"\n return val\n\n argv = map(addExe, argv)\n os.execl(python, python, *argv)", "def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])", "def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n num_attributes = self.params.get(\"num_attributes\", '/run/attrtests/*')\n ior_api = self.params.get(\"api\", '/run/ior/*')\n mount_dir = self.params.get(\"mount_dir\", '/run/dfuse/*')\n self.show_daos_version(all_hosts, hosts_client)\n\n # (2)Create pool container and pool attributes\n self.log.info(\"(2)==Create pool attributes.\")\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = self.get_daos_command()\n pool_attr_dict = self.create_data_set(num_attributes)\n self.pool.pool.set_attr(data=pool_attr_dict)\n self.verify_pool_attrs(pool_attr_dict)\n self.container.close()\n self.pool.disconnect()\n\n # (3)Setup and run IOR\n self.log.info(\"(3)==Setup and run IOR.\")\n result = run_pcmd(hosts_client, \"mkdir -p {}\".format(mount_dir))\n ior_timeout = self.params.get(\"ior_timeout\", '/run/ior/*')\n iorflags_write = self.params.get(\"write_flg\", '/run/ior/iorflags/*')\n iorflags_read = self.params.get(\"read_flg\", '/run/ior/iorflags/*')\n testfile = os.path.join(mount_dir, \"testfile\")\n testfile_sav = os.path.join(mount_dir, \"testfile_sav\")\n testfile_sav2 = os.path.join(mount_dir, \"testfile_sav2\")\n symlink_testfile = os.path.join(mount_dir, \"symlink_testfile\")\n # (3.a)ior dfs\n if ior_api in (\"DFS\", \"POSIX\"):\n 
self.log.info(\"(3.a)==Run non-HDF5 IOR write and read.\")\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n\n # (3.b)ior hdf5\n elif ior_api == \"HDF5\":\n self.log.info(\"(3.b)==Run IOR HDF5 write and read.\")\n hdf5_plugin_path = self.params.get(\"plugin_path\", '/run/hdf5_vol/')\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.fail(\"##(3)Unsupported IOR api {}\".format(ior_api))\n\n # (3.c)ior posix test file with symlink\n if ior_api == \"POSIX\":\n self.log.info(\"(3.c)==Symlink mounted testfile.\")\n result = run_pcmd(hosts_client, \"cd {}\".format(mount_dir))\n result = run_pcmd(hosts_client, \"ls -l {}\".format(testfile))\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav2))\n self.check_result(result)\n result = run_pcmd(\n hosts_client, \"ln -vs {0} {1}\".format(testfile_sav2, symlink_testfile))\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"ls -l {}\".format(symlink_testfile))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n\n # Verify pool attributes before upgrade\n self.log.info(\"(3.2)==verify pool attributes before upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n\n # (4)dmg system stop\n self.log.info(\"(4)==Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (5)Upgrade\n self.log.info(\"(5)==Upgrade RPMs to 2.2.\")\n self.upgrade(hosts_server, hosts_client)\n\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n # (6)Restart servers\n self.log.info(\"(6)==Restart servers.\")\n self.restart_servers()\n\n # (7)Verification after upgrade\n # Restart agent\n self.log.info(\"(7.1)====Restarting rel_2.2 agent after upgrade.\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool_id)\n self.daos_cmd.pool_query(pool=pool_id)\n\n # Verify pool attributes\n self.log.info(\"(7.2)====Verifying pool attributes after upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.daos_ver_after_upgraded(hosts_client)\n\n # Verify IOR data and symlink\n self.log.info(\"(7.3)====Verifying container data IOR read.\")\n if ior_api == \"DFS\":\n self.log.info(\"(7.a)==Run IOR DFS read verification.\")\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n elif ior_api == \"HDF5\":\n self.log.info(\"(7.b)==Run IOR HDF5 read verification.\")\n 
self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.log.info(\"(7.c)==Run Symlink check after upgraded.\")\n result = run_pcmd(\n hosts_client,\n \"dfuse --mountpoint {0} --pool {1} --container {2}\".format(\n mount_dir, pool_id, self.container))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(symlink_testfile, testfile_sav2))\n self.check_result(result)\n\n # (8)Dmg pool get-prop\n self.log.info(\"(8)==Dmg pool get-prop after RPMs upgraded before Pool upgraded\")\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n\n # (9)Pool property verification after upgraded\n self.log.info(\"(9)==Dmg pool upgrade and get-prop after RPMs upgraded\")\n\n if fault_on_pool_upgrade and self.has_fault_injection(hosts_client):\n self.log.info(\"(9.1a)==Pool upgrade with fault-injection.\")\n self.pool_upgrade_with_fault(hosts_client, pool_id)\n else:\n self.log.info(\"(9.1b)==Pool upgrade.\")\n result = run_pcmd(hosts_client, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n self.log.info(\"(9.2)==verify pool attributes after pool-upgraded.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.pool.destroy()\n\n # (10)Create new pool\n self.log.info(\"(10)==Create new pool after rpms Upgraded\")\n self.add_pool(connect=False)\n pool2_id = self.pool.identifier\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool2_id)\n self.daos_cmd.pool_query(pool=pool2_id)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool2_id))\n self.check_result(result)\n\n # (11)Downgrade and cleanup\n self.log.info(\"(11)==Downgrade and cleanup.\")\n if ior_api == \"POSIX\":\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n self.pool.destroy()\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n self.log.info(\"(11.1)==Downgrade RPMs to 2.0.3.\")\n self.downgrade(hosts_server, hosts_client)\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n\n # (12)Cleanup restart server and agent\n self.log.info(\"(12)==Restart 2.0 servers and agent.\")\n self.restart_servers()\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n if fault_on_pool_upgrade and not self.has_fault_injection(hosts_client):\n self.fail(\"##(12)Upgraded-rpms did not have fault-injection feature.\")\n self.log.info(\"==(12)Test passed\")", "def install_baseos(self):\n\n # Check that DFT path is valid\n if not os.path.isdir(self.project.project_definition[\"configuration\"][\"dft-base\"]):\n logging.critical(\"Path to DFT installation is not valid : %s\",\n self.project.project_definition[\"configuration\"][\"dft-base\"])\n exit(1)\n\n # Ensure target rootfs mountpoint exists and is a dir\n if not os.path.isdir(self.project.rootfs_mountpoint):\n os.makedirs(self.project.rootfs_mountpoint)\n else:\n if (\"keep_rootfs_history\" in 
self.project.project_definition[\"configuration\"] and\n self.project.project_definition[\"configuration\"][\"keep_rootfs_history\"]):\n logging.warn(\"target rootfs mount point already exists : \" + self.project.rootfs_mountpoint)\n# TODO\n logging.critical(\"TODO : handle history : \" + self.project.rootfs_mountpoint)\n exit(1)\n# It looks like i need to add a symlink from history to current\n# It should be optional with overwrite on factory_setup_definition\n# Depending on keeping history or not. So far not available\n# default behavior is not to keep history\n else:\n\n# TODO security hole !!!!!\n# Protect path generation to avoid to remove / !!!\n sudo_command = 'sudo rm -fr \"' + self.project.rootfs_mountpoint +'\"'\n self.execute_command(sudo_command)\n os.makedirs(self.project.rootfs_mountpoint)\n\n # Check if the archive has to be used instead of doing a debootstraping\n # for real. Only if the archive exist...\n if self.project.dft.use_cache_archive and self.cache_archive_is_available:\n self.fake_generate_debootstrap_rootfs()\n else:\n # In any other cases, do a real debootstrap call\n self.generate_debootstrap_rootfs()\n\n # Test if the archive has to be updated\n if self.project.dft.update_cache_archive:\n # But only do it if we haven't bee using the cache, or it\n # would be extracted, then archived again.\n if self.project.dft.use_cache_archive:\n self.update_rootfs_archive()\n\n # Launch Ansible to install roles identified in configuration file\n self.install_packages()\n\n # Once installation has been played, we need to do some cleanup\n # like ensute that no mount bind is still mounted, or delete the\n # DFT ansible files\n self.cleanup_installation_files()\n\n # Remove QEMU if it has been isntalled. It has to be done in the end\n # since some cleanup tasks could need QEMU\n if self.use_qemu_static:\n self.cleanup_qemu()", "def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']", "def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p1.communicate()\n\n\t\tresult = {}\n\t\tresult['install'] = []\n\t\tresult['update'] = []\n\t\tresult['remove'] = []\n\t\tfor line in stdout.split('\\n'):\n\t\t\t# upgrade:\n\t\t\t# Inst univention-updater [3.1.1-5] (3.1.1-6.408.200810311159 192.168.0.10)\n\t\t\t# inst:\n\t\t\t# Inst mc (1:4.6.1-6.12.200710211124 oxae-update.open-xchange.com)\n\t\t\t#\n\t\t\t# *** FIX ***\tthe above example lines ignore the fact that there's\n\t\t\t#\t\t\t\tsome extra text (occasionally) after the last closing\n\t\t\t#\t\t\t\tparenthesis. 
Until now, I've seen only a pair of empty\n\t\t\t#\t\t\t\tbrackets [], but who knows...\n\t\t\tmatch = re.search('^Inst (\\S+)\\s+(.*?)\\s*\\((\\S+)\\s.*\\)',line)\n\t\t\tif match:\n\t\t\t\tpkg = match.group(1)\n\t\t\t\told = match.group(2)\n\t\t\t\tver = match.group(3)\n\t\t\t\tif old:\n\t\t\t\t\tresult['update'].append([pkg,ver])\n\t\t\t\telse:\n\t\t\t\t\tresult['install'].append([pkg,ver])\n\t\t\telif line.startswith('Remv '):\n\t\t\t\tl=line.split(' ')\n\t\t\t\tpkg = l[1]\n\t\t\t\tver = _('unknown')\n\t\t\t\tif len(l) > 2:\n\t\t\t\t\tver = l[2].replace('[','').replace(']','')\n\t\t\t\tresult['remove'].append([pkg,ver])\n\n\n\t\t# sort package names?\n\t\tresult['update'] = sorted(result['update'])\n\t\tresult['install'] = sorted(result['install'])\n\t\tresult['remove'] = sorted(result['remove'])\n\n\t\tself.finished(request.id,result)", "def prepare(self,avahiInstallOnly=False,ignoreUpgradeError=False): \n content=\"\"\"<?xml version=\\\"1.0\\\" standalone=\\'no\\'?>\n<!--*-nxml-*-->\n<!DOCTYPE service-group SYSTEM \"avahi-service.dtd\">\n<!-- $Id$ -->\n<service-group>\n<name replace-wildcards=\"yes\">daascluster %h</name>\n\n<service>\n<type>_daascluster._tcp</type>\n<port>9999</port>\n</service>\n\n<service>\n<type>_ssh._tcp</type>\n<port>22</port>\n</service>\n\n</service-group>\n\"\"\"\n content=content.replace(\"daascluster\",self.cluster.domainname.replace(\".\",\"__\"))\n tmpfile=q.system.fs.joinPaths(q.dirs.tmpDir,\"avahi\")\n q.system.fs.writeFile(tmpfile,content) \n \n q.transaction.start(\"Try to configure nodes for cluster usage (will use SSH to do so).\") \n q.transaction.start(\"Ping machine %s\" %self.ipaddr)\n if not q.system.net.pingMachine(self.ipaddr,5):\n q.console.echo(\"ERROR: Could not ping to machine %s, please check machine is reacheable.\"%self.ipaddr)\n q.transaction.stop()\n else:\n q.transaction.stop() #ping\n ##q.transaction.start(\"Open SSH connection to %s\" %self.ipaddr)\n ##sshclient=q.clients.ssh.createClient(ipaddr,\"root\",rootpasswd,60) \n if avahiInstallOnly==False:\n q.transaction.start(\"Upgrade ubuntu on %s to newest packages, this can take a long time (apt-get update & upgrade).\" %self.ipaddr) \n self.execute(\"apt-get update\",False)\n #returncode,stdout,stderr=self.execute(\"apt-get upgrade -y\",False)\n #if returncode>0:\n #if not ignoreUpgradeError or q.qshellconfig.interactive==False or not q.console.askYesNo(\"Could not upgrade system, do you want to ignore and continue?\"):\n #raise \"Could not upgrade system (apt-get upgrade), probably because there was interactivity required.\"\n q.transaction.start(\"Install mc on %s\" %self.ipaddr) \n self.execute(\"apt-get install mc -y\")\n q.transaction.stop()\n q.transaction.stop()\n else:\n q.transaction.start(\"Update ubuntu package metadata on %s (apt-get update).\" %self.ipaddr) \n self.execute(\"apt-get update\",False)\n q.transaction.stop() \n \n q.transaction.start(\"Install avahi on %s\" %self.ipaddr) \n self.execute(\"apt-get install avahi-daemon avahi-utils -y\",False)\n self.execute(\"mkdir -p /etc/avahi/services\") \n ftp=self.getSftpConnection()\n q.logger.log(\"put %s to /etc/avahi/services/daascluster.service\" % tmpfile)\n ftp.put(tmpfile,\"/etc/avahi/services/daascluster.service\")\n q.transaction.stop() #reload avahi\n q.transaction.start(\"Reload Avahi Config\")\n self.execute(\"avahi-daemon --reload\")\n q.transaction.stop() #end of avahi\n q.transaction.start(\"Disable ssh name resolution\")\n self.execute(\"echo 'UseDNS no' >> /etc/ssh/sshd_config\",silent=True)\n 
self.execute(\"/etc/init.d/ssh restart\",silent=True)\n q.transaction.stop() \n \n q.transaction.stop() #end of ssh connection\n \n \n #if q.qshellconfig.interactive:\n #if copyqbase or q.console.askYesNo(\"Do you want to copy qbasedir to remote node over ssh?\"):\n ##self._removeRedundantFiles()\n #if rsync==False:\n #sshclient.copyDirTree(\"/opt/qbase3/\")\n #sshclient.copyDirTree(\"/opt/code/\")\n #else:\n #q.system.process.executeWithoutPipe(\"rsync -avzEp -e ssh /opt/qbase3/ root@%s:/opt/qbase3/ \" %self.ipaddr)\n #q.system.process.executeWithoutPipe(\"rsync -avzEp -e ssh /opt/qbase3/ root@%s:/opt/code/ \" %self.ipaddr)", "def __init__(self):\n self.update_os_packages()\n self.upgrade_os_packages()", "def _on_parent_process_kill(self):", "def restore_old_install(self):\n USER.info('%s: Restoring Old Install', self.recipe.name)\n shutil.move(self.back_dir, self.recipe.install_dir)\n pakit.conf.IDB[self.recipe.name] = self.old_entry\n walk_and_link(self.recipe.install_dir, self.recipe.link_dir)", "def _provision_package(self):", "def update_debianbuild ():\n\n prev_ace_ver = None\n\n path = get_path('ACE', 'debian', 'control')\n\n mask = re.compile (\"(libace|libACE|libkokyu|libKokyu|libnetsvcs)([^\\s,:]*-)(\\d+\\.\\d+\\.\\d+)([^\\s,:]*)\")\n\n def update_ver (match):\n return match.group (1) + match.group (2) + comp_versions[\"ACE_version\"] + match.group (4)\n\n # update debian/control\n with open (path, 'r+') as control_file:\n new_ctrl = \"\"\n for line in control_file.readlines ():\n if re.search (\"^(Package|Depends|Suggests):\", line) is not None:\n line = mask.sub (update_ver, line)\n elif re.search ('^Replaces:', line) is not None:\n line = line.replace (old_comp_versions[\"ACE_version\"], comp_versions[\"ACE_version\"])\n\n new_ctrl += line\n\n if opts.take_action:\n control_file.seek (0)\n control_file.truncate (0)\n control_file.writelines (new_ctrl)\n else:\n print (\"New control file:\")\n print (\"\".join (new_ctrl))\n\n return [path]", "def upgrade_os_packages(self):\n self.summarize_operation(\"Upgrading OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get upgrade -y\"))", "def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)", "def self_update(args):\n\n try:\n import install_vee\n except ImportError:\n root = os.path.abspath(os.path.join(__file__, '..', '..', '..'))\n sys.path.append(root)\n import install_vee\n\n install_vee.main([\n '--prefix', args.home.root,\n '--no-bashrc',\n ])", "def post_extract(env_name='root'):\n prefix = prefix_env(env_name)\n info_dir = join(prefix, 'info')\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n dist = '%(name)s-%(version)s-%(build)s' % meta\n if FORCE:\n run_script(prefix, dist, 'pre-unlink')\n link(prefix, dist, linktype=None)\n shutil.rmtree(info_dir)", "def processRestart(name):\n imrclient.update_server_info()\n imrclient.process_restart(name)\n\n # FIXME: this is a workaround until the Bima processes are fixed\n # FIXME: such that their initialized monitor points actually work\n m = re.match(r'^\\s*(th|dm|rm|mh|if|ot|ch)bima([1-9])\\s*$', name)\n if m:\n ant = int(m.group(2)) + 6\n print 'WARNING: You MUST re-initialize C%d by using removeAntenna(%d)' % (ant, ant)\n print 'WARNING: followed by addAntenna(%d) in the correct subarray' % (ant, )", "def _updateComponentsInfo(self):\n def removeItems(processDetail, measurements):\n 
self._components.remove(processDetail)\n self._compMeasurements.remove(measurements)\n\n myName = self.__class__.__name__\n # dictionary[componentName] = componentPID\n componentsInfo = self._getComponentsInfo()\n for processDetail, measurements in zip(self._components, self._compMeasurements):\n try:\n newPID = componentsInfo[processDetail.name]\n if int(newPID) == processDetail.pid:\n # ok, component still runs under the same PID\n # update list of child processes (some may have (dis)appeared)\n logging.debug(\"Component %s runs under the same PID, refreshing\"\n \" list of child processes ...\" % processDetail.getDetails())\n try:\n processDetail.refresh()\n except NoSuchProcess as ex:\n logging.error(\"Could not update list of children processes \"\n \"for %s, reason: %s\" % (processDetail.getDetails(), ex))\n del componentsInfo[processDetail.name]\n else:\n logging.warn(\"Component %s seems to have been restarted \"\n \"(different PID:%s, was:%s).\" % (processDetail.name,\n newPID, processDetail.pid))\n try:\n pd = ProcessDetail(newPID, processDetail.name)\n index = self._components.index(processDetail)\n self._components[index] = pd\n measurements.clear()\n except (NoSuchProcess, AccessDenied) as ex:\n logging.error(\"%s: component %s ignored, reason: %s\" % (myName, processDetail.name, ex))\n removeItems(processDetail, measurements)\n except KeyError:\n m = \"Component %s seems not running anymore, removed from polling.\" % processDetail.name\n logging.warning(m)\n removeItems(processDetail, measurements)\n\n if len(componentsInfo) > 0:\n logging.info(\"Some new components appeared since last check ...\")\n for compName, compPID in componentsInfo.items():\n self._setUpProcessDetailAndMeasurements(compPID, compName)", "def modify_base_buildroot(self):\n if \"'%s '\" % self.buildroot_pkgs != pipes.quote(str(self.buildroot_pkgs)+' '):\n # just different test if it contains only alphanumeric characters allowed in packages name\n raise BuilderError(\"Do not try this kind of attack on me\")\n self.root_conn.module_name = \"lineinfile\"\n self.root_conn.module_args = \"\"\"dest=/etc/mock/%s.cfg line=\"config_opts['chroot_setup_cmd'] = 'install @buildsys-build %s'\" regexp=\"^.*chroot_setup_cmd.*$\" \"\"\" % (self.chroot, self.buildroot_pkgs)\n self.mockremote.callback.log('putting %s into minimal buildroot of %s' % (self.buildroot_pkgs, self.chroot))\n results = self.root_conn.run()\n\n is_err, err_results = check_for_ans_error(results, self.hostname, success_codes=[0],\n return_on_error=['stdout', 'stderr'])\n if is_err:\n self.mockremote.callback.log(\"Error: %s\" % err_results)\n myresults = get_ans_results(results, self.hostname)\n self.mockremote.callback.log(\"%s\" % myresults)", "def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')", "def reboot(self, node):", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def basic_env_for_reconfiguration(self):\n snapshot_name = 'basic_env_for_reconfiguration'\n self.check_run(snapshot_name)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": 
settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n self.show_step(2)\n self.show_step(3)\n\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['compute'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['controller']\n })\n\n self.show_step(4)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.env.make_snapshot(\"basic_env_for_reconfiguration\", is_make=True)", "def run(self):\n\n run_command(['apt-get', 'update'])\n run_command(['apt-get', 'install', '-y', 'unattended-upgrades'])\n run_command(['apt-get', 'upgrade', '-y'])", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def vacuum_cgmd(self):\n\n\t\texstring_dssp = 'except: cannot find dssp at '+gmxpaths['dssp']+\\\n\t\t\t'\\nconsider using the following syntax to download for 64-bit linux:'+\\\n\t\t\t'\\n\\twget ftp://ftp.cmbi.ru.nl/pub/software/dssp/dssp-2.0.4-linux-amd64'+\\\n\t\t\t'\\n\\tor navigate to ftp://ftp.cmbi.ru.nl/pub/software/dssp/'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\t\t\n\t\texstring_martinize = 'except: cannot find martinize at '+gmxpaths['martinize']+\\\n\t\t\t'\\nconsider using the following syntax to download:'+\\\n\t\t\t'\\n\\twget http://md.chem.rug.nl/cgmartini/images/tools/martinize/martinize-2.4/martinize.py'+\\\n\t\t\t'\\n\\tor navigate to http://md.chem.rug.nl/cgmartini/index.php/tools2/proteins-and-bilayers'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\n\t\t#---first test to see if executables are available\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['dssp'])): raise Exception(exstring_dssp)\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['martinize'])): raise Exception(exstring_martinize)\t\n\t\n\t\tcmd = [gmxpaths['martinize'],\n\t\t\t'-f system-input.pdb',\n\t\t\t'-o system-original.top',\n\t\t\t'-x protein-cg.pdb',\n\t\t\t'-ff martini22','-ed',\n\t\t\t'-dssp '+gmxpaths['dssp']]\n\t\tcall(cmd,logfile='log-martinize',cwd=self.rootdir)\n\t\t\n\t\twith open(self.rootdir+'system-original.top') as fp: lines = fp.readlines()\n\t\tself.itp_protein = [l.split()[0] for l in lines if l[:7] == 'Protein']\n\n\t\t#---note that this section leaves out lipids\n\t\tself.itp_lipid = []\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.nprots = [1]\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f protein-cg.pdb',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-convert',cwd=self.rootdir)\n\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t'-o vacuum.gro','-c']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir)\n\t\t\n\t\tself.minimization_method('vacuum')", "def restartFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('start fluidinfo-api-node PORT=%d' % port)\n with settings(warn_only=True):\n sudo('kill -USR1 $(cat /var/run/nginx.pid)')", "def post_installation(self, exc_value):\n pass", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n 
new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "def anneal(self,anPackages=None,progress=None):\n data = self.data\n data_sizeCrcDate = self.data_sizeCrcDate\n anPackages = set(anPackages or data)\n getArchiveOrder = lambda x: data[x].order\n #--Get remove/refresh files from anPackages\n removes = set()\n for package in anPackages:\n installer = data[package]\n removes |= installer.underrides\n if installer.isActive:\n removes |= installer.missingFiles\n removes |= set(installer.dirty_sizeCrc)\n #--March through packages in reverse order...\n restores = {}\n for package in sorted(data,key=getArchiveOrder,reverse=True):\n installer = data[package]\n #--Other active package. May provide a restore file.\n # And/or may block later uninstalls.\n if installer.isActive:\n files = set(installer.data_sizeCrc)\n myRestores = (removes & files) - set(restores)\n for file in myRestores:\n if installer.data_sizeCrc[file] != data_sizeCrcDate.get(file,(0,0,0))[:2]:\n restores[file] = package\n removes.discard(file)\n #--Remove files\n emptyDirs = set()\n modsDir = dirs['mods']\n for file in removes:\n path = modsDir.join(file)\n path.remove()\n (path+'.ghost').remove()\n data_sizeCrcDate.pop(file,None)\n emptyDirs.add(path.head)\n #--Remove empties\n for emptyDir in emptyDirs:\n if emptyDir.isdir() and not emptyDir.list():\n emptyDir.removedirs()\n #--Restore files\n restoreArchives = sorted(set(restores.itervalues()),key=getArchiveOrder,reverse=True)\n if restoreArchives: \n progress.setFull(len(restoreArchives))\n for index,package in enumerate(restoreArchives):\n progress(index,package.s)\n installer = data[package]\n destFiles = set(x for x,y in restores.iteritems() if y == package)\n if destFiles:\n installer.install(package,destFiles,data_sizeCrcDate,\n SubProgress(progress,index,index+1))", "def do_update(self, node_role_map, node_roles, first_run=False):\n require('use_rds')\n require('pstat_instance')\n require('pstat_url')\n require('project_root')\n require('config_folder')\n require('ssl_prefix')\n require('backup')\n require('aws_access_key_id')\n require('aws_secret_access_key')\n require('sphinx_counter')\n require('key_filename')\n require('calabar_conf_context')\n require('loggly_inputs')\n require('sphinx_counter')\n require('ipsec_confs')\n require('hostname')\n require('enable_periodic_tasks')\n\n logger.info(\"Starting to provision %s\", env.host_string)\n\n for ipsec_name, _ in env.ipsec_confs.items():\n # Require all of the pre-shared key configs\n require('ipsec_psk_%s' % ipsec_name)\n\n if first_run:\n self.do_first_launch_config()\n\n self._stop_celery()\n\n self._update_cache_settings(node_role_map['memcached']['all'])\n self._update_sphinx_settings(\n node_role_map['celery_backend']['same_az'],\n node_roles,\n )\n self._update_celery_backend_settings(\n node_role_map['sphinx_search_indexer']['same_az'],\n )\n ldap_api_nodes = node_role_map['has_ldap_access']\n self._update_ldap_api_endpoint_settings(\n all_ldap_api_nodes=ldap_api_nodes['all'],\n same_az_ldap_api_nodes=ldap_api_nodes['same_az'],\n node_roles=node_roles,\n )\n self._update_celery_ldap_settings(node_roles)\n\n # Package and push the app to the new instance\n env.project_root_src = '/opt/pstat/versions/%(timestamp)s' % env\n source_dir = env.project_root_src\n current_source_dir = None\n if not first_run:\n current_source_dir = env.project_root\n with hide(*fab_output_hides):\n push_source(\n 
new_source_dir=source_dir,\n current_source_dir=current_source_dir,\n chown=F_CHOWN,\n chmod=\"u+rw,g+rw,o-rw\",\n )\n self._make_media_readable(source_dir)\n self._configure_settings_local(\n source_dir,\n env.pstat_settings,\n chown=F_CHOWN,\n )\n self._configure_settings_target(\n source_dir,\n env.settings_target,\n chown=F_CHOWN,\n )\n self.configure_terrarium(source_dir=source_dir, user=FILE_OWNER)\n self._activate_new_source(\n source_dir,\n [ACTIVE_SOURCE_SYMLINK, env.project_root],\n )\n self._run_db_migrations(user=FILE_OWNER)\n\n # Link up the attachments and upload directories from /mnt/\n self._link_storage_dirs()\n\n self._configure_webservers(node_roles)\n building_search_index = self._build_search_index()\n\n self._create_media_folder()\n self._collect_static_media()\n\n self._create_500_page()\n self._restart_webservers()\n\n # Services managed via supervisord\n self._configure_celery(node_roles)\n self._update_supervisord()\n self._configure_calabar()\n self._configure_ipsec()\n self._start_celery()\n\n self._configure_loggly()\n self._configure_pstat_cron_jobs()\n self._configure_email_sending()\n\n if first_run:\n self._sync_s3_media()\n\n if building_search_index:\n self._wait_for_search_indexing()\n self._ensure_sphinx_running()\n self._configure_sphinx_cron()\n\n logger.info(\"Provisioner completed successfully\")", "def pre_start_migrate_cores(self, env):\n import params\n env.set_params(params)\n\n if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:\n backup_solr_cores=\"/tmp/solr/cores\"\n solr_home_dir=params.solr_data_dir\n\n Directory(format(solr_home_dir),\n owner=params.solr_user,\n create_parents=True,\n group=params.user_group\n )\n\n if os.path.isdir(solr_home_dir) and os.path.isdir(backup_solr_cores):\n Execute(('cp', '-rn', backup_solr_cores+\"/.\", solr_home_dir),\n user=params.solr_user,\n logoutput=True\n )", "def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)", "def run_update():\n\n args = _parse_arguments()\n\n # get 
dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)", "def test_core_files(self):\n # Choose a server find the pid of its daos_engine process\n host = choice(self.server_managers[0].hosts) #nosec\n self.log.info(\"Obtaining pid of the daos_engine process on %s\", host)\n pid = None\n result = run_pcmd([host], \"pgrep --list-full daos_engine\", 20)\n index = 0\n while not pid and index < len(result):\n output = \"\\n\".join(result[index][\"stdout\"])\n match = findall(r\"(\\d+)\\s+[A-Za-z0-9/]+\", output)\n if match:\n pid = match[0]\n index += 1\n if pid is None:\n self.fail(\n \"Error obtaining pid of the daos_engine process on \"\n \"{}\".format(host))\n self.log.info(\"Found pid %s\", pid)\n\n # Send a signal 6 to its daos_engine process\n self.log.info(\"Sending a signal 6 to %s\", pid)\n result = run_pcmd([host], \"sudo kill -6 {}\".format(pid))\n if len(result) > 1 or result[0][\"exit_status\"] != 0:\n self.fail(\"Error sending a signal 6 to {} on {}\".format(pid, host))\n\n # Display the journalctl log for the process that was sent the signal\n self.server_managers[0].manager.dump_logs([host])\n\n # Simplify resolving the host name to rank by marking all ranks as\n # expected to be either running or errored (sent a signal 6)\n self.server_managers[0].update_expected_states(\n None, [\"Joined\", \"Errored\"])", "def test_update_with_fail(executable):\n from tempfile import mkdtemp\n from shutil import rmtree\n from pylada.jobfolder.jobfolder import JobFolder\n from pylada.process.jobfolder import JobFolderProcess\n from pylada.process import Fail\n from pylada import default_comm\n from functional import Functional\n\n root = JobFolder()\n for n in xrange(3):\n job = root / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n root['1'].functional.order = 666\n root['1'].sleep = None\n supp = JobFolder()\n for n in xrange(3, 6):\n job = supp / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n supp['5'].sleep = 0\n supp['5'].functional.order = 666\n\n comm = default_comm.copy()\n comm['n'] = 4\n\n dir = mkdtemp()\n try: \n program = JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n\n # compute current jobs.\n program.start(comm)\n try: program.wait()\n except Fail: pass\n else: raise Exception()\n assert len(program.errors) == 1\n\n # compute second set of updated jobs\n program.update(supp)\n try: program.wait()\n except Fail: pass\n else: raise Exception()\n assert len(program.errors) == 2\n program.errors.clear()\n\n\n finally:\n try: rmtree(dir)\n except: pass", "def upgrade_cmd(jail, release):\n lgr = ioc_logger.Logger('ioc_cli_upgrade')\n lgr = lgr.getLogger()\n\n jails, paths = IOCList(\"uuid\").list_datasets()\n _jail = {tag: uuid for (tag, uuid) in jails.items() if\n uuid.startswith(jail) or tag == jail}\n\n if len(_jail) == 1:\n tag, uuid = next(iter(_jail.items()))\n path = paths[tag]\n root_path = \"{}/root\".format(path)\n elif len(_jail) > 1:\n lgr.error(\"Multiple jails found for\"\n \" {}:\".format(jail))\n for t, u in sorted(_jail.items()):\n lgr.critical(\" {} ({})\".format(u, t))\n exit(1)\n else:\n lgr.critical(\"{} not found!\".format(jail))\n exit(1)\n\n pool = IOCJson().json_get_value(\"pool\")\n 
iocroot = IOCJson(pool).json_get_value(\"iocroot\")\n freebsd_version = checkoutput([\"freebsd-version\"])\n status, jid = IOCList.list_get_jid(uuid)\n conf = IOCJson(path).json_load()\n host_release = os.uname()[2]\n jail_release = conf[\"release\"]\n started = False\n\n if conf[\"release\"] == \"EMPTY\":\n lgr.critical(\"Upgrading is not supported for empty jails.\")\n exit(1)\n\n if conf[\"type\"] == \"jail\":\n if not status:\n IOCStart(uuid, tag, path, conf, silent=True)\n status, jid = IOCList.list_get_jid(uuid)\n started = True\n elif conf[\"type\"] == \"basejail\":\n lgr.critical(\"Please run \\\"iocage migrate\\\" before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n elif conf[\"type\"] == \"template\":\n lgr.critical(\"Please convert back to a jail before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n else:\n lgr.critical(\"{} is not a supported jail type.\".format(conf[\"type\"]))\n exit(1)\n\n _freebsd_version = \"{}/releases/{}/root/bin/freebsd-version\".format(\n iocroot, release)\n\n if \"HBSD\" in freebsd_version:\n Popen([\"hbsd-upgrade\", \"-j\", jid]).communicate()\n else:\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(root_path)):\n # 10.3-RELEASE and under lack this flag\n if float(host_release.partition(\"-\")[0][:5]) <= 10.3:\n lgr.critical(\"Host: {} is too old, please upgrade to \"\n \"10.3-RELEASE or above\".format(host_release))\n exit(1)\n\n os.environ[\"PAGER\"] = \"/bin/cat\"\n fetch = Popen([\"freebsd-update\", \"-b\", root_path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(root_path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(root_path),\n \"--currently-running {}\".format(jail_release), \"-r\",\n release, \"upgrade\"], stdin=PIPE)\n fetch.communicate(b\"y\")\n\n while not __upgrade_install__(root_path, release):\n pass\n\n if release[:4].endswith(\"-\"):\n # 9.3-RELEASE and under don't actually have this binary.\n new_release = release\n else:\n with open(_freebsd_version, \"r\") as r:\n for line in r:\n if line.startswith(\"USERLAND_VERSION\"):\n new_release = line.rstrip().partition(\"=\")[\n 2].strip(\n '\"')\n\n IOCJson(path, silent=True).json_set_value(\"release={}\".format(\n new_release))\n\n if started:\n IOCStop(uuid, tag, path, conf, silent=True)\n\n lgr.info(\"\\n{} ({}) successfully upgraded from {} to {}!\".format(\n uuid, tag, jail_release, new_release))", "def upgrade_if_needed(self, restart = True, dependencies = False):\n if self.check():\n print \"Upgrading %s\" % self.pkg\n self.upgrade(dependencies)\n if restart:\n self.restart()", "def patched_fork(self):\n pid = self.original_os_fork()\n if not pid:\n _LOG('Fork detected. 
Reinstalling Manhole.')\n self.reinstall()\n return pid", "def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages", "def restart_with_reloader():\n print(\"[+] RESTARTING\")\n cwd = os.getcwd()\n args = _get_args_for_reloading()\n new_environ = os.environ.copy()\n new_environ[\"TERMNINJA_SERVER_RUNNING\"] = \"true\"\n cmd = \" \".join(args)\n worker_process = Process(\n target=subprocess.call,\n args=(cmd,),\n kwargs={\"cwd\": cwd, \"shell\": True, \"env\": new_environ},\n )\n worker_process.start()\n return worker_process", "def respawn_instance(self, version, moreargs=None, wait_for_logfile=True):\n assert version is not None\n self.cfg.version = version\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n if moreargs is not None:\n args.extend(moreargs)\n\n logging.info(\"StarterManager: respawning instance %s\", str(args))\n self.instance = psutil.Popen(args)\n self.pid = self.instance.pid\n self.ppid = self.instance.ppid()\n print(\"respawned with PID:\" + str(self.instance.pid))\n if wait_for_logfile:\n self.wait_for_logfile()\n self.wait_for_port_bind()\n else:\n print(\"Waiting for starter to exit\")\n print(\"Starter exited %d\" % self.instance.wait())", "def _forkLifeMain(forkList, addForkQueue):\n needsReplacement = set(forkList)\n\n try:\n def onKillSignal(sig, frame):\n # As the main fork, we do not reap cherrypy's SIGTERM processing.\n # We need to convert SIGTERM into an exception so that we \n # appropriately kill our forks and shutdown.\n raise Exception(\"SIGTERM received\")\n signal.signal(signal.SIGTERM, onKillSignal)\n \n # We don't care about child processes.\n signal.signal(signal.SIGCHLD, signal.SIG_IGN)\n \n while True:\n try:\n oldPid = addForkQueue.get(timeout = 5)\n except Empty:\n # Shouldn't make a new fork, but do check on the ones that\n # are alive.\n pass\n else:\n # Before just starting a new process, make sure this pid is\n # still in our needsReplacement set. If it's not, we've\n # already spawned a replacement child, and spawning another\n # would create too many forks.\n if oldPid in needsReplacement:\n needsReplacement.remove(oldPid)\n pid = os.fork()\n if pid == 0:\n # We're the new child! Hooray! 
Unset our signal\n # handler as cherrypy will install its own.\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGCHLD, signal.SIG_DFL)\n return\n forkList.append(pid)\n # Add the new pid so it will get replaced\n needsReplacement.add(pid)\n\n # Clean out forkList\n for pid in forkList[:]:\n if not _checkAlive(pid):\n forkList.remove(pid)\n # And restart a new one when one dies\n addForkQueue.put(pid)\n\n except:\n # If there was any error, kill all forks and exit\n _killForks(forkList)\n raise", "def upgrade_environment(self, db_dummy=None):\n self.log.debug(\"upgrading existing environment for %s plugin.\" % \n PLUGIN_NAME)\n db_installed_version = self.get_version()\n #cursor = db.cursor()\n with self.env.db_transaction as db:\n if db_installed_version < 0:\n # Initial installation\n db(\"\"\"\n INSERT INTO system (name, value) VALUES ('%s','%s')\n \"\"\" % (DB_SYSTEM_KEY, DB_VERSION))\n db(\"ALTER TABLE ticket ADD COLUMN product TEXT\")\n self.log.debug(\"creating initial db tables for %s plugin.\" % \n PLUGIN_NAME)\n \n db_connector, dummy = DatabaseManager(self.env)._get_connector()\n for table in self.SCHEMA:\n for statement in db_connector.to_sql(table):\n db(statement)\n db_installed_version = self.get_version()\n \n if db_installed_version == 1:\n from multiproduct.model import Product\n products = Product.select(self.env)\n for prod in products:\n db(\"\"\"UPDATE ticket SET product=%s\n WHERE product=%s\"\"\", (prod.prefix, prod.name))\n \n db(\"\"\"UPDATE system SET value=%s\n WHERE name=%s\"\"\", (DB_VERSION, DB_SYSTEM_KEY))\n self.log.info(\"Upgraded multiproduct db schema from version %d\"\n \" to %d\" % (db_installed_version, DB_VERSION))", "def apply_maintenance_update(self):\n logger.info(\"Applying maintenance updates on master node\")\n self.env.admin_install_updates()\n\n logger.info(\"Applying maintenance updates on slaves\")\n slaves_mu_script_url = (\n \"https://github.com/Mirantis/tools-sustaining/\"\n \"raw/master/scripts/mos_apply_mu.py\")\n\n path_to_mu_script = \"/tmp/mos_apply_mu.py\"\n\n with self.env.d_env.get_admin_remote() as remote:\n remote.check_call(\"wget {uri} -O {path}\".format(\n uri=slaves_mu_script_url,\n path=path_to_mu_script)\n )\n\n remote.check_call(\n \"python {path} \"\n \"--env-id={identifier} \"\n \"--user={username} \"\n \"--pass={password} \"\n \"--tenant={tenant_name} --update\".format(\n path=path_to_mu_script,\n identifier=self.cluster_id,\n **conf.KEYSTONE_CREDS\n )\n )\n\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['controller', ])\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['compute', ])\n\n logger.info(\"Restarting all OpenStack services\")\n\n logger.info(\"Restarting services on controllers\")\n ha_services = (\n \"p_heat-engine\",\n \"p_neutron-plugin-openvswitch-agent\",\n \"p_neutron-dhcp-agent\",\n \"p_neutron-metadata-agent\",\n \"p_neutron-l3-agent\")\n non_ha_services = (\n \"heat-api-cloudwatch\",\n \"heat-api-cfn\",\n \"heat-api\",\n \"cinder-api\",\n \"cinder-scheduler\",\n \"nova-objectstore\",\n \"nova-cert\",\n \"nova-api\",\n \"nova-consoleauth\",\n \"nova-conductor\",\n \"nova-scheduler\",\n \"nova-novncproxy\",\n \"neutron-server\",\n )\n for controller in controllers:\n with self.fuel_web.get_ssh_for_nailgun_node(\n controller) as remote:\n for service in ha_services:\n remote_ops.manage_pacemaker_service(remote, service)\n for service in non_ha_services:\n remote_ops.manage_service(remote, 
service)\n\n logger.info(\"Restarting services on computes\")\n compute_services = (\n \"neutron-plugin-openvswitch-agent\",\n \"nova-compute\",\n )\n for compute in computes:\n with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:\n for service in compute_services:\n remote_ops.manage_service(remote, service)", "def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)", "def clean_up(self):\n dist.destroy_process_group()", "def main():\n\n do_install, do_uninstall, notify_once = parse_args()\n if do_install:\n install()\n elif do_uninstall:\n uninstall()\n else:\n notify_outdated_formula(always_notify=not notify_once)", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)", "def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)", "def run(self):\n if pakit.conf.IDB.get(self.recipe.name, None) is None:\n print(self.recipe.name + ': Not Installed')\n return\n\n walk_and_unlink(self.recipe.install_dir, self.recipe.link_dir)\n try:\n shutil.rmtree(self.recipe.install_dir)\n except OSError: # pragma: no cover\n pass\n del pakit.conf.IDB[self.recipe.name]\n pakit.conf.IDB.write()", "def reboot(self):\n raise NotImplementedError", "def reinstall(self):\n with _LOCK:\n if not (self.thread.is_alive() and self.thread in _ORIGINAL__ACTIVE):\n self.thread = self.thread.clone(bind_delay=self.reinstall_delay)\n if self.should_restart:\n self.thread.start()", "def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def rebuildLookdev(self):\n self.logger.info('Reverting Lookdev')\n # TODO", "def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode", "def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n 
ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r", "def reboot(*args):\n args = list(sys.argv) + list(args)\n if args[0] == 'python' or not args[0]:\n args[0] = BIN_PYTHON\n elif os.path.basename(sys.argv[0]) in ['lore', 'lore.exe']:\n args[0] = BIN_LORE\n try:\n os.execv(args[0], args)\n except Exception as e:\n if args[0] == BIN_LORE and args[1] == 'console' and JUPYTER_KERNEL_PATH:\n print(ansi.error() + ' Your jupyter kernel may be corrupt. Please remove it so lore can reinstall:\\n $ rm ' + JUPYTER_KERNEL_PATH)\n raise e", "def post_install(self, installable_pkgs):\n pass", "def new():\n run('pew new --dont-activate --python={0} '\n '{1}'.format(python_bin, package_name()))\n verun('pip install --upgrade wheel')\n verun('pip install --upgrade pip')", "def pre_installation(self):\n pass" ]
[ "0.63134474", "0.5887858", "0.5887858", "0.58364576", "0.57861626", "0.57441574", "0.57368964", "0.57141185", "0.57095736", "0.56810385", "0.56625885", "0.56388724", "0.5615103", "0.5612521", "0.5526397", "0.55071336", "0.5483531", "0.5466706", "0.54461366", "0.5432789", "0.5417732", "0.541656", "0.5408389", "0.53810793", "0.53488153", "0.53396404", "0.53326", "0.5324019", "0.5318601", "0.53064966", "0.5302002", "0.5301089", "0.5286642", "0.5282554", "0.52806747", "0.5279812", "0.5268657", "0.5264315", "0.5260623", "0.52589685", "0.52530915", "0.52340823", "0.52299005", "0.52297825", "0.5227332", "0.522308", "0.52207094", "0.52051044", "0.52013546", "0.51964736", "0.5189841", "0.5184785", "0.5178475", "0.51672345", "0.51570284", "0.5154265", "0.5152379", "0.51493025", "0.51446867", "0.5138203", "0.5135676", "0.51341605", "0.5130659", "0.5124138", "0.51007664", "0.51003766", "0.5100048", "0.5100026", "0.509646", "0.5095683", "0.50929976", "0.50851655", "0.50728345", "0.5072556", "0.50720215", "0.50673777", "0.5060376", "0.50601214", "0.5055756", "0.50517315", "0.50450575", "0.503602", "0.5031082", "0.5028165", "0.50275356", "0.50268066", "0.50255376", "0.502169", "0.50213844", "0.5017119", "0.50169283", "0.50141233", "0.50136834", "0.5006075", "0.5005379", "0.50046337", "0.49851108", "0.49847493", "0.49836192", "0.49791157" ]
0.7012954
0
Kill specific instances of this starter (it won't kill the starter itself).
def kill_specific_instance(self, which_instances):
    for instance_type in which_instances:
        for instance in self.all_instances:
            if instance.instance_type == instance_type:
                instance.terminate_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? \" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def killAll(controller=False):", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def kill(self):\n \n self.killSlavePids()", "def kill_sync_processes(self, force, rev):\n for i in self.all_instances:\n if i.is_sync_instance():\n if not force and i.pid_file is not None and rev >= semver.VersionInfo.parse(\"0.15.0\"):\n print(\"Skipping manual kill\")\n return\n logging.info(\"manually killing syncer: \" + str(i.pid))\n i.terminate_instance()", "def kill_all():\n compose_kill_all()", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def killExperiment(self, **kwargs):\n if kwargs['kill']=='YES':\n killRobot.sshKill()", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def kill(self, id):", "def kill(self, id):", "def remote_kill():", "def kill_all(self):\n self._stop_all('kill')", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been 
stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def kill(targets, controller=False):", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def kill(self):\n kill_cmds = [\n \"sudo pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n self.log.info(\"Killing any server processes\")\n pcmd(self._hosts, \"; \".join(kill_cmds), False, None, None)", "def stop(self):\n self.killed = True", "def kill(name, signal=9, exact=False):\n for pid in find(name, exact):\n run(\"kill -s {0} {1}\".format(signal, pid))", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def stopEngines():\n pass", "async def kill(self, restart: bool = False) -> None:\n pass", "def crash_instances(self):\n try:\n if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING:\n print(\"generating coredump for \" + str(self.instance))\n gcore = psutil.Popen([\"gcore\", str(self.instance.pid)], cwd=self.basedir)\n print(\"launched GCORE with PID:\" + str(gcore.pid))\n gcore.wait()\n self.kill_instance()\n else:\n print(\"NOT generating coredump for \" + str(self.instance))\n except psutil.NoSuchProcess:\n logging.info(\"instance already dead: \" + str(self.instance))\n\n for instance in self.all_instances:\n instance.crash_instance()", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def stop_scripts():\n print \"*** WARNING ***: This is about to kill all python processes\"\n run(\"killall python\")", "def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return", "def kill(self):\n processes = ['MicrosoftEdge.exe', 'MicrosoftEdgeCP.exe', 'plugin-container.exe',\n 'browser_broker.exe', 'smartscreen.exe']\n for exe in processes:\n subprocess.call(['taskkill', '/F', '/T', '/IM', exe])", "def hard_stop_drivers(self, drivers_to_stop: Set[str]):\n for process in find_processes():\n if process.comm in drivers_to_stop:\n process.kill()", "def stopSpawing(self):\n self.girderManager.stopSpawing()", "def kill(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].stop()", 
"def kill_manager(self) -> None:\n\n for p in self.process_list:\n p.terminate()\n # NOTE: Seems Python does not appreciate if close is called too quickly.\n sleep(0.5)\n # Release the resources held by the Proess (Python 3.7 and up)\n p.close()", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def stop_instance(InstanceId=None, Force=None):\n pass", "def restartAll(self):\n for name in self.processes:\n self.stopProcess(name)", "def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n if kill_instance:\n instance.kill_instance()\n instance.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def kill(self):\n\t\tself.kill_subcomponents()\n\t\tself._subcomponents.clear()\n\t\tself.bug_world = None\n\n\t\ttry:\n\t\t\tself.ci.deregister_all()\n\t\texcept:\n\t\t\tpass", "def kill(self):\n self.send_signal(signal.SIGKILL)", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def stopProcesses(*args):\n _stopProcessSet(_running)", "def _kill_running_processes(self):\n # Kill any rouge processes that are still running.\n with _thread_lock:\n killed = []\n for pid in self._pids:\n try:\n os.kill(pid, _KILLED_BY_ANYPYTOOLS)\n killed.append(str(pid))\n except:\n pass\n self._pids.clear()", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)", "def kill(self):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if not currentApplication in self.__appsThatCantBeKilled:\r\n self.phone.comment('exit.kill()')\r\n self.phone.sx(self.__killCommand)\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.warn('Not allowed to kill \"%s\" application using SX' % currentApplication)", "def kill_processes(self):\n for proc in self.processes:\n if proc['proc'].poll() is not None:\n proc['proc'].terminate()", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + 
MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def unreal_kills(self, unreal_kills):\n\n self._unreal_kills = unreal_kills", "def kill_all(self, procname):\n procs = self.find_processes_by_name(procname)\n for proc in procs:\n result = self.kill_process(proc['PID'])\n if not result['HasExited']:\n for i in xrange(3):\n result = self.kill_process(result['PID'], False)\n if result['HasExited']:\n break\n else:\n raise MicroManagerError(\"Process with name'{}' and PID '{}' would not exit on machine '{}'.\".format(procname, proc['PID'], self.hostname))", "def teardown_autokill(module_name):\r\n killer = _KILLER_THREADS.get(module_name)\r\n if killer is not None:\r\n killer.cancel()", "def shutdown_all(self, now=False):", "def stop(self):\n for process in self.process:\n process.stop()", "def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )", "def kill(self):\n\n self.proc.kill()", "def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def power_off(self):\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.power_off(manager=self.manager)\n except:\n self.logger.error(\"Error with VM '%s'\" % vm.name)\n raise", "def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n i.terminate_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n True,\n )\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n [],\n False,\n )", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def kill(self):\n self._exit = True", "def __call__(self):\n for tmp_file in filter(lambda x: x.exists(), self.temp_files):\n tmp_file.unlink()\n\n for proc in self.processes:\n try:\n os.kill(proc, signal.SIGTERM)\n except ProcessLookupError:\n pass", "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Change time.sleep to wait until process of same service but different PID is up and running\n time.sleep(0.5)", "def kill_celery():\n try:\n subprocess.call(\n ['celery', 'multi', 'stop', '2', '-A', 'celery_worker.celery', '--logfile=celery_logs/celery-worker-%n.log',\n '--pidfile=celery_logs/celery-worker-%n.pid'])\n os.system('pkill -f celery')\n except Exception:\n click.echo('Exception occurred. 
Run code locally')", "def on_StopNode_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n print(\"We will kill all gman process!\")\n reply = QMessageBox.question(self, '确认', '确认kill所有gman任务吗', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n autokillGman()\n self.OnlyDisplay(\"kill -9 |grep gman\")\n else:\n print(\"Keep GMAN run.......!\")", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def shutdown_system():\n yield None\n active = active_children()\n for child in active:\n child.kill()", "def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()", "def _kill_kernel(self):", "def phone_kill(self) -> None:", "def stop_run(arn=None):\n pass", "def kill_running_es_instances(node_prefix):\n def elasticsearch_process(p):\n return p.name() == \"java\" and any(\"elasticsearch\" in e for e in p.cmdline()) and any(\"node.name=rally\" in e for e in p.cmdline())\n\n logger.info(\"Killing all processes which match [java], [elasticsearch] and [%s]\" % node_prefix)\n kill_all(elasticsearch_process)", "def stop_master_worker():\n print(\"Stopping master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True", "def check_stop(cmd, filterstr, retry=1, nrinstances=0):\n\n found = get_filtered_pids(filterstr)\n for i in range(retry):\n if len(found) == nrinstances:\n return\n # print \"START:%s\"%cmd\n execute(cmd, die=False)\n time.sleep(1)\n found = get_filtered_pids(filterstr)\n for item in found:\n kill(int(item), 9)\n found = get_filtered_pids(filterstr)\n\n if len(found) != 0:\n raise j.exceptions.RuntimeError(\"could not stop %s, found %s nr of instances.\" % (cmd, len(found)))", "def kill(self):\n self.error_code = 'KILLED'\n self.running = False", "def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass", "def stopEngine(self, engine_id):\n for access_id, v in self.engine_instances.iteritems():\n if engine_id == v:\n self.processManager.stopProcess(engine_id)\n del self.engine_instances[access_id]\n break", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def _destruct(self, should_close=False):\n if (self.running or should_close) and not self.existing:\n self.running = False\n self._starting = False\n\n\n # Wait for the process to start.\n time.sleep(1)\n # kill the minecraft process and its subprocesses\n try:\n shutil.rmtree(self.instance_dir)\n except:\n print(\"Failed to delete the temporary minecraft directory.\")\n\n if self._kill_minecraft_via_malmoenv(self.host, self.port):\n # Let the minecraft process term on its own terms.\n time.sleep(2)\n\n # Now lets try and end the process if anything is laying around\n try:\n InstanceManager._reap_process_and_children(psutil.Process(self.minecraft_process.pid))\n except psutil.NoSuchProcess: \n pass\n\n self.watcher_process.terminate()\n\n if self in InstanceManager._instance_pool:\n InstanceManager._instance_pool.remove(self)\n self.release_lock()\n pass", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def kill(self):\n # stuff\n pygame.sprite.Sprite.kill(self)", "def 
manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def kill(self):\n self.active = False\n self.wakeup()\n self.join()", "def kill(self):\n #overridden for documentation purposes\n stackless.tasklet.kill(self)", "def stop(self):\n self.scion_sh('stop')", "def stopdaemons(self):\n # TODO: we may want to improve this if we had the PIDs from the\n # specific EMANE daemons that we\"ve started\n cmd = [\"killall\", \"-q\", \"emane\"]\n stop_emane_on_host = False\n if emane.VERSION > emane.EMANE091:\n for node in self.getnodes():\n if hasattr(node, \"transport_type\") and \\\n node.transport_type == \"raw\":\n stop_emane_on_host = True\n continue\n if node.up:\n node.cmd(cmd, wait=False)\n # TODO: RJ45 node\n else:\n stop_emane_on_host = True\n if stop_emane_on_host:\n subprocess.call(cmd)\n subprocess.call([\"killall\", \"-q\", \"emanetransportd\"])", "def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)", "def rdp_kill_all():\n logger.info(\"Kill all RDP sessions\")\n os.system(\"taskkill /FI \\\"IMAGENAME eq mstsc.exe\\\" /F\")", "def killTests(self):\n self.ledController.blink()\n if self.session is not None:\n print(\"ProggerStateManager: Terminating tests...\")\n self.session.terminate()\n try:\n print(\"ProggerStateManager: Waiting for tests to terminate...\")\n self.session.wait(3)\n print(\"ProggerStateManager: Tests have been Terminated.\")\n except:\n print(\"ProggerStateManager: Forced to kill the tests....\", sys.exc_info()[0])\n traceback.print_exc()\n if self.session is not None:\n self.session.kill()\n time.sleep(3)\n self.session = None", "def kill_sync_instance_by_pid(self, pid):\n # Get the list of known pids to ensure we only kill one of those\n running_data = self.data_storage.running_data\n\n self.logger.debug(\n \"Attempting to kill PID '\" + str(pid) + \"'\"\n )\n\n known_pids = []\n\n # Gets PIDs of all the known unison processes\n known_pids = [int(running_data[d]['pid']) for d in running_data]\n\n # TODO: Rewrite this function, it can probably be done with reduce()\n # RESOLUTION: Rewritten above, this kept in case it doesn't work\n # for entry in running_data:\n # running_data[entry]\n # known_pids.append(int(running_data[entry]['pid']))\n\n # TODO: Finish this error checking logic here, currently it doesn't check the PID\n\n # Try and kill with sigint (same as ctrl+c), if we are allowed to\n\n # First make sure the process exists\n if not psutil.pid_exists(pid):\n self.logger.info(\n \"PID \" + str(pid) + \" was not found. Perhaps already dead?\"\n )\n return\n\n # Then make sure it's a process we started\n elif pid not in known_pids:\n\n shortmsg = (\n \"PID #\" + str(pid) + \" is not managed by UnisonCTRL. \" +\n \"Refusing to kill. 
See logs for more information.\"\n )\n\n longmsg = (\n \"PID #\" + str(pid) + \" is not managed by UnisonCTRL. \" +\n \"Refusing to kill. Your data files are likely corrupted. \" +\n \"Kill all running unison instances on this system, \" +\n \"delete everything in '\" + self.config['running_data_dir'] +\n \"/*', and run UnisonCTRL again.\"\n )\n\n self.logger.critical(longmsg)\n\n raise RuntimeError(shortmsg)\n\n # Finally, kill the process if it exists and we started it\n else:\n return self.kill_pid(pid)", "def kill(self):\n self.child.kill()", "def kill_server(hosts):\n kill_cmds = [\n \"pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n # Intentionally ignoring the exit status of the command\n pcmd(hosts, \"; \".join(kill_cmds), False, None, None)" ]
[ "0.70153147", "0.68872017", "0.68046296", "0.6784485", "0.6766652", "0.6715903", "0.6670214", "0.660583", "0.660212", "0.6543471", "0.65000117", "0.65000117", "0.64805627", "0.6475557", "0.6455918", "0.6429913", "0.6328956", "0.6207044", "0.6205789", "0.62014425", "0.6166641", "0.6160382", "0.61603194", "0.6150022", "0.61052334", "0.6098099", "0.60844517", "0.60338074", "0.60096866", "0.59968233", "0.5989136", "0.5988512", "0.59746593", "0.5960365", "0.5960031", "0.5957743", "0.5955842", "0.59464264", "0.5940313", "0.5933435", "0.5930457", "0.5929705", "0.5924717", "0.5914253", "0.5913876", "0.5898448", "0.588844", "0.5880566", "0.58785594", "0.586979", "0.5867231", "0.5866018", "0.5861588", "0.5850378", "0.58492965", "0.5844189", "0.58359486", "0.5835349", "0.5831087", "0.5830862", "0.5829462", "0.5819718", "0.581655", "0.58089453", "0.58032256", "0.5799489", "0.5794741", "0.5793987", "0.5792557", "0.5789079", "0.5775399", "0.57684094", "0.57682276", "0.5763132", "0.57576936", "0.57439065", "0.5742906", "0.5738626", "0.5733267", "0.5731994", "0.5724912", "0.5719949", "0.57080275", "0.57023656", "0.56986284", "0.56957686", "0.56929785", "0.56856894", "0.56856894", "0.56842333", "0.5681343", "0.5680188", "0.56636506", "0.5661527", "0.5656785", "0.5655603", "0.5647921", "0.5646702", "0.56462187", "0.5644744" ]
0.7285985
0
launch the instances of this starter with optional arguments
def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False):
    for instance_type in which_instances:
        for instance in self.all_instances:
            if instance.instance_type == instance_type:
                if kill_instance:
                    instance.kill_instance()
                instance.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    moreargs,
                    waitpid,
                )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self, **kwargs) -> None:\n ...", "def start( *args, **kwargs ):", "def launch(self):", "def run(self, args):\n\n return", "def run(self, args):\n pass", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def main(cls, *args, **kwargs):\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)", "def main(args=None):", "def main(args=None):", "def run(self, args, **kwargs):\n raise NotImplementedError()", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def run(self, *args, **kwargs):\n pass", "def run(self, **kwargs):", "def run(self, **kwargs):\n pass", "def _start(args=None):\n options = _parse_args(args)\n main(**options)", "def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)", "def _runner(self, classpath, main, jvm_options, args):", "def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()", "def Run(self, args):\n pass", "def main(args):", "def main(args):", "def quick_run(self, *args):\n self.inputs(*args)\n self.run()", "def main(self, options):\n raise NotImplementedError", "def startup(self, override_args=None):\n self._app_name = sys.argv[0]\n if override_args:\n my_args = [self._app_name]\n my_args = my_args + override_args\n else:\n my_args = sys.argv[:]\n\n if len(my_args) != 2:\n print(\"\"\"Usage:\n\n {0} sitl\n run with built-in SITL simulator\n {0} render\n just render the behaviour tree\n {0} <connection string>\n connect as prescribed and fly the mission\"\"\".format((self._app_name)))\n elif my_args[1] == 'sitl':\n self._sitl = dronekit_sitl.start_default(lat=self._sitl_lat,\n lon=self._sitl_lon)\n self._connection_string = self._sitl.connection_string()\n print(\"Using SITL via {}\".format(self._connection_string))\n self.connect()\n elif my_args[1] == 'render':\n print(\"Rendering only\")\n self.render()\n else:\n self._connection_string = my_args[1]\n print(f\"Attempting to connect via {self._connection_string}\")\n self.connect()", "def instances(args, config):\n print('Does something? 
More to come.')", "def main(args=None):\n pass", "def launch(config):\n \n launch_with_configs([config])", "def main() -> None:\n init(args=sys.argv[1:])", "def run(self, args: argparse.Namespace) -> None:\n pass", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def entrypoint(cls):\n try:\n cls().run(sys.argv[1:])\n except KeyboardInterrupt:\n pass", "def main(self, params):\n pass", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def __init__(self, args):\n super().__init__()\n self.args = args\n # get the controller using the command line arguments\n self.get_controller(args)", "def initialise(self, args, environ):", "def main(args=None):\n args = args or sys.argv[1:]\n parser = parse_options()\n common.main_cli(experiment_parse_and_run, parser, args)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def Start(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)", "def Start(self, *args, **kwargs):\r\n\t\tpayload = { \"Arg1\": self }\r\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\r\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\r\n\t\treturn self._execute('start', payload=payload, response_object=None)", "def run():\n # main(sys.argv[1:])\n main()", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def run(self, **kwargs):\n app = self.create_app()\n\n app.run(host=self.host, port=self.port, **kwargs)", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def startapp():", "def __init__(self, args):\n super().__init__()\n self.args = args", "def __init__(self, args, logger: MainLogger, log_start_t=0):\n\n super().__init__(args, logger)\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n # Find id of the first policy team - Only supported for one policy team in the build plan\n teams = args.env_args[\"match_build_plan\"]\n self.policy_team_id = get_policy_team_id(teams)\n if self.args.headless_controls:\n controls = HeadlessControls(env=self.env)\n controls.daemon = True\n controls.start()\n\n self.episode_limit = self.env.episode_limit\n self.t = 0 # current time 
step within the episode\n self.log_start_t = log_start_t # timestep to start logging from\n self.t_env = 0 # total time steps for this runner in the provided environment across multiple episodes\n self.phi: FeatureFunction = feature_func_REGISTRY[self.args.sfs] if self.args.sfs else None\n self.home_batch = None\n self.home_mac = None\n self.new_batch_fn = None", "def start_app():\n args = parse_args()\n data = MutualExclusionData(args)\n if args.method == 'pingpong':\n ping_pong.create_all(data)\n elif args.method == 'ricart_agrawala':\n ricart_agrawala.create_all(data)\n elif args.method == 'lamport':\n lamport.create_all(data)\n else:\n raise ValueError(\n 'Unsupported method. Please choose pingpong or ricart_agrawala')\n\n print 'Starting processes'\n try:\n for proc in data.processes:\n proc.start()\n for proc in data.processes:\n proc.join()\n finally:\n data.close()\n print 'Pipe closed'", "def main(self, params):\n raise NotImplementedError('main() must be implemented.')", "def __init__(self, args):\n\n self._mapp = {\n 'top_ips': ATopIps,\n 'request_rate': ARequests,\n 'top_sources': ATopSources\n }\n\n self.active = False\n self.ag = self.setup(args)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main(args=None):\n\n program = Program(\n name='Ansible Customer Invoke taks to run \"ansible-galaxy\" commands',\n namespace=Collection.from_module(ansible_galaxy_tasks),\n version='0.1.0-alpha+001')\n\n program.run(args)", "def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)", "def run(self, **kwargs: Any) -> None:\n raise NotImplementedError", "def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"start\", payload=payload, response_object=None)", "def run(self, *args, **kwargs) -> typing.Any:\n pass", "def __init__(self, **kwargs):\n\n self.options = {**self.DEFAULTS, **kwargs}\n self.engine = self.start_matlab_engine()\n self.spm_directory = self.get_spm_directory()", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def main(self):\n\n def 
_run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', '--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def __init__(self, args):\n if len(args) < 5:\n raise ProgArgumentsErr(args[0])\n else:\n (config_file_name, clients_file_name, transactions_file_name, compnies_file_name) = args[1:5]\n \n Application.setConfigFileName(config_file_name)\n Application.setClientsFileName(clients_file_name)\n Application.setTransactiosFileName(transactions_file_name)\n Application.setCompaniesFileName(compnies_file_name)\n \n self.clientMgr = ClientManager.getInstance()\n self.tradesAppl = TradingApplication.getInstance()\n self.sec = SecurityManager.getInstance()", "def __init__(__self__, *,\n app_initial_activity: Optional[pulumi.Input[str]] = None,\n bootstrap_package_id: Optional[pulumi.Input[str]] = None,\n bootstrap_runner_class: Optional[pulumi.Input[str]] = None,\n max_depth: Optional[pulumi.Input[int]] = None,\n max_steps: Optional[pulumi.Input[int]] = None):\n if app_initial_activity is not None:\n pulumi.set(__self__, \"app_initial_activity\", app_initial_activity)\n if bootstrap_package_id is not None:\n pulumi.set(__self__, \"bootstrap_package_id\", bootstrap_package_id)\n if bootstrap_runner_class is not None:\n pulumi.set(__self__, \"bootstrap_runner_class\", bootstrap_runner_class)\n if max_depth is not None:\n pulumi.set(__self__, \"max_depth\", max_depth)\n if max_steps is not None:\n pulumi.set(__self__, \"max_steps\", max_steps)", "def main(cls):\n raise NotImplementedError", "def __init__(self, *args):\n \n self.steps = args", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n 
xm.launch_experiment(description, experiment)", "def main(args):\n app = Application()\n if args and args.markets:\n app.set_markets(args.markets)\n if args and args.symbols:\n app.set_symbols(args.symbols)\n app.print_message()\n\n if args and app.markets:\n file_path = './../'\n\n scrapper = scrapping.Scrapper(app.markets)\n scrapper.get_symbols(f\"{file_path}data/stocks.json\")\n\n if len(app.symbols) > 0:\n companies = {}\n for symbol in app.symbols:\n file_name = f\"{file_path}data/{symbol}_financials.json\"\n companies[symbol] =\\\n scrapper.get_fundamental_analysis(symbol,\n file_name)\n print(companies)\n analysis_companies = analysis.Analyze(companies, app.symbols)\n result = analysis_companies.calculate()\n print(result)\n\n logger.info(args)", "def start():\n trio.run(_main)", "def run(self, *args, **kwargs):\n raise NotImplementedError('Tasks must define the run method.')", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def main():\n args = parseCommandLineArguments()\n\n\n chemkin1 = args.chemkin1[0]\n speciesDict1 = args.speciesDict1[0]\n if args.thermo1: \n thermo1 = args.thermo1[0]\n else:\n thermo1 = None\n chemkin2 = args.chemkin2[0]\n speciesDict2 = args.speciesDict2[0]\n if args.thermo2: \n thermo2 = args.thermo2[0]\n else:\n thermo2 = None\n\n kwargs = {\n 'web': args.web,\n 'wd': os.getcwd()\n }\n\n execute(chemkin1, speciesDict1, thermo1, chemkin2, speciesDict2, thermo2, **kwargs)", "def main(self, args=None, **extra):\n if args is None:\n args = get_os_args()\n return super().main(args=preprocess_argument_list(args), **extra)", "def launch(self, *prelaunch: [callable, [callable, list]], show_all=True):\n self.present()\n if show_all:\n self.show_all()\n for x in prelaunch:\n if len(x) == 1:\n x[0]()\n else:\n x[0](*x[1:])\n Gtk.main()", "def _run_args(cls, args: Optional[List[str]] = None):\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def __init__(self, args):\n self.args = args", "def run_starter(self, expect_to_fail=False):", "def run(self, main, **kwargs):\n logger = logging.getLogger(\"HorovodRunner\")\n logger.warning(\n \"You are running the open-source version of HorovodRunner. \"\n \"It only does basic checks and invokes the main function, \"\n \"which is for local development only. 
\"\n \"Please use Databricks Runtime ML 5.0+ to distribute the job.\")\n main(**kwargs)", "def setup(self, app_args):\n raise NotImplementedError", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id$\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option( \"-p\", \"--proc\", dest=\"processors\", type=\"int\",\n help = \"use # processors [%default]\" )\n\n parser.set_defaults(\n processors = 1 )\n\n\n options, args = E.Start( parser, argv = argv )\n\n t1 = Test( RunnerGat, \n small_test_segmented_workspaces(), \n [ ValidatorNumSamples,\n ValidatorSegmentDistribution ] )\n\n t1.run( options.stdout, \n processors = options.processors )\n\n E.Stop()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def __init__(self, args=False):\n self.args = args", "def main(properties=properties, options=options, **custom_options):\n return init(**dict(options, **custom_options))(**properties)", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def main() -> None:\n args = _get_arguments()\n\n file_level_logging = logging.DEBUG if args.log_to_file else None\n setup_logger(logging.INFO, file_level_logging)\n\n if not os.path.exists(args.smiles):\n mol = Molecule(smiles=args.smiles)\n if mol.rd_mol is None:\n logger().error(\n f\"The --smiles argument ({args.smiles})\"\n \" does not point to an existing file or is a valid RDKit SMILES.\"\n \" Cannot start retrosynthesis planning.\"\n )\n return\n\n if args.nproc:\n _multiprocess_smiles(args)\n return\n\n multi_smiles = os.path.exists(args.smiles)\n\n finder = AiZynthFinder(configfile=args.config)\n _select_stocks(finder, args)\n post_processing = _load_postprocessing_jobs(args.post_processing)\n finder.expansion_policy.select(args.policy or finder.expansion_policy.items[0])\n if args.filter:\n finder.filter_policy.select(args.filter)\n else:\n finder.filter_policy.select_all()\n\n params = [\n args.smiles,\n finder,\n args.output,\n args.cluster,\n args.route_distance_model,\n post_processing,\n args.checkpoint,\n ]\n if multi_smiles:\n _process_multi_smiles(*params)\n else:\n params = params[:-1]\n _process_single_smiles(*params)", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if 
args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def Start(self, *args, **kwargs):\n\t\tpayload = { \"Arg1\": self.href }\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\n\t\treturn self._execute('start', payload=payload, response_object=None)", "def train_entry_point():", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs" ]
[ "0.7010809", "0.68640345", "0.68409586", "0.678276", "0.6754254", "0.6719668", "0.6687372", "0.66585034", "0.66585034", "0.66445416", "0.66290414", "0.6617419", "0.6617419", "0.6592643", "0.65777135", "0.65322596", "0.6527857", "0.65144086", "0.6507485", "0.64961606", "0.6448825", "0.64197844", "0.64197844", "0.6368987", "0.63543594", "0.63221514", "0.63027006", "0.62864465", "0.6264844", "0.6264259", "0.62549114", "0.62048197", "0.61806434", "0.6165215", "0.616126", "0.61605084", "0.6115339", "0.6091965", "0.60798967", "0.60775757", "0.60756487", "0.60455185", "0.60320795", "0.6029253", "0.6015803", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59926903", "0.59926903", "0.5986971", "0.59849024", "0.59682286", "0.5966443", "0.59584504", "0.5948167", "0.5945795", "0.5945795", "0.5943931", "0.59434986", "0.59351856", "0.5918149", "0.59156996", "0.5909986", "0.59042156", "0.58847505", "0.58766466", "0.5875019", "0.5870938", "0.5869132", "0.5867363", "0.5857622", "0.58441377", "0.58293694", "0.5829088", "0.58260745", "0.5817269", "0.5817001", "0.5814678", "0.58142847", "0.58138", "0.5813413", "0.5799856", "0.57968825", "0.5795155", "0.5794701", "0.5793296", "0.57905465", "0.5785022", "0.5772392", "0.5771418", "0.5771418", "0.5771418", "0.57674897", "0.5766518", "0.57651865" ]
0.0
-1
launch the instances of this starter with optional arguments
def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):
    for instance_type in which_instances:
        for i in self.all_instances:
            if i.instance_type == instance_type:
                if kill_instance:
                    i.kill_instance()
                i.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    moreargs,
                    waitpid,
                )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self, **kwargs) -> None:\n ...", "def start( *args, **kwargs ):", "def launch(self):", "def run(self, args):\n\n return", "def run(self, args):\n pass", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def main(cls, *args, **kwargs):\n assert not (bool(args) and bool(kwargs))\n if args:\n return cls._run_args(args)\n elif kwargs:\n return cls._run_kwargs(kwargs)\n else:\n return cls._run_args(None)", "def main(args=None):", "def main(args=None):", "def run(self, args, **kwargs):\n raise NotImplementedError()", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def run(self, *args, **kwargs):\n pass", "def run(self, **kwargs):", "def run(self, **kwargs):\n pass", "def _start(args=None):\n options = _parse_args(args)\n main(**options)", "def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)", "def _runner(self, classpath, main, jvm_options, args):", "def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()", "def Run(self, args):\n pass", "def main(args):", "def main(args):", "def quick_run(self, *args):\n self.inputs(*args)\n self.run()", "def main(self, options):\n raise NotImplementedError", "def startup(self, override_args=None):\n self._app_name = sys.argv[0]\n if override_args:\n my_args = [self._app_name]\n my_args = my_args + override_args\n else:\n my_args = sys.argv[:]\n\n if len(my_args) != 2:\n print(\"\"\"Usage:\n\n {0} sitl\n run with built-in SITL simulator\n {0} render\n just render the behaviour tree\n {0} <connection string>\n connect as prescribed and fly the mission\"\"\".format((self._app_name)))\n elif my_args[1] == 'sitl':\n self._sitl = dronekit_sitl.start_default(lat=self._sitl_lat,\n lon=self._sitl_lon)\n self._connection_string = self._sitl.connection_string()\n print(\"Using SITL via {}\".format(self._connection_string))\n self.connect()\n elif my_args[1] == 'render':\n print(\"Rendering only\")\n self.render()\n else:\n self._connection_string = my_args[1]\n print(f\"Attempting to connect via {self._connection_string}\")\n self.connect()", "def instances(args, config):\n print('Does something? 
More to come.')", "def main(args=None):\n pass", "def launch(config):\n \n launch_with_configs([config])", "def main() -> None:\n init(args=sys.argv[1:])", "def run(self, args: argparse.Namespace) -> None:\n pass", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def entrypoint(cls):\n try:\n cls().run(sys.argv[1:])\n except KeyboardInterrupt:\n pass", "def main(self, params):\n pass", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def __init__(self, args):\n super().__init__()\n self.args = args\n # get the controller using the command line arguments\n self.get_controller(args)", "def initialise(self, args, environ):", "def main(args=None):\n args = args or sys.argv[1:]\n parser = parse_options()\n common.main_cli(experiment_parse_and_run, parser, args)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def Start(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)", "def Start(self, *args, **kwargs):\r\n\t\tpayload = { \"Arg1\": self }\r\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\r\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\r\n\t\treturn self._execute('start', payload=payload, response_object=None)", "def run():\n # main(sys.argv[1:])\n main()", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def run(self, **kwargs):\n app = self.create_app()\n\n app.run(host=self.host, port=self.port, **kwargs)", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def run(self, **kwargs) -> None:\n raise NotImplementedError()", "def startapp():", "def __init__(self, args):\n super().__init__()\n self.args = args", "def __init__(self, args, logger: MainLogger, log_start_t=0):\n\n super().__init__(args, logger)\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n # Find id of the first policy team - Only supported for one policy team in the build plan\n teams = args.env_args[\"match_build_plan\"]\n self.policy_team_id = get_policy_team_id(teams)\n if self.args.headless_controls:\n controls = HeadlessControls(env=self.env)\n controls.daemon = True\n controls.start()\n\n self.episode_limit = self.env.episode_limit\n self.t = 0 # current time 
step within the episode\n self.log_start_t = log_start_t # timestep to start logging from\n self.t_env = 0 # total time steps for this runner in the provided environment across multiple episodes\n self.phi: FeatureFunction = feature_func_REGISTRY[self.args.sfs] if self.args.sfs else None\n self.home_batch = None\n self.home_mac = None\n self.new_batch_fn = None", "def start_app():\n args = parse_args()\n data = MutualExclusionData(args)\n if args.method == 'pingpong':\n ping_pong.create_all(data)\n elif args.method == 'ricart_agrawala':\n ricart_agrawala.create_all(data)\n elif args.method == 'lamport':\n lamport.create_all(data)\n else:\n raise ValueError(\n 'Unsupported method. Please choose pingpong or ricart_agrawala')\n\n print 'Starting processes'\n try:\n for proc in data.processes:\n proc.start()\n for proc in data.processes:\n proc.join()\n finally:\n data.close()\n print 'Pipe closed'", "def main(self, params):\n raise NotImplementedError('main() must be implemented.')", "def __init__(self, args):\n\n self._mapp = {\n 'top_ips': ATopIps,\n 'request_rate': ARequests,\n 'top_sources': ATopSources\n }\n\n self.active = False\n self.ag = self.setup(args)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main(args=None):\n\n program = Program(\n name='Ansible Customer Invoke taks to run \"ansible-galaxy\" commands',\n namespace=Collection.from_module(ansible_galaxy_tasks),\n version='0.1.0-alpha+001')\n\n program.run(args)", "def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)", "def run(self, **kwargs: Any) -> None:\n raise NotImplementedError", "def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"start\", payload=payload, response_object=None)", "def run(self, *args, **kwargs) -> typing.Any:\n pass", "def __init__(self, **kwargs):\n\n self.options = {**self.DEFAULTS, **kwargs}\n self.engine = self.start_matlab_engine()\n self.spm_directory = self.get_spm_directory()", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def main(self):\n\n def 
_run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', '--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def __init__(self, args):\n if len(args) < 5:\n raise ProgArgumentsErr(args[0])\n else:\n (config_file_name, clients_file_name, transactions_file_name, compnies_file_name) = args[1:5]\n \n Application.setConfigFileName(config_file_name)\n Application.setClientsFileName(clients_file_name)\n Application.setTransactiosFileName(transactions_file_name)\n Application.setCompaniesFileName(compnies_file_name)\n \n self.clientMgr = ClientManager.getInstance()\n self.tradesAppl = TradingApplication.getInstance()\n self.sec = SecurityManager.getInstance()", "def __init__(__self__, *,\n app_initial_activity: Optional[pulumi.Input[str]] = None,\n bootstrap_package_id: Optional[pulumi.Input[str]] = None,\n bootstrap_runner_class: Optional[pulumi.Input[str]] = None,\n max_depth: Optional[pulumi.Input[int]] = None,\n max_steps: Optional[pulumi.Input[int]] = None):\n if app_initial_activity is not None:\n pulumi.set(__self__, \"app_initial_activity\", app_initial_activity)\n if bootstrap_package_id is not None:\n pulumi.set(__self__, \"bootstrap_package_id\", bootstrap_package_id)\n if bootstrap_runner_class is not None:\n pulumi.set(__self__, \"bootstrap_runner_class\", bootstrap_runner_class)\n if max_depth is not None:\n pulumi.set(__self__, \"max_depth\", max_depth)\n if max_steps is not None:\n pulumi.set(__self__, \"max_steps\", max_steps)", "def main(cls):\n raise NotImplementedError", "def __init__(self, *args):\n \n self.steps = args", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n 
xm.launch_experiment(description, experiment)", "def main(args):\n app = Application()\n if args and args.markets:\n app.set_markets(args.markets)\n if args and args.symbols:\n app.set_symbols(args.symbols)\n app.print_message()\n\n if args and app.markets:\n file_path = './../'\n\n scrapper = scrapping.Scrapper(app.markets)\n scrapper.get_symbols(f\"{file_path}data/stocks.json\")\n\n if len(app.symbols) > 0:\n companies = {}\n for symbol in app.symbols:\n file_name = f\"{file_path}data/{symbol}_financials.json\"\n companies[symbol] =\\\n scrapper.get_fundamental_analysis(symbol,\n file_name)\n print(companies)\n analysis_companies = analysis.Analyze(companies, app.symbols)\n result = analysis_companies.calculate()\n print(result)\n\n logger.info(args)", "def start():\n trio.run(_main)", "def run(self, *args, **kwargs):\n raise NotImplementedError('Tasks must define the run method.')", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def main():\n args = parseCommandLineArguments()\n\n\n chemkin1 = args.chemkin1[0]\n speciesDict1 = args.speciesDict1[0]\n if args.thermo1: \n thermo1 = args.thermo1[0]\n else:\n thermo1 = None\n chemkin2 = args.chemkin2[0]\n speciesDict2 = args.speciesDict2[0]\n if args.thermo2: \n thermo2 = args.thermo2[0]\n else:\n thermo2 = None\n\n kwargs = {\n 'web': args.web,\n 'wd': os.getcwd()\n }\n\n execute(chemkin1, speciesDict1, thermo1, chemkin2, speciesDict2, thermo2, **kwargs)", "def main(self, args=None, **extra):\n if args is None:\n args = get_os_args()\n return super().main(args=preprocess_argument_list(args), **extra)", "def launch(self, *prelaunch: [callable, [callable, list]], show_all=True):\n self.present()\n if show_all:\n self.show_all()\n for x in prelaunch:\n if len(x) == 1:\n x[0]()\n else:\n x[0](*x[1:])\n Gtk.main()", "def _run_args(cls, args: Optional[List[str]] = None):\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def __init__(self, args):\n self.args = args", "def run_starter(self, expect_to_fail=False):", "def run(self, main, **kwargs):\n logger = logging.getLogger(\"HorovodRunner\")\n logger.warning(\n \"You are running the open-source version of HorovodRunner. \"\n \"It only does basic checks and invokes the main function, \"\n \"which is for local development only. 
\"\n \"Please use Databricks Runtime ML 5.0+ to distribute the job.\")\n main(**kwargs)", "def setup(self, app_args):\n raise NotImplementedError", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id$\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option( \"-p\", \"--proc\", dest=\"processors\", type=\"int\",\n help = \"use # processors [%default]\" )\n\n parser.set_defaults(\n processors = 1 )\n\n\n options, args = E.Start( parser, argv = argv )\n\n t1 = Test( RunnerGat, \n small_test_segmented_workspaces(), \n [ ValidatorNumSamples,\n ValidatorSegmentDistribution ] )\n\n t1.run( options.stdout, \n processors = options.processors )\n\n E.Stop()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def __init__(self, args=False):\n self.args = args", "def main(properties=properties, options=options, **custom_options):\n return init(**dict(options, **custom_options))(**properties)", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def main() -> None:\n args = _get_arguments()\n\n file_level_logging = logging.DEBUG if args.log_to_file else None\n setup_logger(logging.INFO, file_level_logging)\n\n if not os.path.exists(args.smiles):\n mol = Molecule(smiles=args.smiles)\n if mol.rd_mol is None:\n logger().error(\n f\"The --smiles argument ({args.smiles})\"\n \" does not point to an existing file or is a valid RDKit SMILES.\"\n \" Cannot start retrosynthesis planning.\"\n )\n return\n\n if args.nproc:\n _multiprocess_smiles(args)\n return\n\n multi_smiles = os.path.exists(args.smiles)\n\n finder = AiZynthFinder(configfile=args.config)\n _select_stocks(finder, args)\n post_processing = _load_postprocessing_jobs(args.post_processing)\n finder.expansion_policy.select(args.policy or finder.expansion_policy.items[0])\n if args.filter:\n finder.filter_policy.select(args.filter)\n else:\n finder.filter_policy.select_all()\n\n params = [\n args.smiles,\n finder,\n args.output,\n args.cluster,\n args.route_distance_model,\n post_processing,\n args.checkpoint,\n ]\n if multi_smiles:\n _process_multi_smiles(*params)\n else:\n params = params[:-1]\n _process_single_smiles(*params)", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if 
args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def Start(self, *args, **kwargs):\n\t\tpayload = { \"Arg1\": self.href }\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\n\t\treturn self._execute('start', payload=payload, response_object=None)", "def train_entry_point():", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs" ]
[ "0.7010809", "0.68640345", "0.68409586", "0.678276", "0.6754254", "0.6719668", "0.6687372", "0.66585034", "0.66585034", "0.66445416", "0.66290414", "0.6617419", "0.6617419", "0.6592643", "0.65777135", "0.65322596", "0.6527857", "0.65144086", "0.6507485", "0.64961606", "0.6448825", "0.64197844", "0.64197844", "0.6368987", "0.63543594", "0.63221514", "0.63027006", "0.62864465", "0.6264844", "0.6264259", "0.62549114", "0.62048197", "0.61806434", "0.6165215", "0.616126", "0.61605084", "0.6115339", "0.6091965", "0.60798967", "0.60775757", "0.60756487", "0.60455185", "0.60320795", "0.6029253", "0.6015803", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59939927", "0.59926903", "0.59926903", "0.5986971", "0.59849024", "0.59682286", "0.5966443", "0.59584504", "0.5948167", "0.5945795", "0.5945795", "0.5943931", "0.59434986", "0.59351856", "0.5918149", "0.59156996", "0.5909986", "0.59042156", "0.58847505", "0.58766466", "0.5875019", "0.5870938", "0.5869132", "0.5867363", "0.5857622", "0.58441377", "0.58293694", "0.5829088", "0.58260745", "0.5817269", "0.5817001", "0.5814678", "0.58142847", "0.58138", "0.5813413", "0.5799856", "0.57968825", "0.5795155", "0.5794701", "0.5793296", "0.57905465", "0.5785022", "0.5772392", "0.5771418", "0.5771418", "0.5771418", "0.57674897", "0.5766518", "0.57651865" ]
0.0
-1
kill, launch the instances of this starter with optional arguments and restart
def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):
    for instance_type in which_instances:
        for i in self.all_instances:
            if i.instance_type == instance_type:
                i.terminate_instance()
                i.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    moreargs,
                    True,
                )
                i.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    [],
                    False,
                )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "async def kill(self, restart: bool = False) -> None:\n pass", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? \" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n if kill_instance:\n instance.kill_instance()\n instance.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def portkill_main(args=sys.argv[1:]):\n # Probably should use optparse or some such.\n kw = {}\n if '-v' in args:\n kw['verbose'] = True\n args = [a for a in args if a != '-v']\n if '-s' in args:\n index = args.index('-s')\n kw['sleeptime'] = args[index + 1]\n args = args[:index] + args[index+2:]\n portkill(*args, **kw)\n return 0", "def killExperiment(self, **kwargs):\n if kwargs['kill']=='YES':\n killRobot.sshKill()", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def restart(reason, *args, **kwargs):\n logging.info(\"Restarting: %s\" % reason)\n os.execv(sys.argv[0], sys.argv)", "def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def restart():\n stop()\n start()", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def remote_kill():", "def restart():\n run_commands('python manage.py supervisor restart all')", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # 
workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def restart(self):\n self.stop()\n self.start(init=False)", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)", "def restart(*args, **kwargs):\n return restart_type(args, kwargs)", "def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def main():\n args = parser.parse_args()\n terminate(\n args.uuid,\n args.time,\n args.limit,\n skip=args.skip\n )", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def restart(self):\n\n self.stop()\n self.start()", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n\n #Kill processes\n print('Restarting scan...... 
\\n')\n self.kill()\n\n #Delete crawler\n del self.crawler\n self.crawler = self.generate_crawler()\n\n #Give ourselves a second\n time.sleep(2)", "def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")", "def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()", "async def terminate(self, restart=False) -> None:\n pass", "def stop_run(arn=None):\n pass", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def respawn_instance(self, version, moreargs=None, wait_for_logfile=True):\n assert version is not None\n self.cfg.version = version\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n if moreargs is not None:\n args.extend(moreargs)\n\n logging.info(\"StarterManager: respawning instance %s\", str(args))\n self.instance = psutil.Popen(args)\n self.pid = self.instance.pid\n self.ppid = self.instance.ppid()\n print(\"respawned with PID:\" + str(self.instance.pid))\n if wait_for_logfile:\n self.wait_for_logfile()\n self.wait_for_port_bind()\n else:\n print(\"Waiting for starter to exit\")\n print(\"Starter exited %d\" % self.instance.wait())", "def kill_specific_instance(self, which_instances):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n instance.terminate_instance()", "def RestartApp(argv_extra):\n\n p = psutil.Process(os.getpid())\n for handler in p.open_files() + p.connections():\n if handler.fd != -1:\n os.close(handler.fd)\n\n exe = sys.executable\n argv = list(sys.argv)\n argv.append(argv_extra)\n\n os.execl(exe, exe, *argv)", "def restart(self) -> None:", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n \n self.killSlavePids()", "def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)", "def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def stopProcesses(*args):\n _stopProcessSet(_running)", "def restart(self):", "def restart_scrapy_daemon():\n global 
REPO_BASE_PATH\n logger.info('Scrapy daemon restarting...')\n arguments = ['python'] + [REPO_BASE_PATH+'/deploy/sqs_ranking_spiders/scrapy_daemon.py'] + sys.argv[1:]\n if 'restarted' not in arguments:\n arguments += ['restarted']\n else:\n logger.error('Error while restarting scrapy daemon. '\n 'Already restarted.')\n return\n logging.info('Starting %s with args %s' % (sys.executable, arguments))\n os.execv(sys.executable, arguments)", "def kill_all():\n compose_kill_all()", "def GET_kill(self):\n sys.exit(0)", "def reboot_instance(InstanceId=None):\n pass", "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def restart_celery():\n os.system('flask kill_celery')\n os.system('flask celery')", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def start_instance(InstanceId=None):\n pass", "def restart(self, timestamp=0.0, **keywords):\n self.services.debug('restart() method called')\n pass", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def kill(targets, controller=False):", "def restart_worker_sig_handler(signum, frame):\n worker.logger.warn(\"Restarting celeryd (%s)\" % (\n \" \".join(sys.argv)))\n worker.stop()\n os.execv(sys.executable, [sys.executable] + sys.argv)", "def killAll(controller=False):", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def start_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"start %s\" % item.strip())", "def do_kill(self, args):\n if (len(args.split()) < 1):\n self.__bad_arguments(\"kill\")\n else:\n print \"Killed \" + args + \".\"\n player_killed = args.split()[0]\n AssassinsManager.remove_assassin(self.assassins_manager, player_killed)", "def restartAll(self):\n for name in self.processes:\n self.stopProcess(name)", "def restart_llap(self, env):\n Logger.info(\"Custom Command to retart LLAP\")\n import params\n env.set_params(params)\n\n if params.security_enabled:\n self.do_kinit()\n\n self._llap_stop(env)\n self._llap_start(env)", "def kill(self):\n\n self.proc.kill()", "def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def restart(self):\n pass", "def poweroff(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n log.info(\"Poweroff ...\")", "def stop_scripts():\n print \"*** WARNING ***: This is about to kill all python processes\"\n run(\"killall python\")", "def hook (self, *args, **kwargs):\n self.launch([\"--fastexit\"])", "def crash_instances(self):\n try:\n if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING:\n print(\"generating coredump for \" + str(self.instance))\n gcore = psutil.Popen([\"gcore\", str(self.instance.pid)], cwd=self.basedir)\n print(\"launched GCORE with PID:\" + str(gcore.pid))\n gcore.wait()\n self.kill_instance()\n else:\n print(\"NOT generating coredump for \" + str(self.instance))\n 
except psutil.NoSuchProcess:\n logging.info(\"instance already dead: \" + str(self.instance))\n\n for instance in self.all_instances:\n instance.crash_instance()", "def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))", "def restartFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('start fluidinfo-api-node PORT=%d' % port)\n with settings(warn_only=True):\n sudo('kill -USR1 $(cat /var/run/nginx.pid)')", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: 
{}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def handle(self, *args, **options):\n try:\n with open(\"/gunicorn.pid\") as f:\n pid = int(f.read().strip())\n os.kill(pid, signal.SIGHUP)\n except FileNotFoundError: # Not running a gunicorn process\n subprocess.call([\"supervisorctl\", \"-c\", \"/supervisor_task.conf\",\n \"restart\", \"all\"])", "def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def restart_worker_sig_handler(*args):\n set_in_sighandler(True)\n safe_say('Restarting celeryd (%s)' % (' '.join(sys.argv), ))\n import atexit\n atexit.register(_clone_current_worker)\n from celery.worker import state\n state.should_stop = True", "def _main(argv):\n _in_controlled_env = (2 <= len(argv)) and ('_in_controlled_env' == argv[1])\n\n if not _in_controlled_env:\n return _relaunch_in_controlled_env(argv)\n\n else:\n return _cmd(argv)", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def stopXDAQLaunchers(symbolMap, options):\n\tfor host in symbolMap.allHosts:\n\t\tif options.verbose > 1: print \"Stopping xdaqLauncher for %-20s on %s:%d\" % (host.name, host.host, host.port)\n\t\tsendCmdToLauncher(host.host, host.lport, 'STOPLAUNCHER')", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def stop(args, 
config):\n print('Stops an HPC fleet \"{}\"'.format(args))", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def kill(self, app, **extra_args):\n assert isinstance(\n app, Task), \"Core.kill: passed an `app` argument which is not\"\\\n \" a `Task` instance.\"\n if isinstance(app, Application):\n self.__kill_application(app, **extra_args)\n else:\n self.__kill_task(app, **extra_args)", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def restart(self):\r\n pass", "def exit(self, *args):\n self.stop('all')\n sys.exit(1)", "def restart_program():\r\n\tpython = sys.executable\r\n\tos.execl(python, python, * sys.argv)\r\n\troot = tk.Tk()", "def _restart(self):\n pass", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def kill_celery():\n try:\n subprocess.call(\n ['celery', 'multi', 'stop', '2', '-A', 'celery_worker.celery', '--logfile=celery_logs/celery-worker-%n.log',\n '--pidfile=celery_logs/celery-worker-%n.pid'])\n os.system('pkill -f celery')\n except Exception:\n click.echo('Exception occurred. Run code locally')", "def _kill_launchfile(self):\r\n if len(self.process_list) is 0:\r\n print(\"[ToyCarEpisodeMonitor._terminate()]: no process to terminate\")\r\n else:\r\n for p in self.process_list:\r\n p.send_signal(signal.SIGINT)\r\n while p.poll() is None:\r\n print (\r\n \"[SimulatorEpisodeMonitor._terminate()]: \"\r\n \"simulator process {} termination in progress...\"\r\n ).format(p.pid)\r\n time.sleep(1.0)\r\n print (\r\n \"[ToyCarEpisodeMonitor._terminate()]: \"\r\n \"simulator proc {} terminated with exit code {}\"\r\n ).format(p.pid, p.returncode)\r\n self.process_list = []\r\n print(\"[ToyCarEpisodeMonitor]: termination done!\")\r\n\r\n return" ]
[ "0.69379675", "0.66118777", "0.659606", "0.6544262", "0.6499166", "0.6498725", "0.64666843", "0.6448177", "0.64149165", "0.6343142", "0.6260202", "0.6211733", "0.6203442", "0.614886", "0.6116213", "0.6090153", "0.6088133", "0.60815567", "0.60769475", "0.6035262", "0.6033425", "0.60120356", "0.60066783", "0.6005686", "0.5988398", "0.59309036", "0.59202784", "0.5912546", "0.58709514", "0.5832987", "0.5829491", "0.5829491", "0.58270186", "0.5822124", "0.58112717", "0.5807039", "0.5791732", "0.5789567", "0.57847685", "0.5769037", "0.57603496", "0.57578325", "0.5754255", "0.57497853", "0.57143503", "0.57100356", "0.5706676", "0.56699705", "0.5668732", "0.5658631", "0.56568897", "0.5654935", "0.5654916", "0.564358", "0.5636409", "0.5622752", "0.56213915", "0.5610087", "0.56088805", "0.55960435", "0.5584506", "0.5573502", "0.5560363", "0.5557053", "0.5553331", "0.55526584", "0.5550853", "0.5549723", "0.55392396", "0.55295974", "0.5525325", "0.55115163", "0.55020946", "0.54960644", "0.54928845", "0.54846656", "0.54756683", "0.54738456", "0.5472689", "0.5471759", "0.5470759", "0.5469087", "0.5462502", "0.54611856", "0.54603875", "0.54509425", "0.5446836", "0.5444956", "0.5443512", "0.54412174", "0.54303336", "0.54303336", "0.54289275", "0.5422536", "0.54215485", "0.5420339", "0.54171336", "0.54158396", "0.54106796", "0.54101086" ]
0.5673631
47
Terminate arangod(s). Let the starter restart them.
def restart_arangods(self):
    for instance in self.all_instances:
        instance.kill_instance()
        instance.rename_logfile()
    self.detect_instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate():\n dislin.disfin()", "def terminate():\n sys.exit()", "def terminate(self):", "def terminate_all(self):\n self._stop_all('terminate')", "async def terminate(self, restart=False) -> None:\n pass", "def terminate(self) -> None:\n self.robot.terminate_all()", "def terminate(self):\n self._running = False", "def terminate(self):\n return", "def terminate(self):\n return", "def terminate(self):\n\t\tself.raise_exc(SystemExit)", "def terminate(self):\n self.terminated = True", "def exit(self, *args):\n self.stop('all')\n sys.exit(1)", "def terminate(self):\n self._running = False", "def terminate():\n leds.on()\n time.sleep(1)\n leds.off()\n\n GPIO.cleanup()", "def gracefully_terminate(self):\n self.running = False", "def terminate(self):\n self.raise_exc(SystemExit)", "def cleanup_and_exit():\n logger.warn(\"Terminating the program\")\n try:\n for key in connections:\n try:\n connections[key].disconnect()\n except AttributeError:\n pass\n for s in sensors:\n try:\n sensors[s].cleanup()\n except AttributeError:\n pass\n except:\n pass\n sys.exit(0)", "def halt():\n\n jobs = [j.name for j in config.all_jobs]\n nomad.stop_and_wait(jobs)", "def __exit__(self):\n self._stop_all()", "def exit(self):\n self.runtime.halted = True", "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False\n self._terminate()", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "def test_terminate_run(self):\n pass", "async def __aexit__(self, *args) -> None:\n self.stop()\n await self.join()", "def terminate(self):\n raise NotImplementedError()", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def terminate(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.terminate()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def terminate():\r\n pygame.quit()\r\n os._exit(1)", "def halt(self):\n\n print(\"Halt program. 
Exit emulator.\")\n self.running = False\n sys.exit()", "def terminate(self):\n for processor in self._processors.values():\n Stats.decr(\n \"dag_processing.processes\", tags={\"file_path\": processor.file_path, \"action\": \"terminate\"}\n )\n processor.terminate()", "def exit_program():\n quit()", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "def stop(self):\r\n self.terminating = True", "def terminate(self):\r\n self.Roomba.PauseQueryStream()\r\n if self.Roomba.Available() > 0:\r\n z = self.Roomba.DirectRead(self.Roomba.Available())\r\n # print(z)\r\n time.sleep(0.1)\r\n\r\n ## -- Ending Code Starts Here -- ##\r\n self.Roomba.ShutDown() # Shutdown Roomba serial connection\r\n GPIO.cleanup() # Reset GPIO pins for next program\r\n self.logger.terminate()\r\n pass", "def exit_loop(self):\n self.loop.exit()", "def do_exit(self, args) :\r\n\r\n self.__Logger.warn(\"stopping the timer loop\")\r\n\r\n self.cmds[\"SimulatorStartup\"] = True\r\n self.cmds[\"SimulatorShutdown\"] = True\r\n\r\n\r\n return True", "def _terminate_application(signal=None, frame=None):\n\n # We need to check if station was initialized\n if 'station' in globals():\n station.stop_station()\n\n logging.warning('Application terminated', exc_info=not signal)\n\n print('\\nExiting application')", "def kill_all():\n compose_kill_all()", "def _terminateAll(self):\n\n # Termination of all processes\n try :\n for process in self.processes:\n process.terminate()\n except AttributeError:\n pass\n\n return", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def quit():\n\tsys.exit()", "def stop(self):\n for worker in self.workers:\n import sys; sys.stdout.flush()\n try: worker.exec_code('import sys;sys.exit(0)')\n except:\n #should really do something here to\n # trap non-SystemExit errors.\n pass", "def quit_program():\n sys.exit()", "def abortAll(self):\n for partition in self.PCAs:\n self.abortFunction(partition.id)", "def terminate():\n pygame.quit()\n sys.exit(0)", "def terminate_workers(self):\n if self.shared_storage_worker:\n self.shared_storage_worker.set_info.remote(\"terminate\", True)\n self.checkpoint = ray.get(\n self.shared_storage_worker.get_checkpoint.remote()\n )\n if self.replay_buffer_worker:\n self.replay_buffer = ray.get(self.replay_buffer_worker.get_buffer.remote())\n\n print(\"\\nShutting down workers...\")\n\n self.self_play_workers = None\n self.test_worker = None\n self.training_worker = None\n self.reanalyse_worker = None\n self.replay_buffer_worker = None\n self.shared_storage_worker = None", "def _gracefully_stop(self):\n pass", "def shutdown(rc):\n \n # Close the Ephemeris so it can do necessary cleanups.\n #Ephemeris.closeEphemeris()\n \n logging.shutdown()\n \n sys.exit(rc)", "def __exit__(self, *args):\n self.stop()", "def stop_run(arn=None):\n pass", "def terminate(self):\n logger.debug(\"Outbox:terminate\")\n self._lock_terminate.acquire()\n assert self._terminated != True\n self._find.terminate()\n self._sum.terminate()\n self._tag.terminate()\n self._register.terminate()\n self._dispatcher.terminate()\n self._terminated = True\n self._lock_terminate.release()", "def shutdown():\n # command executed after Ctrl+C is pressed\n rospy.loginfo(\"Stop ASRControl\")\n rospy.sleep(1)", "def quit(self):\n \n # Say good bye\n endMsg = 'controller:: Signing Off!'\n print endMsg\n self.dbF.writerow([endMsg]) \n \n # Close serial connections\n self.sp.closeSer()\n 
self.tc.closeSer()\n self.ard.closeSer()\n \n # Close log files\n self.dataLogFile.close()\n self.debugLogFile.close()", "def call_exit(self, _) -> None:\n self.save_class()\n for _ in range(self.PATH.count(\"/\") + 1):\n self.queue.insert(0, \"quit\")", "def stop() -> None:", "def terminarConexion():\n print(\"Tiempo de respuesta excedido: 10 segundos\")\n print(\"Terminando conexión...\")\n sys.exit(1)", "def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()", "def terminate(self):\n super(ReacherEnv, self).close()", "def quit(self):\n\n self.mount.stopTimers()\n self.measure.timerTask.stop()\n self.relay.timerTask.stop()\n self.timer0_1s.stop()\n self.message.emit('MountWizzard4 manual stopped with quit', 1)\n PyQt5.QtCore.QCoreApplication.quit()\n return True", "def terminate(self):\n plt.close('all')", "def exitprogram():\n sys.exit()", "def terminated(self):\n gc3libs.log.debug(\" ...done.\")", "def terminate(self):\n return terminate(self)", "def _quit(self, *args):\n self.cleanup()", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def exit_program():\n print(\"Good bye\")\n sys.exit()", "def clean_shutdown(self):\n\t\tprint(\"\\nExiting example...\")\n\t\tif self._done:\n\t\t self.set_neutral()\n\t\tif not self._init_state and self._rs.state().enabled:\n\t\t print(\"Disabling robot...\")\n\t\t self._rs.disable()", "def quit(self):\n sys.exit(0)", "def _terminate(self) -> None:\n if not jh.should_execute_silently() or jh.is_debugging():\n logger.info(f\"Terminating {self.symbol}...\")\n\n self.before_terminate()\n\n self._detect_and_handle_entry_and_exit_modifications()\n\n # fake execution of market orders in backtest simulation\n if not jh.is_live():\n store.orders.execute_pending_market_orders()\n\n if jh.is_live():\n self.terminate()\n return\n\n if self.position.is_open:\n store.app.total_open_trades += 1\n store.app.total_open_pl += self.position.pnl\n logger.info(\n f\"Closed open {self.exchange}-{self.symbol} position at {self.position.current_price} with PNL: {round(self.position.pnl, 4)}({round(self.position.pnl_percentage, 2)}%) because we reached the end of the backtest session.\"\n )\n # first cancel all active orders so the balances would go back to the original state\n if self.exchange_type == 'spot':\n self.broker.cancel_all_orders()\n # fake a closing (market) order so that the calculations would be correct\n self.broker.reduce_position_at(self.position.qty, self.position.current_price, self.price)\n self.terminate()\n return\n\n if len(self.entry_orders):\n self._execute_cancel()\n logger.info('Canceled open-position orders because we reached the end of the backtest session.')\n\n self.terminate()", "def kill(self):\n self._destruct()\n pass", "def terminate(self):\n self._proxy.ibroadcast(\n component_type=\"actor\", tag=MessageTag.EXIT, session_type=SessionType.NOTIFICATION\n )\n self.logger.info(\"Exiting...\")", "def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')", "def stop():", "def stop():", "def stop():", "def stop():", "def cleanupAtExit():\n \n global client\n \n client.stop()", "def exit_gracefully():\n print(\"Exiting gracefully\")\n args = sys.argv[1:]\n parsed_args = parse_arguments(args)\n\n db_path = parsed_args.database\n conn = connect_to_database(db_path)\n scenario_id, scenario = get_scenario_id_and_name(\n scenario_id_arg=parsed_args.scenario_id,\n scenario_name_arg=parsed_args.scenario,\n c=conn.cursor(),\n 
script=\"run_end_to_end\",\n )\n\n # Check if running from queue\n queue_order_id = check_if_in_queue(db_path, scenario)\n remove_from_queue_if_in_queue(db_path, scenario, queue_order_id)\n update_run_status(db_path, scenario, 4)\n\n conn.close()", "def endApplication(self):\n self.running = 0", "def do_quit(self, args):\n print('Good Bye!')\n exit()", "def stop(self):\n print 'closing'\n self.comm.close_all_serial() # Close all serial connections opened with SNAPconnect\n print 'closed'\n sys.exit(0) # Exit the program", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def quit():\r\n autoquit()", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def terminate(exitmsg: str):\n print(exitmsg)\n sys.exit(1)", "def quit():\n raise EmbeddedConsoleExit", "def terminate(self):\n self._terminate = True\n if self.started:\n self.join()", "def terminate(self, _):\n self.execution_manager.terminate()\n self.menu_structure['terminate'] = ('main', [('Continue submitting jobs', self.enable_submission)])\n self.__back_to_main()", "def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None", "def terminate(self):\n self._proc.terminate()", "def kill(self):\n self._exit = True", "def exitProgram():\n canvas.destroy()\n tool.destroy()\n code_editor.destroy()\n sys.exit()", "def exit_engine(self):\n self.stop_flag = True", "def main():\n args = parser.parse_args()\n terminate(\n args.uuid,\n args.time,\n args.limit,\n skip=args.skip\n )", "def terminate(ctx):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n for job in jobs:\n jobid = job['id']\n click.echo('Terminating {}'.format(jobid))\n ctl('terminate', '--jobid', jobid)", "def terminate(self):\n self.keep_going = False\n self.event.set()\n self.join()", "def handle_termination(self):\n pass", "def exit_gracefully(signal, frame):\n\t\ttry: # exiting with game data\n\t\t\tsave_game(player_location, star_cluster.filename)\n\t\texcept NameError: # exiting before game data established\n\t\t\tpass\n\t\tfinally:\n\t\t\tprint(\"\\nExiting program.\")\n\t\t\tsys.exit(0)", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def do_quit(self, args):\n raise SystemExit" ]
[ "0.71201587", "0.6880036", "0.6720558", "0.6588962", "0.64856535", "0.63402265", "0.63243175", "0.62685204", "0.62685204", "0.6242662", "0.62372035", "0.61764723", "0.61611766", "0.61569375", "0.6126655", "0.61082095", "0.60830575", "0.6070568", "0.60174894", "0.6013085", "0.5992476", "0.59584856", "0.59324026", "0.5912554", "0.59015566", "0.5900655", "0.5890198", "0.58495116", "0.5806864", "0.5782288", "0.5773748", "0.57678807", "0.57675207", "0.57617694", "0.57496786", "0.5744556", "0.5741672", "0.57316077", "0.5727618", "0.5727148", "0.57191074", "0.57188034", "0.57164", "0.5710284", "0.5700193", "0.56804967", "0.5680348", "0.56655467", "0.56621027", "0.5658959", "0.56389624", "0.56358826", "0.5620611", "0.5616753", "0.5616492", "0.56164", "0.560978", "0.5609726", "0.560476", "0.55997074", "0.55964136", "0.558698", "0.55709267", "0.5570312", "0.55689085", "0.5568614", "0.5566217", "0.5561492", "0.5556395", "0.5549783", "0.5548988", "0.55472577", "0.5546535", "0.5546535", "0.5546535", "0.5546535", "0.5544482", "0.5538055", "0.55316997", "0.5531059", "0.55309767", "0.55248225", "0.5523346", "0.55178875", "0.5512184", "0.5507421", "0.5507252", "0.5505877", "0.5499814", "0.5498757", "0.549712", "0.54956174", "0.5487274", "0.5485344", "0.54826266", "0.5475673", "0.54747075", "0.54730034", "0.5465958", "0.54621196" ]
0.63488686
5
Replace the parts of the installation with information after an upgrade.
def replace_binary_setup_for_upgrade(self, new_install_cfg):
    # On windows the install prefix may change,
    # since we can't overwrite open files:
    self.cfg.set_directories(new_install_cfg)
    if self.cfg.hot_backup_supported:
        self.hotbackup_args = [
            "--all.rclone.executable",
            self.cfg.real_sbin_dir / "rclone-arangodb",
        ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):", "def upgrade(self):", "def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p1.communicate()\n\n\t\tresult = {}\n\t\tresult['install'] = []\n\t\tresult['update'] = []\n\t\tresult['remove'] = []\n\t\tfor line in stdout.split('\\n'):\n\t\t\t# upgrade:\n\t\t\t# Inst univention-updater [3.1.1-5] (3.1.1-6.408.200810311159 192.168.0.10)\n\t\t\t# inst:\n\t\t\t# Inst mc (1:4.6.1-6.12.200710211124 oxae-update.open-xchange.com)\n\t\t\t#\n\t\t\t# *** FIX ***\tthe above example lines ignore the fact that there's\n\t\t\t#\t\t\t\tsome extra text (occasionally) after the last closing\n\t\t\t#\t\t\t\tparenthesis. Until now, I've seen only a pair of empty\n\t\t\t#\t\t\t\tbrackets [], but who knows...\n\t\t\tmatch = re.search('^Inst (\\S+)\\s+(.*?)\\s*\\((\\S+)\\s.*\\)',line)\n\t\t\tif match:\n\t\t\t\tpkg = match.group(1)\n\t\t\t\told = match.group(2)\n\t\t\t\tver = match.group(3)\n\t\t\t\tif old:\n\t\t\t\t\tresult['update'].append([pkg,ver])\n\t\t\t\telse:\n\t\t\t\t\tresult['install'].append([pkg,ver])\n\t\t\telif line.startswith('Remv '):\n\t\t\t\tl=line.split(' ')\n\t\t\t\tpkg = l[1]\n\t\t\t\tver = _('unknown')\n\t\t\t\tif len(l) > 2:\n\t\t\t\t\tver = l[2].replace('[','').replace(']','')\n\t\t\t\tresult['remove'].append([pkg,ver])\n\n\n\t\t# sort package names?\n\t\tresult['update'] = sorted(result['update'])\n\t\tresult['install'] = sorted(result['install'])\n\t\tresult['remove'] = sorted(result['remove'])\n\n\t\tself.finished(request.id,result)", "def prepareUninstall():\n pass", "def post_install(self, installable_pkgs):\n pass", "def install_or_upgrade():\n global action_url\n global cgi_executable_path\n global documentation_path\n global full_python_path\n global private_data_directory\n if get_script_mode() == \"install\":\n output(\"\\nInstalling...\")\n elif get_script_mode == \"upgrade\":\n output(\"\\nUpgrading...\")\n if get_script_mode() == \"install\":\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"mkdir -p \" + private_data_directory)\n os.system(\"rm -rf \" + private_data_directory)\n os.system(\"cp -r data \" + private_data_directory)\n os.system(\"chmod 755 \" + private_data_directory + \"/*\")\n os.system(\"chmod 1777 \" + private_data_directory)\n\tsubstitute(private_data_directory + \"/footer.html\", \"ACTION_URL\", \\\n\t action_url)\n elif get_script_mode() == \"upgrade\":\n\tpass\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"mkdir -p \" + cgi_executable_path)\n os.system(\"rm -rf \" + cgi_executable_path)\n os.system(\"cp bin/mobile \" + cgi_executable_path)\n substitute(cgi_executable_path, \"DOCUMENT_ROOT_PATH\", \\\n private_data_directory)\n substitute(cgi_executable_path, \"FULL_ACTION_URL\", action_url)\n substitute(cgi_executable_path, \"FULL_PYTHON_PATH\", full_python_path)\n os.system(\"chmod 0755 \" + cgi_executable_path)\n if get_script_mode() == \"install\":\n substitute(\"doc/README\", \"<the private Mobile Web Proxy data directory>\", \\\n private_data_directory)\n os.system(\"mkdir -p \" + documentation_path)\n os.system(\"rm -rf \" + documentation_path)\n os.system(\"mkdir -p \" + documentation_path)\n os.system(\"cd doc; cp -rp * \" + documentation_path)\n if get_script_mode() == \"install\":\n 
output(\\\n\"\"\"\n\nInstallation is complete. Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")\n if get_script_mode() == \"upgrade\":\n output(\\\n\"\"\"\n\nThe upgrade is complete. Further information about the Mobile Web Proxy is\navailable in \"\"\" + documentation_path + \"\"\"/README file.\n\nThank you for using the Mobile Web Proxy!\n\n\"\"\")", "def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']", "def post_installation(self, exc_value):\n pass", "def preCommitFixup(self):\n log_method_call(self, self.name)\n # UEFI firmware/bootloader cannot read 1.1 or 1.2 metadata arrays\n if getattr(self.format, \"mountpoint\", None) == \"/boot/efi\":\n self.metadataVersion = \"1.0\"", "def pre_installation(self):\n pass", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def pre_upgrade(self, upgrade_specs):\n pass", "def do_post_install(self, context):\n pass", "def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))", "def _install(self):\n\n pass", "def upgrade(self, command=\"upgrade\"):\n # execute pkgin\n popen = Popen([self.pkgin_bin, \"-y\", command], stdout=PIPE, stdin=PIPE,\n stderr=PIPE)\n # retrieve output streams\n (stdoutdata, stderrdata) = popen.communicate()\n # if pkgin error\n if(stderrdata):\n # remove the line feed\n error = stderrdata[0:-1]\n raise PykginError(error)\n # retrieve output\n output_raw = stdoutdata\n # create a list which contain each packages\n output_whole_list = output_raw.split('\\n')\n # add infos to a dict\n output = {}\n for line in output_whole_list:\n if line.find(\"to be upgraded:\") != -1:\n # extract usefull string\n infos = line.split(':')[1].split('(')\n # extract string which contains packages list\n packages_list = infos[0].strip().split(' ')\n # extract version of each packages\n packages_upgraded = []\n for pkg in packages_list:\n packages_upgraded.append(\\\n self.__extract_package_version(pkg))\n output['packages_upgraded'] = packages_upgraded\n if line.find(\"to be installed:\") != -1:\n # extract usefull string\n infos = line.split(':')[1].split('(')\n # extract string which contains packages list\n packages_list = infos[0].strip().split(' ')\n # extract download and install size from the rest of the\n # string\n download_size = infos[1].split(' ')[0]\n install_size = infos[1].split(' ')[3]\n # extract version of each packages\n packages_installed = []\n for pkg in packages_list:\n packages_installed.append(\\\n self.__extract_package_version(pkg))\n output['packages_installed'] = packages_installed\n output['download_size'] = download_size\n output['install_size'] = install_size\n\n return output", "def __upgrade(self):", "def updateDictFile(self):\n if self.dictFile.vdata.get('version',0): return\n #--Update to version 1\n for name in self.data.keys():\n installer = self.data[name]\n if isinstance(installer,Installer):\n self.data[name] = installer.__copy__(InstallerArchive)\n self.dictFile.vdata['version'] = 1", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def upgrade(self):\n import re\n currversion = getattr(self, '_version', 0)\n if currversion == self.version:\n return 'already running latest version 
(%s)' % currversion\n # a slightly modified version of propertyitems\n propertyitems = map(lambda i,s=self: (i['id'],getattr(s, i['id'], None)),\n self._properties)\n for propertyitem in propertyitems:\n if not propertyitem[1]:\n pass\n elif propertyitem[0] in ('stylesheet_paths', 'xslt_paths'):\n self.transform_paths = propertyitem[1]\n elif propertyitem[0][:4] == 'xslt':\n transform = getattr(self, propertyitem[1], None)\n if not transform:\n for obj in self._get_path_objs(self.transform_paths):\n transform = getattr(obj, propertyitem[1], None)\n if transform:\n break\n if transform:\n mtype = re.sub('template|renderer', '', transform.meta_type.lower()).strip() \n setattr(self,\n 'transform_%s' % propertyitem[0][5:],\n '%s (%s)' % (propertyitem[1], mtype))\n elif propertyitem[0][:10] == 'stylesheet':\n setattr(self, 'template_%s' % propertyitem[0][11:], propertyitem[1])\n else:\n continue\n try:\n delattr(self, propertyitem[0])\n except:\n pass\n try:\n self._delProperty(propertyitem[0])\n except:\n pass\n\n self._properties = self._def_properties\n self.setup_transformProperties()\n self.setup_schemaProperties()\n \n self._version = self.version\n \n return 'upgraded %s to version %s from version %s' % (self.getId(),\n self._version,\n currversion)", "def data_upgrades():\n pass", "def data_upgrades():\n pass", "def report_version(self, data):\n self.firmata_version.append(data[0]) # add major\n self.firmata_version.append(data[1]) # add minor", "def post_install_pkg(self, installable_pkg):\n pass", "def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')", "def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode", "def update_env(self, update):\n self.env[\"unique_identifier\"] = update.find('des:unique_identifier', EFI_NAMESPACES).text\n self.env[\"title\"] = update.find('des:title', EFI_NAMESPACES).text\n self.env[\"short_description\"] = update.find('des:short_description', EFI_NAMESPACES).text\n self.env[\"file_size\"] = int(update.find('des:file_size', EFI_NAMESPACES).text)\n self.env[\"date_released\"] = update.find('des:date_released', EFI_NAMESPACES).text\n self.env[\"md5_signature\"] = update.find('des:md5_signature', EFI_NAMESPACES).text\n self.env[\"version\"] = update.find('des:version', EFI_NAMESPACES).text\n self.env[\"url\"] = update.findall('.//des:location_download', EFI_NAMESPACES)[0].text", "def upgrade_os_packages(self):\n self.summarize_operation(\"Upgrading OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get upgrade -y\"))", "def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n old_version = self.cfg.version\n self.default_starter_args = new_install_cfg.default_starter_args.copy()\n self.enterprise = new_install_cfg.enterprise\n self.replace_binary_setup_for_upgrade(new_install_cfg)\n with step(\"kill the starter processes of the old version\"):\n logging.info(\"StarterManager: Killing my instance [%s]\", str(self.instance.pid))\n self.kill_instance()\n with step(\"revalidate that the old arangods are still running and alive\"):\n self.detect_instance_pids_still_alive()\n if relaunch:\n with step(\"replace the starter binary with a new one,\" + \" this has not yet spawned 
any children\"):\n self.respawn_instance(new_install_cfg.version)\n logging.info(\"StarterManager: respawned instance as [%s]\", str(self.instance.pid))\n self.arangosh = None\n self.detect_arangosh_instances(new_install_cfg, old_version)", "def full_upgrade(self):\n return self.upgrade(\"full-upgrade\")", "def update_firmware(self) -> str:", "def _provision_package(self):", "def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r", "def upgrade_environment(self, db_dummy=None):\n self.log.debug(\"upgrading existing environment for %s plugin.\" % \n PLUGIN_NAME)\n db_installed_version = self.get_version()\n #cursor = db.cursor()\n with self.env.db_transaction as db:\n if db_installed_version < 0:\n # Initial installation\n db(\"\"\"\n INSERT INTO system (name, value) VALUES ('%s','%s')\n \"\"\" % (DB_SYSTEM_KEY, DB_VERSION))\n db(\"ALTER TABLE ticket ADD COLUMN product TEXT\")\n self.log.debug(\"creating initial db tables for %s plugin.\" % \n PLUGIN_NAME)\n \n db_connector, dummy = DatabaseManager(self.env)._get_connector()\n for table in self.SCHEMA:\n for statement in db_connector.to_sql(table):\n db(statement)\n db_installed_version = self.get_version()\n \n if db_installed_version == 1:\n from multiproduct.model import Product\n products = Product.select(self.env)\n for prod in products:\n db(\"\"\"UPDATE ticket SET product=%s\n WHERE product=%s\"\"\", (prod.prefix, prod.name))\n \n db(\"\"\"UPDATE system SET value=%s\n WHERE name=%s\"\"\", (DB_VERSION, DB_SYSTEM_KEY))\n self.log.info(\"Upgraded multiproduct db schema from version %d\"\n \" to %d\" % (db_installed_version, DB_VERSION))", "def tweak_new_filesystem(root_dir):\n\n # create a symlink for insserv\n 
force_symlink('../usr/lib/insserv/insserv',\n os.path.join(root_dir, 'sbin/insserv'))\n\n # create a symlink for awk\n force_symlink('mawk', os.path.join(root_dir, 'usr/bin/awk'))\n\n # Nvidia keeps packaging up a broken post-install script for their cudnn\n # deb. Freaking nvidia\n cudnn_postinst_path = 'var/lib/dpkg/info/libcudnn6-dev.postinst'\n cudnn_postinst_path = os.path.join(root_dir, cudnn_postinst_path)\n\n if os.path.exists(cudnn_postinst_path):\n with open(cudnn_postinst_path, 'r') as infile:\n content = infile.read()\n if not content.startswith(\"#!\"):\n with open(cudnn_postinst_path, 'w') as outfile:\n outfile.write('#! /bin/sh\\n')\n outfile.write(content)\n\n # NOTE(josh): patch the base-packages post-install hook so it doesn't\n # complain about files in /var/run\n basefiles_path = os.path.join(root_dir,\n 'var/lib/dpkg/info/base-files.postinst')\n if os.path.exists(basefiles_path):\n apply_patch_text(BASE_FILES_PATCH, root_dir)\n\n # NOTE(josh): ifupdown should depend on initscripts, but it doesn't\n status_path = os.path.join(root_dir, 'var/lib/dpkg/status')\n tempfile_path = status_path + '.tmp'\n with open(tempfile_path, 'wb') as outfile:\n with open(status_path, 'rb') as infile:\n for line in infile:\n outfile.write(line)\n if line.strip() == 'Package: ifupdown':\n break\n\n for line in infile:\n if line.startswith('Depends: '):\n line = ', '.join(line.strip().split(', ') + ['initscripts']) + '\\n'\n outfile.write(line)\n break\n else:\n outfile.write(line)\n\n for line in infile:\n outfile.write(line)\n os.rename(tempfile_path, status_path)\n\n # NOTE(josh): resolvconf tries to a write a file in this directory\n try:\n target_path = os.path.join(root_dir, 'run/resolvconf/interface')\n os.makedirs(target_path)\n except OSError:\n if not os.path.isdir(target_path):\n raise\n\n # NOTE(josh): Can't postinst makedev without CAP_MKNOD\n if os.getuid() != 0:\n makedev_postinst = os.path.join(root_dir,\n 'var/lib/dpkg/info/makedev.postinst')\n if os.path.exists(makedev_postinst):\n os.rename(makedev_postinst, makedev_postinst + '.bak')\n\n # remove temporary/boostrap files\n files_to_remove = ['etc/apt/sources.list.d/bootstrap.list']\n\n for filename in files_to_remove:\n file_path = os.path.join(root_dir, filename)\n if os.path.exists(file_path):\n os.remove(file_path)", "def pre_install(self, installable_pkgs):\n pass", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def __init__(self):\n self.update_os_packages()\n self.upgrade_os_packages()", "def null_upgrade_step(setup_tool):\n pass", "def install_debuginfo(self) -> None:\n pass", "def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def upgrade(self, old_version, new_version):\n pass", "def _update_python_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if 
self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % self.Version\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def post_extract(env_name='root'):\n prefix = prefix_env(env_name)\n info_dir = join(prefix, 'info')\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n dist = '%(name)s-%(version)s-%(build)s' % meta\n if FORCE:\n run_script(prefix, dist, 'pre-unlink')\n link(prefix, dist, linktype=None)\n shutil.rmtree(info_dir)", "def getSetupPackages(self):\n e = eups.Eups()\n setupProducts = e.getSetupProducts()\n a = \"\"\n\n # create a new list will all products and versions\n allProducts = {}\n for i in setupProducts:\n allProducts[i.name] = i.version\n\n # replace any existing products that we saw on the command line, adding\n # them if they're not already there.\n if self.opts.setup is not None:\n for i, pkg in enumerate(self.opts.setup):\n name = pkg[0]\n version = pkg[1]\n print(\"name = %s, version = %s\" % (name, version))\n allProducts[name] = version\n\n # write out all products, except those that are setup locally.\n for name in allProducts:\n version = allProducts[name]\n if self.platform == \"lsst\":\n a = a + \"setup -j %s %s\\\\n\\\\\\n\" % (name, version)\n else:\n if not version.startswith(\"LOCAL:\"):\n a = a + \"setup -j %s %s\\\\n\\\\\\n\" % (name, version)\n return a", "def _create_initial_install_file():\n if not _are_components_installed():\n touch(INITIAL_INSTALL_FILE)", "def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)", "def upgrade(self):\n # The workaround we need in 
order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def _upgrade(cls, contents):\n metadata = contents.get(\"metadata\", {})\n version = metadata.get(\"cache_version\")\n if version == \"1\":\n metadata.pop(\"cache_version\", None)\n return {\"metadata\": cls.METADATA,\n **{cls.LANG_PREFIX + \"coq\":\n {\"driver\": contents.pop(\"generator\"),\n \"metadata\": contents.pop(\"metadata\"),\n **contents}}}\n return contents", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))", "def update_plugin_data(self, entry):", "def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError", "def test_component_update_get_packages(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)\\nInst b (new from)\\nRemv c (old PKG)\\nRemv d PKG'\n installed, upgraded, removed = self.u.component_update_get_packages()\n self.assertEqual([('b', 'new')], installed)\n self.assertEqual([('a', 'old', 'new')], upgraded)\n self.assertEqual([('c', 'old'), ('d', 'unknown')], removed)", "def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n 
self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def deployFluidinfo(deploymentPath, revision):\n homePath = os.path.join('/home', env.user)\n revisionPath = os.path.join(deploymentPath, revision)\n sudo('mkdir -p %s' % revisionPath)\n put('fluidinfo.tar.bz2', homePath)\n filePath = os.path.join(homePath, 'fluidinfo.tar.bz2')\n sudo('cp %s %s' % (filePath, revisionPath))\n\n with cd(revisionPath):\n sudo('chown -R fluidinfo %s' % revisionPath)\n sudo('chown fluidinfo fluidinfo.tar.bz2')\n sudo('tar jxvf fluidinfo.tar.bz2', user='fluidinfo')\n sudo('mkdir -p var/log var/log/trace var/run var/tmp',\n user='fluidinfo')\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('virtualenv .', user='fluidinfo')\n sudo('./bin/pip install --use-mirrors '\n '--download-cache=/var/lib/fluidinfo/source-dependencies '\n '-r requirements.txt', user='fluidinfo')\n\n ## Copy configuration files\n\n serverName = os.path.basename(deploymentPath)\n templateData = {'deployment-path': deploymentPath,\n 'server-name': serverName}\n fileCopies = [\n ('fluidinfo/fluidinfo-api.conf.template', '../fluidinfo-api.conf'),\n ('cron/postgres-crontab.template', '../scripts/postgres-crontab'),\n ('cron/fluidinfo-crontab.template', '../scripts/fluidinfo-crontab'),\n\n ('cron/backup-postgresql.sh.template',\n '../scripts/backup-postgresql.sh'),\n\n ('cron/metrics.sh', '../scripts/metrics.sh'),\n ('cron/time-fluidinfo.py', '../scripts/time-fluidinfo.py'),\n ('cron/solr-optimize.sh', '../scripts/solr-optimize.sh')\n\n # TODO: Copy configuration files for nginx, haproxy, logrotate and\n # upstart, these require service restarts if files have changed.\n ]\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('mkdir ../scripts')\n\n for origin, destination in fileCopies:\n specificFilename = os.path.join('deployment', serverName, origin)\n defaultFilename = os.path.join('deployment', 'default', origin)\n origin = (specificFilename\n if os.path.exists(specificFilename)\n else defaultFilename)\n\n sudo('cp {origin} {destination}'.format(**locals()))\n\n for key, value in templateData.iteritems():\n value = value.replace('.', r'\\.').replace('/', '\\/')\n expression = r's/{{ %s }}/%s/g' % (key, value)\n sudo(\"sed -i -e '%s' %s\" % (expression, destination))\n\n sudo('chmod +x ../scripts/backup-postgresql.sh')\n 
sudo('crontab -u postgres ../scripts/postgres-crontab')\n sudo('crontab -u fluidinfo ../scripts/fluidinfo-crontab')", "def updateFluidDB(serverName):\n # Upload and set up the code.\n deploymentPath = os.path.join('/srv', serverName)\n revision = datetime.utcnow().strftime('%Y%m%d-%H%M')\n revisionPath = os.path.join(deploymentPath, revision)\n\n sudo('mkdir -p {0}'.format(revisionPath))\n\n local('git archive --prefix=fluidinfo/ -v --format tar HEAD | '\n 'bzip2 > fluidinfo.tar.bz2')\n put('fluidinfo.tar.bz2', revisionPath, use_sudo=True)\n\n with cd(revisionPath):\n sudo('mkdir -p var/log var/log/trace var/run var/tmp')\n sudo('chown -R fluidinfo {0}'.format(revisionPath))\n sudo('tar jxvf fluidinfo.tar.bz2', user='fluidinfo')\n\n with cd(os.path.join(revisionPath, 'fluidinfo')):\n sudo('virtualenv .', user='fluidinfo')\n sudo('mkdir -p /var/lib/fluidinfo/pip-cache', user='fluidinfo')\n sudo('./bin/pip install --use-mirrors '\n '--download-cache=/var/lib/fluidinfo/pip-cache '\n '--log /tmp/pip.log '\n '-r requirements.txt', user='fluidinfo')\n # On successful completion, clean up /tmp\n sudo('rm -f /tmp/pip.log')\n\n get(os.path.join(deploymentPath, 'current', 'fluidinfo-api.conf'),\n 'fluidinfo-api.conf')\n config = RawConfigParser()\n with open('fluidinfo-api.conf', 'r') as configFile:\n config.readfp(configFile)\n\n # Copy and set up configuration files.\n deployConfigFiles(\n {'deployment-path': deploymentPath,\n 'server-name': serverName,\n 'revision-path': revisionPath,\n 'solr-url': config.get('index', 'url'),\n 'solr-shards': config.get('index', 'shards'),\n 'postgres-uri': config.get('store', 'main-uri')},\n\n ('fluidinfo/fluidinfo-api.conf.template',\n '{revision-path}/fluidinfo-api.conf'))\n\n local('rm fluidinfo-api.conf')\n\n # TODO: check patches.\n # TODO: update version tag.\n\n with cd(deploymentPath):\n sudo('rm current')\n sudo('ln -fs {0} current'.format(revision))\n\n for port in range(9001, 9009):\n sudo('restart fluidinfo-api-node PORT=%d' % port)", "def install_by_version():\n respond_model = RespondModel()\n respond_model.data = get_all_user_info()\n return respond_model", "def repackage(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)", "def test_update_hyperflex_app_catalog(self):\n pass", "def downgrade():\n pass", "def downgrade():\n pass", "def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in 
_vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))", "def fix_leanpkg_bug():\n leanpkg = Path('leanpkg.toml')\n conf = leanpkg.read_text()\n m = LEAN_VERSION_RE.match(conf)\n if m:\n ver = m.group(1)\n leanpkg.write_text(conf.replace(ver, 'leanprover-community/lean:'+ver))", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def update_os_packages(self):\n self.summarize_operation(\"Updating OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get update -y\"))", "def pre_install_pkg(self, installable_pkg):\n pass", "def load_version_information() -> None:\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()", "def test_update_software_components_for_system_module(self):\n pass", "def restore_old_install(self):\n USER.info('%s: Restoring Old Install', self.recipe.name)\n shutil.move(self.back_dir, self.recipe.install_dir)\n pakit.conf.IDB[self.recipe.name] = self.old_entry\n walk_and_link(self.recipe.install_dir, self.recipe.link_dir)", "def do_upgrade(self, url):\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0", "def 
_common_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n sudo('''\n export DEBIAN_FRONTEND=noninteractive;\n apt-get update -qq -o Acquire::http::No-Cache=True;\n apt-get upgrade %s\n ''' % self.apt_opts)\n sudo('''\n export DEBIAN_FRONTEND=noninteractive;\n apt-get install %s %s\n ''' % (self.apt_opts,\n ' '.join(self.keyrings + self.general_tools)))\n sudo('mv /etc/localtime /etc/localtime.old')\n if sudo('test -e /usr/share/zoneinfo/UTC').succeeded:\n sudo('cp /usr/share/zoneinfo/UTC /etc/localtime')", "def make_module_extra(self):\n\n txt = super(EB_icc, self).make_module_extra()\n\n txt += \"prepend-path\\t%s\\t\\t%s\\n\" % (self.license_env_var, self.license_file)\n txt += \"prepend-path\\t%s\\t\\t$root/%s\\n\" % ('NLSPATH', 'idb/intel64/locale/%l_%t/%N')\n\n return txt", "def _store_package_metadata(self):", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def install_step(self):\n silent_cfg_names_map = None\n\n if LooseVersion(self.version) < LooseVersion('2013_sp1'):\n # since icc v2013_sp1, silent.cfg has been slightly changed to be 'more standard'\n\n silent_cfg_names_map = {\n 'activation_name': ACTIVATION_NAME_2012,\n 'license_file_name': LICENSE_FILE_NAME_2012,\n }\n\n super(EB_icc, self).install_step(silent_cfg_names_map=silent_cfg_names_map)", "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def prepare_info(self, infoxml_file):\n\n self.info = InfoXML(infoxml_file)\n\n if self.info.name:\n self.LOGGER << f\"Mod Name from info.xml: {self.info.name}\"\n\n self._install_dirname = self.info.name.lower()", "def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def _upgradeIdeviceToVersion1(self):\n log.debug(\"upgrading to version 1\")\n self._title = self.__dict__.get('title', self.title)\n self._author = self.__dict__.get('author', self.title)\n self._purpose = self.__dict__.get('purpose', self.title)\n self._tip = self.__dict__.get('tip', self.title)", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_patch_hyperflex_app_catalog(self):\n pass", "def update_package(self, *args):\r\n\r\n temp = (self.newProj.device[0],\\\r\n self.newProj.device[1],\\\r\n self.devPackage.get(),\\\r\n self.newProj.device[3],\\\r\n self.newProj.device[4])\r\n\r\n del self.newProj.device\r\n\r\n self.newProj.device = temp\r\n\r\n kT.debug_log(self.newProj.device)\r\n\r\n del temp\r\n\r\n return", "def system_info():\n requirements = get_requirements(\"sunpy\")\n groups = get_keys_list(requirements)\n extra_groups = 
get_extra_groups(groups, ['all', 'dev'])\n base_reqs = get_keys_list(requirements['required'])\n extra_reqs = get_keys_list(requirements['all'])\n missing_packages, installed_packages = find_dependencies(package=\"sunpy\", extras=extra_groups)\n extra_prop = {\"System\": platform.system(),\n \"Arch\": f\"{platform.architecture()[0]}, ({platform.processor()})\",\n \"Python\": platform.python_version(),\n \"sunpy\": version(\"sunpy\")}\n sys_prop = {**installed_packages, **missing_packages, **extra_prop}\n print(\"==============================\")\n print(\"sunpy Installation Information\")\n print(\"==============================\")\n print()\n print(\"General\")\n print(\"#######\")\n if sys_prop['System'] == \"Linux\":\n print(f\"OS: {distro.name()} ({distro.version()}, Linux {platform.release()})\")\n elif sys_prop['System'] == \"Darwin\":\n print(f\"OS: Mac OS {platform.mac_ver()[0]}\")\n elif sys_prop['System'] == \"Windows\":\n print(f\"OS: Windows {platform.release()} {platform.version()}\")\n else:\n print(\"Unknown OS\")\n for sys_info in ['Arch', 'sunpy']:\n print(f'{sys_info}: {sys_prop[sys_info]}')\n print(f'Installation path: {distribution(\"sunpy\")._path}')\n print()\n print(\"Required Dependencies\")\n print(\"#####################\")\n for req in base_reqs:\n print(f'{req}: {sys_prop[req]}')\n print()\n print(\"Optional Dependencies\")\n print(\"#####################\")\n for extra_req in extra_reqs:\n print(f'{extra_req}: {sys_prop[extra_req]}')", "def update_version_files (component):\n\n vprint (\"Updating version files for \" + component)\n\n retval = []\n\n ## Update component/VERSION.txt\n path = get_path(component, \"VERSION.txt\")\n with open (path, \"r+\") as version_file:\n new_version = re.sub (component + \" version .*\",\n \"%s version %s, released %s\" % (component,\n comp_versions[component + \"_version\"],\n release_date),\n version_file.read ())\n if opts.take_action:\n version_file.seek (0)\n version_file.truncate (0)\n version_file.write (new_version)\n else:\n print (\"New version file for \" + component)\n print (new_version)\n\n vprint (\"Updating Version.h for \" + component)\n\n retval.append(path)\n\n ## Update COMPONENT/component/Version.h\n comp_l = len(component + \"_\")\n parts = {k[comp_l:]:v for (k, v) in comp_versions.items() if k.startswith(component)}\n parts[\"comp\"] = component\n version_header = \"\"\"\n// -*- C++ -*-\n// This is file was automatically generated by $ACE_ROOT/bin/make_release.py\n\n#define {comp}_MAJOR_VERSION {major}\n#define {comp}_MINOR_VERSION {minor}\n#define {comp}_MICRO_VERSION {micro}\n#define {comp}_VERSION \\\"{version}\\\"\n#define {comp}_VERSION_CODE 0x{code:x}\n#define {comp}_MAKE_VERSION_CODE(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n\"\"\".format(**parts)\n\n path = get_path(component, component.lower (), \"Version.h\")\n if opts.take_action:\n with open (path, 'w+') as version_h:\n version_h.write (version_header)\n else:\n print (\"New Version.h for \" + component)\n print (version_header)\n\n retval.append(path)\n\n # Update component/PROBLEM-REPORT-FORM\n vprint (\"Updating PRF for \" + component)\n\n version_line_re = re.compile (r\"^\\s*(\\w+) +VERSION ?:\")\n path = get_path(component, \"PROBLEM-REPORT-FORM\")\n\n with open (path, 'r+') as prf:\n new_prf = \"\"\n for line in prf.readlines ():\n match = version_line_re.search (line)\n if match is not None:\n vprint (\"Found PRF Version for \" + match.group (1))\n new_version = comp_versions[match.group(1) + \"_version\"]\n line = version_re.sub 
(new_version, line)\n\n new_prf += line\n\n if opts.take_action:\n prf.seek (0)\n prf.truncate (0)\n prf.writelines (new_prf)\n else:\n print (\"New PRF for \" + component)\n print (\"\".join (new_prf))\n\n retval.append(path)\n\n return retval", "def _post_install(dir_):\n scapy_locations = get_scapy_locations(get_site_packages())\n for scapy_location in scapy_locations:\n scapy_config = os.path.join(scapy_location, \"config.py\")\n processing_layer_list = False\n for line in fileinput.input(scapy_config, inplace=1, backup=\".bak\"):\n if line.strip().startswith(\"load_layers\"):\n print(line, end=\"\")\n processing_layer_list = True\n else:\n if processing_layer_list and line.strip().endswith(\"]\"):\n # TODO, consider single quote strings, and consider lonely\n # ] characters\n last_quote = line.rfind(\"\\\"\")\n if last_quote > 0 and \"http2\" not in line:\n print(\"%s, \\\"http2\\\" ]\" % line[\n :last_quote + 1], end=\"\")\n processing_layer_list = False\n else:\n print(line)\n processing_layer_list = False\n else:\n print(line, end=\"\")", "def _replace(self, line):\n if \"@VERSION@\" in line:\n return line.replace(\"@VERSION@\", VERSION)\n\n if \"Source0:\" in line: # Dirty hack\n return \"Source0: %{pkgname}-%{version}.tar.gz\"\n\n return line", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def preprocess_raw_upgrade_list(self, raw_upgrade_list):\n # By default, upgrade all package that are not in sync, which is\n # not what you want to do for more evolved package management\n return raw_upgrade_list", "def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def init_installation():\n # Cache the current Content-Length.\n length = get_remote_source_length()\n Cache.set_installation_length(length)", "def main():\n\n # a quick way to verify the version\n if getscriptversion:\n print('This script is running version: ' + scriptversion)\n exit(0)\n\n # verify that environmental and script requirements are met\n requirements()\n\n # pretty the screen up\n clear()\n # do the MD5 checksum\n checkmd5sum()\n if not user or not password:\n getcreds()\n\n # if device_file is provided parse the lines into a list of devices\n if device_file:\n with open(device_file) as line:\n devices = line.readlines()\n devices = [x.strip() for x in devices]\n else:\n devices = args.devices.split(\",\")\n\n for device in devices:\n\n device = Acos(device)\n print('')\n print('')\n print(dev_addr + ' ' + '{:*^100}'.format('Begin upgrade log for ' + dev_addr))\n print(dev_addr + ' ' + '{:*^100}'.format('Performing pre-upgrade checks'))\n\n # check if the device is online before running\n status = device.checkstatus()\n if status == 'FAIL':\n continue\n\n # 
authenticate to the device\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n # get the device hostname\n device.get_hostname()\n\n # get the currently running version\n version = device.get_running_ver()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing upgrade'))\n\n # if we are running 4.1.0 we have to use a different upgrade method\n if '4.1.0' in version:\n response = device.gui_upgrade(user, password)\n if response == 'FAIL':\n continue\n # for other versions just use the normal method\n else:\n response = device.upgrade()\n if response == 'FAIL':\n continue\n bootvar = device.get_bootvar()\n\n # if the user has specified they'd like to update the boot variable\n if updatebootvar:\n # why do work that we don't have to\n if partition in bootvar:\n print(dev_addr + ' Bootvar update requested, but not necessary, device already set to boot from ' + partition)\n # if you're not already set to boot from the partition we installed to, update the bootvar\n else:\n device.update_bootvar()\n # if the user wants to reboot to initialize the new code reboot the box\n if reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n if not reboot:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + 'You have requested the device not reboot, in order to initialize the new code you will need to reboot the device')\n # if you install to a partition the device won't reboot to, we probably want to stop you from shooting yourself in the foot\n elif not partition in bootvar:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + ' You have chosen to install to the partition that the device does not currently boot from.')\n print(dev_addr + ' If you wish for the device to run the new code upon reboot you need to update the boot variable manually.')\n if reboot:\n print(dev_addr + ' You have also requested a reboot which will not invoke the new code, SKIPPING REBOOT')\n elif reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n # technically we could still use the old AXAPI token, however for sake of code clarity we're going to do a quick log off then back on\n # the alternative would be having to shove the remaining steps below into each of the appropriate loops making this a bit more\n # spaghettish than it already is\n else:\n device.axapi_logoff()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing post-upgrade checks'))\n\n # since it is very likely the box has rebooted, and our old token is gone, lets get a new one\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n\n # find out where the device was booted from\n bootdefault = device.get_bootvar()\n\n # get the version of the currently booted partition\n device.get_ver(bootdefault)\n\n # get the current boot variable\n device.get_bootvar()\n\n # get the current running version\n device.get_running_ver()\n\n # log off\n device.axapi_logoff()\n print(dev_addr + ' ' + '{:*^100}'.format(' End upgrade 
log for ' + dev_addr))", "def updateMdrizVerHistory(self,build,versions):\n _plist = self.assoc.parlist[0]\n if build == True: _output = _plist['output']\n else: _output = _plist['outdata']\n \n fhdu = pyfits.open(_output,mode='update')\n prihdr = fhdu[0].header\n \n ver_str = \"MultiDrizzle product generated using: \"\n prihdr.add_history(ver_str)\n \n for key in versions:\n if versions[key].find('\\n') < 0:\n prihdr.add_history(key+versions[key])\n else:\n # This will accomodate multi-line comments\n _ver_str = versions[key].split('\\n')\n prihdr.add_history(key)\n for val in _ver_str:\n if val.strip() != '':\n prihdr.add_history(val)\n \n #ver_str = ' MultiDrizzle Version '+str(version)\n #prihdr.add_history(ver_str)\n \n fhdu.close()\n del fhdu", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def extract_if_needed(self):\n\n try:\n self.get_POW().getVersion()\n except rpki.POW.NotVerifiedError:\n self.extract()" ]
[ "0.68529916", "0.68529916", "0.6102295", "0.6096292", "0.60911036", "0.60778755", "0.60303754", "0.59639597", "0.59076554", "0.59010935", "0.58665204", "0.5826871", "0.57834613", "0.57735384", "0.5736566", "0.572258", "0.5700653", "0.56856245", "0.568505", "0.56655", "0.56608546", "0.56608546", "0.56482136", "0.5645946", "0.5629448", "0.56143165", "0.56111974", "0.55964214", "0.55947614", "0.556584", "0.55623996", "0.555246", "0.55050874", "0.54887503", "0.5482243", "0.5477241", "0.5460227", "0.5432081", "0.54280955", "0.5426799", "0.54226476", "0.5405278", "0.5400313", "0.53901947", "0.5381462", "0.53791904", "0.53716475", "0.5362167", "0.5351512", "0.5329752", "0.5328836", "0.5320603", "0.53139204", "0.53129023", "0.5310147", "0.53014576", "0.5300792", "0.52987856", "0.52794105", "0.5253187", "0.52500296", "0.52477545", "0.52445406", "0.5235611", "0.5235611", "0.5224993", "0.52149326", "0.5214437", "0.52124757", "0.52084816", "0.5207931", "0.5207431", "0.51992744", "0.5199147", "0.51853275", "0.5183806", "0.518113", "0.5174682", "0.51647353", "0.5163651", "0.51616883", "0.5157554", "0.5155101", "0.51519716", "0.51497704", "0.51466405", "0.5144441", "0.5141632", "0.5137882", "0.5135383", "0.5132966", "0.5132834", "0.5129752", "0.51270694", "0.5119108", "0.51142836", "0.5112723", "0.51105624", "0.51099765", "0.5105939" ]
0.5961917
8
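The negatives in the record above all orbit the same stop-update-install workflow: stop the affected services so the upgrade cannot restart them mid-install, refresh the repositories, then install the new packages. A minimal self-contained sketch of that pattern; the systemctl/apt-get tooling and the service and package names are assumptions for illustration, not taken from any record:

import subprocess

def stop_update_install(service="myapp.service", packages=("myapp",)):
    # NOTE: "myapp.service" and the package list are placeholder assumptions.
    # Stop the service first so nothing restarts mid-upgrade -- the same
    # precaution the Docker/puppet negative above takes before installing.
    subprocess.run(["systemctl", "stop", service], check=True)
    # Refresh package metadata, then install the new versions.
    subprocess.run(["apt-get", "update"], check=True)
    subprocess.run(["apt-get", "install", "-y", *packages], check=True)
    # Bring the service back up on the upgraded packages.
    subprocess.run(["systemctl", "start", service], check=True)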
kill all arangosync instances we possess
def kill_sync_processes(self, force, rev):
    for i in self.all_instances:
        if i.is_sync_instance():
            if not force and i.pid_file is not None and rev >= semver.VersionInfo.parse("0.15.0"):
                print("Skipping manual kill")
                return
            logging.info("manually killing syncer: " + str(i.pid))
            i.terminate_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_all():\n compose_kill_all()", "def kill_all(self):\n self._stop_all('kill')", "def killAll(controller=False):", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)", "def rdp_kill_all():\n logger.info(\"Kill all RDP sessions\")\n os.system(\"taskkill /FI \\\"IMAGENAME eq mstsc.exe\\\" /F\")", "def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )", "def kill_manager(self) -> None:\n\n for p in self.process_list:\n p.terminate()\n # NOTE: Seems Python does not appreciate if close is called too quickly.\n sleep(0.5)\n # Release the resources held by the Proess (Python 3.7 and up)\n p.close()", "def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass", "def kill(self):\n \n self.killSlavePids()", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def kill_all(self) -> None:\n for i in self.ist:\n i.stop_stream()\n i.close()\n for o in self.ost:\n o.stop_stream()\n o.close()", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def remote_kill():", "def terminateAll(self):\n with self.__queueLock:\n for queue in [self.__queue, self.__clientQueue]:\n queue.clear()\n\n for runList in [self.__running, self.__clientRunning]:\n unfinishedRuns = [run for run in runList if run is not None]\n for run in unfinishedRuns:\n run.kill()", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def restartAll(self):\n for name in self.processes:\n self.stopProcess(name)", "def stopdaemons(self):\n # TODO: we may want to improve this if we had the PIDs from the\n # specific EMANE daemons that we\"ve started\n cmd = [\"killall\", \"-q\", \"emane\"]\n stop_emane_on_host = False\n if emane.VERSION > emane.EMANE091:\n for node in self.getnodes():\n if hasattr(node, \"transport_type\") and \\\n node.transport_type == \"raw\":\n stop_emane_on_host = True\n continue\n if node.up:\n node.cmd(cmd, wait=False)\n # TODO: RJ45 node\n else:\n stop_emane_on_host = True\n if stop_emane_on_host:\n subprocess.call(cmd)\n subprocess.call([\"killall\", \"-q\", \"emanetransportd\"])", "def kill_processes(self):\n for proc in self.processes:\n if proc['proc'].poll() is not None:\n proc['proc'].terminate()", "def clean_all(self):\n for p in ['process_manager.py', 'mongo']:\n cmd = (\"ps aux | grep %s | grep -v grep | awk '{ print $2 }'\"\n \" | xargs kill -s 9\") % p\n self._ssh(cmd, use_pwd=False)", "def closeall(self, settings):\n\n for camera in self.camlist:\n camera.cam.stop()\n for light in self.lights:\n light.turn_off()\n if settings.sensors or settings.lights:\n GPIO.cleanup()\n if self.health.stall_p:\n self.health.stall_p.terminate()\n self.health.stall_p.join()\n if settings.send_threading:\n self.send_q.stop_sending()\n self.sender.zmq_socket.setsockopt(zmq.LINGER, 0) # prevents ZMQ hang on exit\n self.sender.close()", "def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()", "def _terminateAll(self):\n\n # Termination of all processes\n try :\n for process in 
self.processes:\n process.terminate()\n except AttributeError:\n pass\n\n return", "def stopEngines():\n pass", "def killall(connections):\n for connection in connections:\n try: connection.close()\n except: pass", "def kill_specific_instance(self, which_instances):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n instance.terminate_instance()", "def kill(self):\n processes = ['MicrosoftEdge.exe', 'MicrosoftEdgeCP.exe', 'plugin-container.exe',\n 'browser_broker.exe', 'smartscreen.exe']\n for exe in processes:\n subprocess.call(['taskkill', '/F', '/T', '/IM', exe])", "def stop(self):\n for process in self.process:\n process.stop()", "def terminate_all(self):\n self._stop_all('terminate')", "def terminate_all_processes(processes):\n for process in processes:\n process.terminate()", "def cleanup_manager(self) -> None:\n \n for p in self.process_list:\n if p.is_alive():\n p.terminate()\n sleep(1)\n p.close()", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def _kill_running_processes(self):\n # Kill any rouge processes that are still running.\n with _thread_lock:\n killed = []\n for pid in self._pids:\n try:\n os.kill(pid, _KILLED_BY_ANYPYTOOLS)\n killed.append(str(pid))\n except:\n pass\n self._pids.clear()", "def kill(self):\n kill_cmds = [\n \"sudo pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n self.log.info(\"Killing any server processes\")\n pcmd(self._hosts, \"; \".join(kill_cmds), False, None, None)", "def stop(self):\n for module in self.asynchronous:\n module.stop()", "def kill(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].stop()", "def stop_all_nodes(self):\n for node in self.nodes:\n if node.running():\n node.stop()", "def stopSpawing(self):\n self.girderManager.stopSpawing()", "def cleanup(self):\n process_set = [self.hal_process,\n self.manager_process, self.ptp_driver_process, self.fake_driver_process,\n self.res_driver_process,\n self.monitor_driver_process, self.ssd_driver_process]\n process_set.extend(self.agent_obj)\n\n for process_info in process_set:\n if None is not process_info:\n process = process_info['process']\n if None is not process \\\n and self.check_process_status(process) == self.PROCESSSTATE_ALIVE:\n process.terminate()", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def mux_stopall(): \r\n # Map this close to all existing multiplexers\r\n for (key, mux) in MULTIPLEXER_OBJECTS.items():\r\n mux.close()\r\n del MULTIPLEXER_OBJECTS[key]\r\n \r\n # Stop all underlying waitforconns\r\n for key in MULTIPLEXER_WAIT_HANDLES.keys():\r\n # Map stopcomm to each key\r\n mux_stopcomm(key)\r\n \r\n # Remove all the wait functions\r\n for key in 
MULTIPLEXER_WAIT_FUNCTIONS.keys():\r\n mux_virtual_stopcomm(key)", "def killconnections(self):\n for conn in self._connections:\n try:conn.close()\n except:pass\n self._connections=[]", "def stop_all_agents(self):\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()", "def kill_all(self, procname):\n procs = self.find_processes_by_name(procname)\n for proc in procs:\n result = self.kill_process(proc['PID'])\n if not result['HasExited']:\n for i in xrange(3):\n result = self.kill_process(result['PID'], False)\n if result['HasExited']:\n break\n else:\n raise MicroManagerError(\"Process with name'{}' and PID '{}' would not exit on machine '{}'.\".format(procname, proc['PID'], self.hostname))", "def _clean_actors(self):\n for actor in self.actor_list:\n actor.destroy()", "def stop_all():\n global exporter_objects\n logging.info(\"Starting shutdown of all threads.\")\n for _task in autorx.task_list.keys():\n try:\n autorx.task_list[_task]['task'].stop()\n except Exception as e:\n logging.error(\"Error stopping task - %s\" % str(e))\n\n for _exporter in exporter_objects:\n try:\n _exporter.close()\n except Exception as e:\n logging.error(\"Error stopping exporter - %s\" % str(e))", "def clean_up_threads(*args):\n\n for thread in app.socket_threads:\n if thread:\n thread.kill()\n # after killing the threads, remove them from the list\n app.socket_threads.clear()\n sys.exit()", "def clean_all_instances(self):\n if not self.is_job:\n return\n\n for job_instance in self.instances:\n job_instance.clean()\n self.status = 'CANCELED'", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def KillAllAnts(cls):\n cls.antArray.clear()", "def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()", "def disconnectAllServers():\n _disconnectAllServers()", "def kill_all():\n base = MongoTestServer.get_base_dir()\n\n print \"======================================\"\n print \"Cleaning up previous sessions under \" + base\n print \"======================================\"\n\n for mongo in os.listdir(base):\n if mongo.startswith('tmp'):\n mongo = os.path.join(base, mongo)\n print \"Previous session: \" + mongo\n lock = os.path.join(mongo, 'mongod.lock')\n if os.path.exists(lock):\n print \"Lock file found: \" + lock\n p = subprocess.Popen([\"lsof\", \"-Fp\", \"--\", lock],\n stdout=subprocess.PIPE)\n (out, _) = p.communicate()\n if out:\n pid = out[1:].strip()\n print \"Owned by pid: \" + pid + \" killing...\"\n p = subprocess.Popen([\"kill -9 %s\" % pid], shell=True)\n p.communicate()\n print \"Removing: \" + mongo\n shutil.rmtree(mongo, True)", "def shutdown_system():\n yield None\n active = active_children()\n for child in active:\n child.kill()", "def _stop(self, arbiter):\n self.transport_manager.stop()\n for execution_manager in self.execution_managers:\n execution_manager.stop()", "def cleanup(self):\n log = logging.getLogger('mailman.runner')\n # Send SIGTERMs to all the child processes and wait for them all to\n # exit.\n for pid in self._kids:\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError as error:\n if error.errno == errno.ESRCH:\n # The child has already exited.\n log.info('ESRCH on pid: %d', pid)\n # Wait for all the children to go away.\n while self._kids:\n try:\n pid, status = os.wait()\n self._kids.drop(pid)\n except OSError as error:\n if error.errno == errno.ECHILD:\n 
break\n elif error.errno == errno.EINTR:\n continue\n raise", "def kill(self):\n\t\tself.kill_subcomponents()\n\t\tself._subcomponents.clear()\n\t\tself.bug_world = None\n\n\t\ttry:\n\t\t\tself.ci.deregister_all()\n\t\texcept:\n\t\t\tpass", "def killJobs(self):\n self.worker_pool.close()\n self.status_pool.close()\n self.failure = True\n for job in self.active:\n try:\n job.killJob()\n except AttributeError:\n raise SchedulerError('killJob method is not defined')\n except: # Job already terminated\n pass\n self.job_queue_count = 0", "def shutdown_all(self, now=False):", "def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return", "def multikill(jobs):\n while True:\n container = jobs.get()\n print 'Stopping: {}'.format(container.name)\n container.kill(9)\n jobs.task_done()", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def DelGPActiveTunnels(asg_name):\n\n logger.info('Deleting GP Active Tunnels CloudWatch alarm for ASG: ' + asg_name)\n alarmname= asg_name + '-cw-gpat'\n common_alarm_func_del(alarmname)\n return", "def stopAllMotors():\n return RoboCaller().call(\"stopAllMotors\", \"void\")", "def cancel_all_instances(self):\n if not self.is_job:\n return\n\n for job_instance in self.instances:\n job_instance.cancel()\n self.status = 'CANCELED'", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' 
% seq", "def kill(ctx, analytic_host, analytic_port):\n client = aceclient.ConfigClient(host=analytic_host, port=analytic_port)\n client.kill()", "def killall(cleanup=lambda:None, wait_s=16):\n # TODO(infinity0): log this somewhere, maybe\n global _isTerminating, _CHILD_PROCS\n if _isTerminating: return\n _isTerminating = True\n # terminate all\n for proc in _CHILD_PROCS:\n if proc.poll() is None:\n proc.terminate()\n # wait and make sure they're dead\n for i in range(wait_s):\n _CHILD_PROCS = [proc for proc in _CHILD_PROCS\n if proc.poll() is None]\n if not _CHILD_PROCS: break\n time.sleep(1)\n # if still existing, kill them\n for proc in _CHILD_PROCS:\n if proc.poll() is None:\n proc.kill()\n time.sleep(0.5)\n # reap any zombies\n for proc in _CHILD_PROCS:\n proc.poll()\n cleanup()", "def stop(self):\r\n for srv in self._servers:\r\n srv.stop()", "def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()", "def cleanup():\n dist.destroy_process_group()", "def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')", "def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()", "def cleanup():\n broadcast_proc.terminate()\n subprocess.call('sudo hciconfig hci0 noleadv', shell=True)\n if CELL:\n ser_command('Cell off', cell_ser)\n cell_ser.close()\n grovepi.digitalWrite(LED, 0)", "def terminate(self) -> None:\n self.robot.terminate_all()", "def killall(logger=None, everywhere=False):\r\n if not NailgunExecutor.killall:\r\n return False\r\n else:\r\n return NailgunExecutor.killall(logger=logger, everywhere=everywhere)", "def end(self):\n for bot in self.bots:\n bot.stop()\n for sock in self.socks:\n sock.stop()\n self.replyer.stop()\n self.stop()", "def removeMySims(self):\n for sim in self.sims:\n try:\n sim.destroy()\n except:\n sim.removeNode()", "def on_StopNode_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n print(\"We will kill all gman process!\")\n reply = QMessageBox.question(self, '确认', '确认kill所有gman任务吗', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n autokillGman()\n self.OnlyDisplay(\"kill -9 |grep gman\")\n else:\n print(\"Keep GMAN run.......!\")", "def kill_processes(self) -> None:\n for process in [p for p in self.processes if p.is_running()]:\n for child in process.children(recursive=True):\n if child.is_running():\n child.kill()\n\n process.kill()", "def clear_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n for i in instance_list:\n print i\n config.delete_instance(i)\n print \"instance {0} deleted\".format(i.id)\n else:\n print \"No instance found\"", "def disconnect(self):\r\n # TODO: Should destroy all instances which have been started dynamically\r\n raise NotImplementedError", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def kill_running_es_instances(node_prefix):\n def elasticsearch_process(p):\n return p.name() == \"java\" and any(\"elasticsearch\" in e for e in p.cmdline()) and any(\"node.name=rally\" in e for e in p.cmdline())\n\n logger.info(\"Killing all processes which match [java], [elasticsearch] and [%s]\" % node_prefix)\n 
kill_all(elasticsearch_process)", "def kill(targets, controller=False):", "def killAllMongoProc(host, ports):\n for port in ports.values():\n killMongoProc(host, port)", "def kill_all(name, sig=signal.SIGKILL):\n sig = int(sig)\n for proc in psutil.process_iter():\n if proc.name() == name:\n kill(proc.pid, sig)", "def cleanup_notifiers(notifiers):\n for notifier in notifiers.values():\n notifier.stop()", "def shutdown(self) -> None:\n for worker in self.remote_workers:\n worker.shutdown.remote()\n worker.__ray_terminate__.remote()", "def killtimers(self):\n for timer in self._timers: timer.cancel()\n self._timers = []", "def stop(self):\n # remove all tap interfaces\n for i in range(self._vport_id):\n tapx = 'tap' + str(i)\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)\n self._vport_id = 0\n\n # remove datapath before vswitch shutdown\n dpctl = DPCtl()\n dpctl.del_dp()\n\n super(OvsVanilla, self).stop()\n\n # give vswitch time to terminate before modules are removed\n time.sleep(5)\n self._module_manager.remove_modules()", "def __del__(self):\n self._proc.kill()", "def hard_stop_drivers(self, drivers_to_stop: Set[str]):\n for process in find_processes():\n if process.comm in drivers_to_stop:\n process.kill()", "def kill(self, id):", "def kill(self, id):", "def cleanup():\n for th in THREAD_REGISTER.values():\n th.exit()\n th.join(timeout=3)", "def stop_scripts():\n print \"*** WARNING ***: This is about to kill all python processes\"\n run(\"killall python\")", "def clean_up(self):\n dist.destroy_process_group()" ]
[ "0.7347588", "0.71552086", "0.7108252", "0.7059267", "0.6879104", "0.6862512", "0.6845278", "0.6730945", "0.66946965", "0.6599361", "0.65697455", "0.6569324", "0.6562354", "0.65175766", "0.64793795", "0.644303", "0.644055", "0.64267015", "0.6424852", "0.6371221", "0.6370731", "0.6346451", "0.62971336", "0.6270442", "0.6266686", "0.6265793", "0.62586194", "0.62578285", "0.6251097", "0.6250128", "0.6231643", "0.62301993", "0.62234473", "0.6207526", "0.62008363", "0.6195103", "0.6191337", "0.61886144", "0.61766213", "0.6163408", "0.6152746", "0.6137409", "0.61356676", "0.613564", "0.6133202", "0.6128956", "0.6109462", "0.6103502", "0.6100968", "0.6097353", "0.606584", "0.60640204", "0.60639924", "0.6043127", "0.6041695", "0.603432", "0.6029518", "0.602392", "0.60182613", "0.6007443", "0.60012984", "0.5994022", "0.59877986", "0.5985628", "0.5982444", "0.5966586", "0.5962198", "0.59617436", "0.5960884", "0.59560615", "0.59404063", "0.5928427", "0.5914679", "0.5902345", "0.58968246", "0.5894077", "0.58888274", "0.5881822", "0.5871139", "0.58654976", "0.5864916", "0.58624864", "0.58588606", "0.5858156", "0.5851454", "0.58382946", "0.5836629", "0.58306205", "0.5819095", "0.58126277", "0.5790655", "0.5788292", "0.5787218", "0.5779202", "0.5777117", "0.57705605", "0.57705605", "0.5768126", "0.5762257", "0.57590324" ]
0.6927068
4
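The document and many of its negatives share one pattern: iterate over the known processes and terminate the ones that match. A minimal standalone sketch of that pattern using psutil (a library that also appears in these records); the default process name "arangosync" is an assumption for illustration:

import psutil

def kill_all_by_name(name="arangosync"):
    # NOTE: the default name is a placeholder assumption.
    # Walk every visible process and terminate those whose name matches.
    killed = []
    for proc in psutil.process_iter(["pid", "name"]):
        try:
            if proc.info["name"] == name:
                proc.terminate()  # polite SIGTERM; escalate to proc.kill() if needed
                killed.append(proc.info["pid"])
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue  # process exited or belongs to someone else; skip it
    return killed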
we use a starter to tell daemon starters to perform the rolling upgrade
def command_upgrade(self):
    args = [
        self.cfg.bin_dir / "arangodb",
        "upgrade",
        "--starter.endpoint",
        self.get_http_protocol() + "://127.0.0.1:" + str(self.get_my_port()),
    ]
    logging.info("StarterManager: Commanding upgrade:")
    lh.log_cmd(" ".join([str(arg) for arg in args]))
    self.upgradeprocess = psutil.Popen(
        args,
        # stdout=subprocess.PIPE,
        # stdin=subprocess.PIPE,
        # stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    print("Upgrade commander has PID:" + str(self.upgradeprocess.pid))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def upgrade(self):", "def upgrade(self):", "def main():\n updater = VersionUpdater('PowerDNS-Admin')\n updater.run()", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def null_upgrade_step(setup_tool):\n pass", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def test_relaunch_deployment_run(self):\n pass", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def __init__(self, series, openstack=None, source=None, stable=False):\n super(SwiftProxyBasicDeployment, self).__init__(series, openstack,\n source, stable)\n self._add_services()\n self._add_relations()\n self._configure_services()\n self._deploy()\n\n u.log.info('Waiting on extended status checks...')\n exclude_services = []\n self._auto_wait_for_status(exclude_services=exclude_services)\n\n self.d.sentry.wait()\n self._initialize_tests()", "def auto_upgrade_v1(cfg):\n v1 = V1Status(cfg)\n if v1.installed:\n # On first auto-upgrade pickley (ran in background by wrapper)\n setup_audit_log(cfg)\n inform(\"Auto-upgrading %s packages with pickley v2\" % len(v1.installed))\n for prev in v1.installed:\n pspec = PackageSpec(cfg, prev.name)\n try:\n manifest = perform_install(pspec, is_upgrade=False, quiet=False)\n if manifest and manifest.entrypoints and prev.entrypoints:\n for old_ep in prev.entrypoints:\n if old_ep not in manifest.entrypoints:\n 
runez.delete(os.path.join(cfg.base.path, old_ep))\n\n except BaseException:\n inform(\"%s could not be upgraded, please reinstall it\" % runez.red(prev.name))\n if prev.entrypoints:\n for old_ep in prev.entrypoints:\n runez.delete(os.path.join(cfg.base.path, old_ep))\n\n inform(\"----\")\n\n v1.clean_old_files()\n inform(\"Done\")", "def run():\r\n autostartup()", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def default_upgrade_charm():\n reactive.set_state('upgraded')", "def auto_upgrade(force, package):\n if not package or package == PICKLEY: # pragma: no cover, exercised via test_bootstrap() functional test\n manifest = bootstrap()\n if not package:\n if not manifest:\n inform(\"Pickley is already bootstrapped\")\n\n sys.exit(0) # When called without 'package' specified: intent was to bootstrap only\n\n # We were called by auto-upgrade wrapper (in the background)\n auto_upgrade_v1(CFG)\n if manifest:\n sys.exit(0) # Bootstrap already got us up-to-date\n\n pspec = PackageSpec(CFG, package)\n ping = pspec.ping_path\n if not force and runez.file.is_younger(ping, CFG.version_check_delay(pspec) * 60):\n LOG.debug(\"Skipping auto-upgrade, checked recently\")\n sys.exit(0)\n\n runez.touch(ping)\n if runez.file.is_younger(pspec.lock_path, CFG.install_timeout(pspec) * 60):\n LOG.debug(\"Lock file present, another installation is in progress\")\n sys.exit(0)\n\n perform_install(pspec, is_upgrade=True, force=False, quiet=True)", "def test_upgrade_apply_all_fine(setup, platform, skuba):\n\n setup_kubernetes_version(skuba)\n\n # node upgrade apply\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\n \"Node my-master-0 is up to date\"\n ) != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\n \"Node my-worker-0 is up to date\"\n ) != -1", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def test_upgrade_apply_user_lock(setup, platform, kubectl, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n # lock kured\n kubectl.run_kubectl(\"-n kube-system annotate ds kured weave.works/kured-node-lock='{\\\"nodeID\\\":\\\"manual\\\"}'\")\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n # disable skuba-update.timer\n platform.ssh_run(r, n, \"sudo systemctl disable --now skuba-update.timer\")\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n assert platform.ssh_run(r, n, \"sudo systemctl is-enabled skuba-update.timer || :\").find(\"disabled\") != -1\n\n assert kubectl.run_kubectl(\"-n kube-system get ds/kured -o jsonpath='{.metadata.annotations.weave\\.works/kured-node-lock}'\").find(\"manual\") != -1\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def pre_upgrade(self, upgrade_specs):\n pass", "def main():\n spark_it_up()", "def __init__(self, series, openstack=None, source=None, stable=False):\n super(ManilaPluginCharmDeployment, self).__init__(\n series, openstack, source, stable)\n self._add_services()\n self._add_relations()\n self._configure_services()\n self._deploy()\n\n u.log.info('Waiting on extended status checks...')\n exclude_services = ['mysql', ]\n self._auto_wait_for_status(exclude_services=exclude_services)\n\n self._initialize_tests()", "def daemonize(package, bin_loc, user):\n\n path = \"/etc/cron.hourly/centinel-\" + user\n\n if user != \"root\":\n # create a script to run centinel every hour as the current user\n hourly = \"\".join([\"#!/bin/bash\\n\",\n \"# cron job for centinel\\n\",\n \"su \", user, \" -c '\", bin_loc, \" --sync'\\n\",\n \"su \", user, \" -c '\", bin_loc, \"'\\n\",\n \"su \", user, \" -c '\", bin_loc, \" --sync'\\n\"])\n else:\n # create a script to run centinel every hour as root\n hourly = \"\".join([\"#!/bin/bash\\n\",\n \"# cron job for centinel\\n\",\n bin_loc, \" --sync\\n\",\n bin_loc, \"\\n\",\n bin_loc, \" --sync\\n\"])\n\n create_script_for_location(hourly, path)\n\n # create a script to get the client to autoupdate every day\n if package is None:\n return\n updater = \"\".join([\"#!/bin/bash\\n\",\n \"# autoupdater for centinel\\n\"\n \"sudo pip install --upgrade \", package, \"\\n\"])\n create_script_for_location(updater, \"/etc/cron.daily/centinel-autoupdate\")\n print \"Successfully created cron jobs for user \" + user", "def run_starter(self, expect_to_fail=False):", "def start():\n if env.latest:\n if env.python3:\n sudo('/bin/systemctl start demo-latest-py3', shell=False)\n else:\n sudo('/bin/systemctl start demo-latest.service', shell=False)\n else:\n with cd(env.directory):\n sudo('./bin/supervisorctl start zeoserver', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient1', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient2', user=env.deploy_user)", "def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)", "def test_retest_deployment_run(self):\n 
pass", "def main():\n update_delay = getenv('UPDATE_DELAY', '300')\n notification_url = getenv('NOTIFICATION_URL', '')\n\n try:\n client = docker.from_env()\n except ConnectionError:\n logger.error('Could not connect to Docker Engine. Check https://git.io/fjvRd for possible solutions')\n return\n\n logger.info('Started checking for updates')\n apprise = Apprise()\n if len(notification_url) > 0:\n # Add notification provider from URL if provided\n apprise.add(notification_url)\n\n if not is_swarm_manager(client):\n raise Exception('Docker Engine is not in Swarm Mode')\n while True:\n update_services(client, apprise)\n time.sleep(float(update_delay))", "def quickstart(*args, **kwargs):\n\n setup(*args, **kwargs)\n update_site(*args, **kwargs)\n restart_site(*args, **kwargs)", "def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role 
%s\" %(each.strip(),self,role))", "def main(config):\n current_config = DKRConfig()\n\n for key, value in config.items():\n\n if key in current_config.config:\n for version in value['versions']:\n current_config.add_entrypoint_version(key, version)\n continue\n\n current_config.add_entrypoint(key, value['versions'])\n\n current_config.write(create=True)", "def this_needs_work_test_hook_upgrade(self):\n self.do_test_hook_install(testee.upgrade_setup, True)", "def first_deployment_mode():\n env.initial_deploy = True", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def pre_installation(self):\n pass", "def test_update_deployment(self):\n pass", "def enable_rolling(releaser):\n releases = list(releaser.get_releases().values())\n if releases:\n commit = releases[-1][\"commit\"]\n releaser.set_app_to_release(commit)\n releaser.enable_rolling()\n click.echo(\"Enabled rolling\")", "def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()", "def deploy():", "def start():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --daemon'])", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def update():\n with cd(env.directory):\n\n # update plone\n result = sudo('git pull', user=env.deploy_user)\n quick_update = 'Already up-to-date.' in result\n\n if quick_update:\n # Plonesite Recipe replaces site on the fly\n print 'UPDATE: No full Buildout required: {0:s}'.format(result)\n # buildout\n stop()\n sudo('./bin/buildout install plonesite', user=env.deploy_user)\n start()\n\n else:\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n\n sudo('rm -rf ./var/blobstorage', user=env.deploy_user)\n sudo('rm -rf ./var/filestorage', user=env.deploy_user)\n sudo('rm .installed.cfg', user=env.deploy_user)\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo('./bin/zeoclient_debug adduser admin admin', user=env.deploy_user) # noqa: E501\n\n # load page twice to fill cache and prevent a bug showing raw html\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501", "def startUp(self):\n pass", "def pre_start_migrate_cores(self, env):\n import params\n env.set_params(params)\n\n if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:\n backup_solr_cores=\"/tmp/solr/cores\"\n solr_home_dir=params.solr_data_dir\n\n Directory(format(solr_home_dir),\n owner=params.solr_user,\n create_parents=True,\n group=params.user_group\n )\n\n if os.path.isdir(solr_home_dir) and os.path.isdir(backup_solr_cores):\n Execute(('cp', '-rn', backup_solr_cores+\"/.\", solr_home_dir),\n user=params.solr_user,\n logoutput=True\n )", "def upgrade_cmd(jail, release):\n lgr = ioc_logger.Logger('ioc_cli_upgrade')\n lgr = lgr.getLogger()\n\n jails, paths = IOCList(\"uuid\").list_datasets()\n _jail = {tag: uuid for (tag, uuid) in jails.items() if\n uuid.startswith(jail) or tag == jail}\n\n if 
len(_jail) == 1:\n tag, uuid = next(iter(_jail.items()))\n path = paths[tag]\n root_path = \"{}/root\".format(path)\n elif len(_jail) > 1:\n lgr.error(\"Multiple jails found for\"\n \" {}:\".format(jail))\n for t, u in sorted(_jail.items()):\n lgr.critical(\" {} ({})\".format(u, t))\n exit(1)\n else:\n lgr.critical(\"{} not found!\".format(jail))\n exit(1)\n\n pool = IOCJson().json_get_value(\"pool\")\n iocroot = IOCJson(pool).json_get_value(\"iocroot\")\n freebsd_version = checkoutput([\"freebsd-version\"])\n status, jid = IOCList.list_get_jid(uuid)\n conf = IOCJson(path).json_load()\n host_release = os.uname()[2]\n jail_release = conf[\"release\"]\n started = False\n\n if conf[\"release\"] == \"EMPTY\":\n lgr.critical(\"Upgrading is not supported for empty jails.\")\n exit(1)\n\n if conf[\"type\"] == \"jail\":\n if not status:\n IOCStart(uuid, tag, path, conf, silent=True)\n status, jid = IOCList.list_get_jid(uuid)\n started = True\n elif conf[\"type\"] == \"basejail\":\n lgr.critical(\"Please run \\\"iocage migrate\\\" before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n elif conf[\"type\"] == \"template\":\n lgr.critical(\"Please convert back to a jail before trying\"\n \" to upgrade {} ({})\".format(uuid, tag))\n exit(1)\n else:\n lgr.critical(\"{} is not a supported jail type.\".format(conf[\"type\"]))\n exit(1)\n\n _freebsd_version = \"{}/releases/{}/root/bin/freebsd-version\".format(\n iocroot, release)\n\n if \"HBSD\" in freebsd_version:\n Popen([\"hbsd-upgrade\", \"-j\", jid]).communicate()\n else:\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(root_path)):\n # 10.3-RELEASE and under lack this flag\n if float(host_release.partition(\"-\")[0][:5]) <= 10.3:\n lgr.critical(\"Host: {} is too old, please upgrade to \"\n \"10.3-RELEASE or above\".format(host_release))\n exit(1)\n\n os.environ[\"PAGER\"] = \"/bin/cat\"\n fetch = Popen([\"freebsd-update\", \"-b\", root_path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(root_path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(root_path),\n \"--currently-running {}\".format(jail_release), \"-r\",\n release, \"upgrade\"], stdin=PIPE)\n fetch.communicate(b\"y\")\n\n while not __upgrade_install__(root_path, release):\n pass\n\n if release[:4].endswith(\"-\"):\n # 9.3-RELEASE and under don't actually have this binary.\n new_release = release\n else:\n with open(_freebsd_version, \"r\") as r:\n for line in r:\n if line.startswith(\"USERLAND_VERSION\"):\n new_release = line.rstrip().partition(\"=\")[\n 2].strip(\n '\"')\n\n IOCJson(path, silent=True).json_set_value(\"release={}\".format(\n new_release))\n\n if started:\n IOCStop(uuid, tag, path, conf, silent=True)\n\n lgr.info(\"\\n{} ({}) successfully upgraded from {} to {}!\".format(\n uuid, tag, jail_release, new_release))", "def upgrade_script():\n if postgres.db_exists(env.db):\n with cd(path()):\n sudo('bin/upgrade_{odoo} -d {db} '.format(**env), user=env.account)", "def run(self):\n\n run_command(['apt-get', 'update'])\n run_command(['apt-get', 'install', '-y', 'unattended-upgrades'])\n run_command(['apt-get', 'upgrade', '-y'])", "def main():\n date = time.gmtime().tm_mday\n if date == 1 or date == 2: # in case it missed once\n # shift from slave to master, checking to ensure it hasn't already happened\n status = check_status()\n if status == 'slave':\n slave_to_master()\n elif status == 'master':\n print(\"Shift has probably already happened\")\n else:\n print(\"In a forbidden state:\", status)\n elif date == 22 or date == 23: #in case it missed once\n 
# shift from master to slave, checking to ensure it hasn't already happened\n status = check_status()\n if status == 'master':\n master_to_slave()\n elif status == 'slave':\n print(\"Shift has probably already happened\")\n else:\n print(\"In a forbidden state:\", status)\n else:\n pass", "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def deploy():\n update_treesheets()\n restart_treesheets()", "def pre_stop_backup_cores(self, env):\n import params\n env.set_params(params)\n\n if compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:\n solr_home_dir=params.solr_data_dir\n else: #4.1.0.0\n solr_home_dir=params.old_lib_dir + \"/data\"\n\n unique = get_unique_id_and_date()\n backup_solr_dir=\"/tmp/upgrades/{0}/solr_{1}\".format(params.version, unique)\n backup_solr_cores=\"/tmp/solr/cores\"\n\n if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):\n os.makedirs(backup_solr_dir)\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_dir),\n sudo=True\n )\n\n if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:\n Directory(backup_solr_cores,\n action=\"delete\",\n create_parents=True)\n\n Directory(backup_solr_cores,\n mode=0755,\n cd_access='a',\n owner=params.solr_user,\n create_parents=True,\n group=params.user_group\n )\n\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_cores),\n user=params.solr_user\n )", "def full_upgrade(self):\n return self.upgrade(\"full-upgrade\")", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def main():\n setup()\n master = Master()\n master.start()", "def upgrade(ctx):\n tf_cmds = [\n [\"terraform\", \"init\", \"--upgrade\"],\n [\"terraform\", \"refresh\"],\n [\"terraform\", \"apply\", \"-auto-approve\"],\n ]\n\n if ctx.invoked_subcommand is None:\n if click.confirm('Do you want to run upgrade prechecks?'):\n ctx.invoke(precheck)\n else:\n print_warning_msg(f\"Skipping upgrade prechecks\")\n\n click.echo(\n \"Following commands will be run during upgrade\\n%s\" % (\n \"\\n\".join((map(\" \".join, tf_cmds)))\n ),\n )\n for cmd in tf_cmds:\n if click.confirm(\n 'Do you want to continue with %s?' 
%\n \" \".join(cmd),\n ):\n rc = execute_command(cmd)\n if rc != 0:\n print_error_msg(\"Upgrade Failed!!!\")\n return", "def start_deployment(self):\n return", "def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n old_version = self.cfg.version\n self.default_starter_args = new_install_cfg.default_starter_args.copy()\n self.enterprise = new_install_cfg.enterprise\n self.replace_binary_setup_for_upgrade(new_install_cfg)\n with step(\"kill the starter processes of the old version\"):\n logging.info(\"StarterManager: Killing my instance [%s]\", str(self.instance.pid))\n self.kill_instance()\n with step(\"revalidate that the old arangods are still running and alive\"):\n self.detect_instance_pids_still_alive()\n if relaunch:\n with step(\"replace the starter binary with a new one,\" + \" this has not yet spawned any children\"):\n self.respawn_instance(new_install_cfg.version)\n logging.info(\"StarterManager: respawned instance as [%s]\", str(self.instance.pid))\n self.arangosh = None\n self.detect_arangosh_instances(new_install_cfg, old_version)", "def smarter():\r\n pass", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> 
./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def auto_upgrade(self) -> bool:\n return pulumi.get(self, \"auto_upgrade\")", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in 
self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def test_upgrade_plan_from_previous(setup, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n # cluster upgrade plan\n out = skuba.cluster_upgrade_plan()\n assert out.find(\"Current Kubernetes cluster version: {pv}\".format(\n pv=PREVIOUS_VERSION)) != -1\n assert out.find(\"Latest Kubernetes version: {cv}\".format(\n cv=CURRENT_VERSION)) != -1\n assert out.find(\n \"Upgrade path to update from {pv} to {cv}:\\n - {pv} -> {cv}\".format(\n pv=PREVIOUS_VERSION, cv=CURRENT_VERSION)\n ) != -1\n\n # node upgrade plan\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"plan\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\n \"Current Kubernetes cluster version: {pv}\".format(pv=PREVIOUS_VERSION))\n assert master.find(\"Latest Kubernetes version: {cv}\".format(\n cv=CURRENT_VERSION)) != -1\n assert master.find(\" - apiserver: {pv} -> {cv}\".format(\n pv=PREVIOUS_VERSION, cv=CURRENT_VERSION)) != -1\n assert master.find(\" - kubelet: {pv} -> {cv}\".format(\n pv=PREVIOUS_VERSION, cv=CURRENT_VERSION)) != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\n \"Current Kubernetes cluster version: {pv}\".format(pv=PREVIOUS_VERSION))\n assert worker.find(\"Latest Kubernetes version: {cv}\".format(\n cv=CURRENT_VERSION)) != -1\n # If the control plane nodes are not upgraded yet, skuba disallows upgrading a worker\n assert worker.find(\"Node my-worker-0 is up to date\")", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def data_upgrades():\n pass", "def data_upgrades():\n pass", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def main():\n if PEPQUEUED.exists():\n PEPQUEUED.remove()\n 
os.chdir(BETABUILD_CHECKOUT_DIR / \"peps\")\n try:\n cmd(\"svn up\")\n except RuntimeError, error:\n log('%s: %s' % (error.__class__.__name__, error))\n cmd(\"./pep2pyramid.py --force -d %s\" % PEPDIR)\n cmd(\"./pep2rss.py %s\" % PEPDIR)\n BUILDQUEUED.touch()\n\n if BUILDINPROCESS.exists():\n # allow new checkins to queue a new build during another build\n # (leave BUILDQUEUED in place)\n return\n\n if BUILDQUEUED.exists():\n revision = BUILDQUEUED.text()\n log('revision %s' % revision)\n BUILDQUEUED.remove()\n update(revision)\n\n #rebuild jobs rss\n cmd(\"%s/jobs2rss.py %s\"%(JOBSDIR, JOBSDIR))\n log('Rebuilding jobs.rss')", "def main():\n\n # a quick way to verify the version\n if getscriptversion:\n print('This script is running version: ' + scriptversion)\n exit(0)\n\n # verify that environmental and script requirements are met\n requirements()\n\n # pretty the screen up\n clear()\n # do the MD5 checksum\n checkmd5sum()\n if not user or not password:\n getcreds()\n\n # if device_file is provided parse the lines into a list of devices\n if device_file:\n with open(device_file) as line:\n devices = line.readlines()\n devices = [x.strip() for x in devices]\n else:\n devices = args.devices.split(\",\")\n\n for device in devices:\n\n device = Acos(device)\n print('')\n print('')\n print(dev_addr + ' ' + '{:*^100}'.format('Begin upgrade log for ' + dev_addr))\n print(dev_addr + ' ' + '{:*^100}'.format('Performing pre-upgrade checks'))\n\n # check if the device is online before running\n status = device.checkstatus()\n if status == 'FAIL':\n continue\n\n # authenticate to the device\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n # get the device hostname\n device.get_hostname()\n\n # get the currently running version\n version = device.get_running_ver()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing upgrade'))\n\n # if we are running 4.1.0 we have to use a different upgrade method\n if '4.1.0' in version:\n response = device.gui_upgrade(user, password)\n if response == 'FAIL':\n continue\n # for other versions just use the normal method\n else:\n response = device.upgrade()\n if response == 'FAIL':\n continue\n bootvar = device.get_bootvar()\n\n # if the user has specified they'd like to update the boot variable\n if updatebootvar:\n # why do work that we don't have to\n if partition in bootvar:\n print(dev_addr + ' Bootvar update requested, but not necessary, device already set to boot from ' + partition)\n # if you're not already set to boot from the partition we installed to, update the bootvar\n else:\n device.update_bootvar()\n # if the user wants to reboot to initialize the new code reboot the box\n if reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n if not reboot:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + 'You have requested the device not reboot, in order to initialize the new code you will need to reboot the device')\n # if you install to a partition the device won't reboot to, we probably want to stop you from shooting yourself in the foot\n elif not partition in bootvar:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + ' You have chosen to install to the partition that the device does not 
currently boot from.')\n print(dev_addr + ' If you wish for the device to run the new code upon reboot you need to update the boot variable manually.')\n if reboot:\n print(dev_addr + ' You have also requested a reboot which will not invoke the new code, SKIPPING REBOOT')\n elif reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n # technically we could still use the old AXAPI token, however for sake of code clarity we're going to do a quick log off then back on\n # the alternative would be having to shove the remaining steps below into each of the appropriate loops making this a bit more\n # spaghettish than it already is\n else:\n device.axapi_logoff()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing post-upgrade checks'))\n\n # since it is very likely the box has rebooted, and our old token is gone, lets get a new one\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n\n # find out where the device was booted from\n bootdefault = device.get_bootvar()\n\n # get the version of the currently booted partition\n device.get_ver(bootdefault)\n\n # get the current boot variable\n device.get_bootvar()\n\n # get the current running version\n device.get_running_ver()\n\n # log off\n device.axapi_logoff()\n print(dev_addr + ' ' + '{:*^100}'.format(' End upgrade log for ' + dev_addr))", "def main():\n # Wait for dependency services (ES and RE) to be live\n wait_for_dependencies(timeout=180)\n logging.info('Services started! Now starting the app..')\n # Initialize worker group of ESIndexer\n es_indexers = WorkerGroup(ESIndexer, (), count=config()['workers']['num_es_indexers'])\n # Initialize a worker group of RelengImporter\n releng_importers = WorkerGroup(RelengImporter, (), count=config()['workers']['num_re_importers'])\n # All worker groups to send kafka messages to\n receivers = [es_indexers, releng_importers]\n\n # used to check update every minute\n last_updated_minute = int(time.time()/60)\n _CONFIG_TAG = _query_for_config_tag()\n\n # Initialize and run the Kafka consumer\n consumer = _set_consumer()\n\n while True:\n msg = consumer.poll(timeout=0.5)\n if msg is None:\n continue\n curr_min = int(time.time()/60)\n if curr_min > last_updated_minute:\n config_tag = _query_for_config_tag()\n # update minute here\n last_updated_minute = curr_min\n if config_tag is not None and config_tag != _CONFIG_TAG:\n _CONFIG_TAG = config_tag\n # send message to es_indexers to update config.\n es_indexers.queue.put(('ws_event', {\n 'evtype': \"RELOAD_ELASTIC_ALIASES\",\n \"msg\": f\"updating to tag {_CONFIG_TAG}\"\n }))\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n logging.info('End of stream.')\n else:\n logging.error(f\"Kafka message error: {msg.error()}\")\n continue\n val = msg.value().decode('utf-8')\n try:\n data = json.loads(val)\n except ValueError as err:\n logging.error(f'JSON parsing error: {err}')\n logging.error(f'Message content: {val}')\n for receiver in receivers:\n receiver.queue.put(('ws_event', data))", "def test_serviceRunsMigrations(self):\n m1 = TestMigration(store=self.store)\n m2 = TestMigration(store=self.store)\n self.store.powerUp(m1)\n self.store.powerUp(m2)\n self.assertEquals(m1.ran, 0)\n self.assertEquals(m2.ran, 0)\n 
self.manager.startService()\n self.assertEquals(m1.ran, 1)\n self.assertEquals(m2.ran, 1)", "def run_starter(self, expect_to_fail=False):\n logging.info(\"running starter \" + self.name)\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n\n lh.log_cmd(args)\n self.instance = psutil.Popen(args)\n logging.info(\"my starter has PID:\" + str(self.instance.pid))\n if not expect_to_fail:\n self.wait_for_logfile()\n self.wait_for_port_bind()", "def run_upgrade(args):\n upgrader = Upgrade(\n args.src,\n args.dst,\n PuppetUpgrader(args.src),\n disable_rollback=args.disable_rollback)\n\n upgrader.run()", "def __init__(self, series=None, openstack=None,\n source=None, git=False, stable=False):\n super(KeystoneBasicDeployment, self).__init__(series, openstack,\n source, stable)\n self.keystone_api_version = 2\n self.git = git\n self._add_services()\n self._add_relations()\n self._configure_services()\n self._deploy()\n\n u.log.info('Waiting on extended status checks...')\n self.exclude_services = []\n self._auto_wait_for_status(exclude_services=self.exclude_services)\n\n self.d.sentry.wait()\n self._initialize_tests()", "def test_redeploy(self):\n pass", "def new():\n run('pew new --dont-activate --python={0} '\n '{1}'.format(python_bin, package_name()))\n verun('pip install --upgrade wheel')\n verun('pip install --upgrade pip')", "def run_migration(env, upgrade_type):\n pass", "def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()", "def deploy(env='development', update_settings='n', upgrade_apps='n'):\n update_site(env, update_settings, upgrade_apps)\n restart_site(env)", "def production_reset():\n command_file = os.path.join(\n config.updater.cache_partition, 'ubuntu_command')\n with atomic(command_file) as fp:\n print('format data', file=fp)\n print('enable factory_wipe', file=fp)\n log.info('Performing a production factory reset')\n config.hooks.apply().apply()", "def feltBump(self):\n self.stamp = rospy.Time.now()\n self.ready_to_publish = True", "def test_upgrade(longhorn_upgrade_type,\n upgrade_longhorn_repo_url,\n upgrade_longhorn_repo_branch,\n upgrade_longhorn_manager_image,\n upgrade_longhorn_engine_image,\n upgrade_longhorn_instance_manager_image,\n upgrade_longhorn_share_manager_image,\n upgrade_longhorn_backing_image_manager_image,\n client, core_api, volume_name, csi_pv, # NOQA\n pvc, pod_make, statefulset, storage_class): # NOQA\n longhorn_repo_url = upgrade_longhorn_repo_url\n longhorn_repo_branch = upgrade_longhorn_repo_branch\n longhorn_manager_image = upgrade_longhorn_manager_image\n longhorn_engine_image = upgrade_longhorn_engine_image\n longhorn_instance_manager_image = upgrade_longhorn_instance_manager_image\n longhorn_share_manager_image = upgrade_longhorn_share_manager_image\n longhorn_backing_image_manager_image = \\\n upgrade_longhorn_backing_image_manager_image\n\n host_id = get_self_host_id()\n pod_data_path = \"/data/test\"\n\n # Disable Auto Salvage Setting\n update_setting(client, SETTING_AUTO_SALVAGE, \"false\")\n\n # 2-1 Create vol_revision_enabled with revision counter enabled\n # attached to a node\n update_setting(client, SETTING_DISABLE_REVISION_COUNTER, \"false\")\n vol_revision_enabled_name = 'vol-revision-enabled'\n vol_revision_enabled, vol_revision_enabled_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_revision_enabled_name)\n\n # 2-2 Create vol_revision_disabled with revision counter disable\n # attached to a node\n update_setting(client, 
SETTING_DISABLE_REVISION_COUNTER, \"true\")\n vol_revision_disabled_name = 'vol-revision-disabled'\n vol_revision_disabled, vol_revision_disabled_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_revision_disabled_name)\n\n # 2-3 Create vol_rebuild for replica rebuilding after system upgrade\n # & engine live upgrade\n vol_rebuild_name = 'vol-rebuild'\n vol_rebuild, vol_rebuild_data_before_sys_upgrade = \\\n create_volume_and_write_data(client, vol_rebuild_name)\n\n # Create Volume used by Pod\n pod_volume_name = 'lh-vol-pod-test'\n pod_name, pv_name, pvc_name, pod_md5sum = \\\n prepare_pod_with_data_in_mb(client, core_api, csi_pv, pvc,\n pod_make, pod_volume_name,\n data_path=pod_data_path,\n add_liveness_probe=False)\n\n # Create multiple volumes used by StatefulSet\n statefulset_name = 'statefulset-upgrade-test'\n update_statefulset_manifests(statefulset,\n storage_class,\n statefulset_name)\n create_storage_class(storage_class)\n create_and_wait_statefulset(statefulset)\n statefulset_pod_info = get_statefulset_pod_info(core_api, statefulset)\n\n for sspod_info in statefulset_pod_info:\n sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api,\n sspod_info['pod_name'],\n sspod_info['data'])\n # upgrade Longhorn manager\n assert longhorn_upgrade(longhorn_repo_url,\n longhorn_repo_branch,\n longhorn_manager_image,\n longhorn_engine_image,\n longhorn_instance_manager_image,\n longhorn_share_manager_image,\n longhorn_backing_image_manager_image)\n\n client = get_longhorn_api_client()\n\n # wait for 1 minute before checking pod restarts\n time.sleep(60)\n\n # Check Pod and StatefulSet didn't restart after upgrade\n pod = core_api.read_namespaced_pod(name=pod_name,\n namespace='default')\n assert pod.status.container_statuses[0].restart_count == 0\n\n for sspod_info in statefulset_pod_info:\n sspod = core_api.read_namespaced_pod(name=sspod_info['pod_name'],\n namespace='default')\n assert \\\n sspod.status.container_statuses[0].restart_count == 0\n\n # Check all volumes data after system upgrade\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_before_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n vol_revision_disabled_data_before_sys_upgrade)\n check_volume_data(vol_rebuild,\n vol_rebuild_data_before_sys_upgrade)\n\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)\n assert res_pod_md5sum == pod_md5sum\n\n # Write data to all volumes after system upgrade\n for sspod_info in statefulset_pod_info:\n sspod_info['data'] = generate_random_data(VOLUME_RWTEST_SIZE)\n write_pod_volume_data(core_api,\n sspod_info['pod_name'],\n sspod_info['data'])\n\n vol_revision_enabled_data_after_sys_upgrade = \\\n write_volume_random_data(vol_revision_enabled)\n vol_revision_disabled_data_after_sys_upgrade = \\\n write_volume_random_data(vol_revision_disabled)\n vol_rebuild_data_after_sys_upgrade = \\\n write_volume_random_data(vol_rebuild)\n\n # Check data written to all volumes\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_after_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n vol_revision_disabled_data_after_sys_upgrade)\n check_volume_data(vol_rebuild,\n 
vol_rebuild_data_after_sys_upgrade)\n\n # Detach the vol_revision_enabled & vol_revision_disabled,\n # and Delete Pod, and StatefulSet to detach theirvolumes\n\n statefulset['spec']['replicas'] = replicas = 0\n apps_api = get_apps_api_client()\n\n apps_api.patch_namespaced_stateful_set(\n name=statefulset_name,\n namespace='default',\n body={\n 'spec': {\n 'replicas': replicas\n }\n })\n\n delete_and_wait_pod(core_api, pod_name)\n\n # Upgrade all volumes engine images\n volumes = client.list_volume()\n for v in volumes:\n if v.name != vol_rebuild_name:\n volume = client.by_id_volume(v.name)\n volume.detach(hostId=\"\")\n wait_for_volume_detached(client, v.name)\n\n engineimages = client.list_engine_image()\n for ei in engineimages:\n if ei.image == longhorn_engine_image:\n new_ei = ei\n\n for v in volumes:\n volume = client.by_id_volume(v.name)\n volume.engineUpgrade(image=new_ei.image)\n\n # Recreate Pod, and StatefulSet\n statefulset['spec']['replicas'] = replicas = 2\n apps_api = get_apps_api_client()\n\n apps_api.patch_namespaced_stateful_set(\n name=statefulset_name,\n namespace='default',\n body={\n 'spec': {\n 'replicas': replicas\n }\n })\n\n wait_statefulset(statefulset)\n\n pod = pod_make(name=pod_name)\n pod['spec']['volumes'] = [create_pvc_spec(pvc_name)]\n create_and_wait_pod(core_api, pod)\n\n # Attach the volume\n for v in volumes:\n if v.name == vol_revision_enabled_name or \\\n v.name == vol_revision_disabled_name:\n volume = client.by_id_volume(v.name)\n volume.attach(hostId=host_id)\n wait_for_volume_healthy(client, v.name)\n\n # Verify volume's engine image has been upgraded\n for v in volumes:\n volume = client.by_id_volume(v.name)\n engine = get_volume_engine(volume)\n assert engine.engineImage == new_ei.image\n assert engine.currentImage == new_ei.image\n\n # Check All volumes data\n for sspod_info in statefulset_pod_info:\n resp = read_volume_data(core_api, sspod_info['pod_name'])\n assert resp == sspod_info['data']\n\n res_pod_md5sum = get_pod_data_md5sum(core_api, pod_name, pod_data_path)\n assert res_pod_md5sum == pod_md5sum\n\n check_volume_data(vol_revision_enabled,\n vol_revision_enabled_data_after_sys_upgrade)\n check_volume_data(vol_revision_disabled,\n vol_revision_disabled_data_after_sys_upgrade)\n check_volume_data(vol_rebuild,\n vol_rebuild_data_after_sys_upgrade)\n\n # Delete one healthy replica for vol_rebuild to trigger the rebuilding\n delete_replica_on_test_node(client, vol_rebuild_name)\n # Make sure vol_rebuild replica is deleted\n replica_count = 2\n vol_rebuild = wait_for_volume_replica_count(client, vol_rebuild_name,\n replica_count)\n # vol_rebuild will become degraded and start replica rebuilding\n # Wait for replica rebuilding to complete\n # Verify the vol_rebuild is still healthy\n vol_rebuild = wait_for_volume_degraded(client, vol_rebuild_name)\n assert vol_rebuild.robustness == \"degraded\"\n vol_rebuild = wait_for_volume_healthy(client, vol_rebuild_name)\n assert vol_rebuild.robustness == \"healthy\"\n assert len(vol_rebuild.replicas) == 3", "def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. 
'\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return", "def main():\n\n\n\n\tdaemon = ORsched(scheduler_config.pidfile, stdout=scheduler_config.outstream, stderr=scheduler_config.outstream)\n\ttry:\n\t\topts, list = getopt.getopt(sys.argv[1:], 'st')\n\texcept getopt.GetoptError, e:\n\t\tprint(\"Bad options provided!\")\n\t\tsys.exit()\n\n\tfor opt, a in opts:\n\t\tif opt == \"-s\":\n\t\t\ttry:\n\t\t\t\tpid_number = open(scheduler_config.pidfile,'r').readline()\n\t\t\t\tif pid_number:\n \t\t\t\tsys.exit('Daemon is already running!')\n \t\texcept Exception, e:\n \t\t\tpass\n\n\t\t\tprint(\"Starting daemon...!\")\n\t\t\tdaemon.start()\n\t\telif opt == \"-t\":\n\t\t\tdaemon.stop()\n\t\t\tprint \"The daemon is stoped!\"\n\t\telse:\n\t\t\tprint(\"Option %s not supported!\" % (opt))", "def set_sensor(self, kwargs: dict) -> None:\n subdirs = os.listdir(\"/usr/local/lib/python3.6/site-packages\")\n for subdirname in subdirs:\n if \"appdaemon\" in subdirname.lower():\n if \"info\" in subdirname.lower():\n version = subdirname.split(\"-\")[1][:-5]\n self.set_state(\"sensor.appdaemon_installed\", state=version)", "def rollback():\n with cd(env.basepath):\n run('mv current/rollback rollback')\n run('mv current undeployed')\n run('mv rollback current')\n version = run('readlink current')\n previous = run('readlink undeployed')\n puts(green('>>> Rolled back from %(previous)s to %(version)s' % { 'previous': previous, 'version': version }))\n run('rm -fr %s' % previous)\n run('rm undeployed')\n sudo('service nginx reload')\n with cd(env.nodejs):\n for n in [1, 2]:\n with settings(warn_only=True):\n sudo('stop nodejs N=%s' % n)\n run('mv instance%s/rollback rollback%s' % (n, n))\n run('mv instance%s undeployed' % n)\n run('mv rollback%s instance%s' % (n, n))\n version = run('readlink instance%s' % n)\n previous = run('readlink undeployed')\n puts(green('>>> Rolled back nodejs %(n)s from %(previous)s to %(version)s' % { 'n': n, 'previous': previous, 'version': version }))\n run('rm -fr %s' % previous)\n run('rm undeployed')\n sudo('start nodejs N=%s' % n)", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def deploy():\n with 
cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def maintenance_on():\n run('touch %s' % NGINX_MAINTENANCE_FILEPATH)", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! \")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. 
Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n 
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def upgrade_if_needed(self, restart = True, dependencies = False):\n if self.check():\n print \"Upgrading %s\" % self.pkg\n self.upgrade(dependencies)\n if restart:\n self.restart()", "def startapp():", "def main():\n\n args = _parse_arguments()\n if args.call:\n if args.archive is None:\n raise RuntimeError(\n \"Could not run update archive is missing\"\n )\n run_update()\n else:\n\n folder = args.folder\n\n import os\n\n if not os.access(folder, os.W_OK):\n raise RuntimeError(\"Could not update, base folder is not writable\")\n\n update_info = get_update_info()\n archive = loadPluginTarget(\n update_info.get(UPDATE_CONFIG_NAME)\n .get(\"pip\")\n .format(target_version=args.target),\n folder,\n )\n\n # call new update script with args\n sys.argv = [\n \"--call=true\",\n \"--archive={}\".format(archive)\n ] + sys.argv[1:]\n try:\n result = subprocess.call(\n [sys.executable, os.path.join(folder, \"update_script.py\")] + sys.argv,\n stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError as e:\n print(e.output)\n raise RuntimeError(\"error code %s\", (e.returncode, e.output))\n\n if result != 0:\n raise RuntimeError(\"Error Could not update returncode - {}\".format(result))", "def do_update(self, node_role_map, node_roles, first_run=False):\n require('use_rds')\n require('pstat_instance')\n require('pstat_url')\n require('project_root')\n require('config_folder')\n require('ssl_prefix')\n require('backup')\n require('aws_access_key_id')\n require('aws_secret_access_key')\n require('sphinx_counter')\n require('key_filename')\n require('calabar_conf_context')\n require('loggly_inputs')\n require('sphinx_counter')\n require('ipsec_confs')\n require('hostname')\n require('enable_periodic_tasks')\n\n logger.info(\"Starting to provision %s\", env.host_string)\n\n for ipsec_name, _ in env.ipsec_confs.items():\n # Require all of the pre-shared key configs\n require('ipsec_psk_%s' % ipsec_name)\n\n if first_run:\n self.do_first_launch_config()\n\n self._stop_celery()\n\n self._update_cache_settings(node_role_map['memcached']['all'])\n self._update_sphinx_settings(\n node_role_map['celery_backend']['same_az'],\n node_roles,\n )\n self._update_celery_backend_settings(\n node_role_map['sphinx_search_indexer']['same_az'],\n )\n ldap_api_nodes = node_role_map['has_ldap_access']\n self._update_ldap_api_endpoint_settings(\n all_ldap_api_nodes=ldap_api_nodes['all'],\n same_az_ldap_api_nodes=ldap_api_nodes['same_az'],\n node_roles=node_roles,\n )\n self._update_celery_ldap_settings(node_roles)\n\n # Package and push the app to the new instance\n env.project_root_src = '/opt/pstat/versions/%(timestamp)s' % env\n source_dir = env.project_root_src\n current_source_dir = None\n if not first_run:\n current_source_dir = env.project_root\n with hide(*fab_output_hides):\n push_source(\n new_source_dir=source_dir,\n current_source_dir=current_source_dir,\n chown=F_CHOWN,\n chmod=\"u+rw,g+rw,o-rw\",\n )\n self._make_media_readable(source_dir)\n self._configure_settings_local(\n source_dir,\n env.pstat_settings,\n chown=F_CHOWN,\n )\n self._configure_settings_target(\n source_dir,\n env.settings_target,\n chown=F_CHOWN,\n )\n self.configure_terrarium(source_dir=source_dir, user=FILE_OWNER)\n self._activate_new_source(\n source_dir,\n [ACTIVE_SOURCE_SYMLINK, env.project_root],\n )\n self._run_db_migrations(user=FILE_OWNER)\n\n # Link up the attachments and upload directories from 
/mnt/\n self._link_storage_dirs()\n\n self._configure_webservers(node_roles)\n building_search_index = self._build_search_index()\n\n self._create_media_folder()\n self._collect_static_media()\n\n self._create_500_page()\n self._restart_webservers()\n\n # Services managed via supervisord\n self._configure_celery(node_roles)\n self._update_supervisord()\n self._configure_calabar()\n self._configure_ipsec()\n self._start_celery()\n\n self._configure_loggly()\n self._configure_pstat_cron_jobs()\n self._configure_email_sending()\n\n if first_run:\n self._sync_s3_media()\n\n if building_search_index:\n self._wait_for_search_indexing()\n self._ensure_sphinx_running()\n self._configure_sphinx_cron()\n\n logger.info(\"Provisioner completed successfully\")" ]
[ "0.5944614", "0.5928757", "0.57852423", "0.5737201", "0.57358366", "0.57358366", "0.5688907", "0.5653342", "0.5614044", "0.56099707", "0.5584282", "0.55671465", "0.5554055", "0.5543296", "0.5511669", "0.5501744", "0.5501293", "0.54920185", "0.54645705", "0.5462174", "0.543319", "0.540628", "0.54033256", "0.5400922", "0.5392308", "0.53705615", "0.53618294", "0.53597367", "0.5352641", "0.535261", "0.53415245", "0.5322594", "0.5319407", "0.53127235", "0.5306216", "0.529296", "0.5266058", "0.52583075", "0.52517414", "0.51971835", "0.5189168", "0.516936", "0.51669", "0.51568156", "0.51469344", "0.51336646", "0.51282567", "0.5127279", "0.5122229", "0.5120687", "0.51171756", "0.51108754", "0.5110436", "0.5102284", "0.5089081", "0.50847685", "0.5084505", "0.5080568", "0.5078376", "0.5062596", "0.5058759", "0.5055467", "0.50447196", "0.5043471", "0.5040977", "0.5038096", "0.5034911", "0.5023757", "0.5020833", "0.5020833", "0.5014245", "0.49983153", "0.49980742", "0.4994685", "0.49938768", "0.499295", "0.49861658", "0.49661148", "0.49654293", "0.49650127", "0.4959181", "0.49509194", "0.49435475", "0.49399015", "0.49390265", "0.49311972", "0.4923154", "0.4916468", "0.49145633", "0.49100196", "0.49075466", "0.49064764", "0.48999956", "0.48991174", "0.48822898", "0.48641944", "0.48597237", "0.485546", "0.48512983", "0.48462924" ]
0.5740626
3
Wait for the upgrade-commanding starter to finish.
def wait_for_upgrade(self, timeout=60):
    ret = None
    try:
        ret = self.upgradeprocess.wait(timeout=timeout)
    except psutil.TimeoutExpired as timeout_ex:
        msg = "StarterManager: Upgrade command [%s] didn't finish in time: %d" % (
            str(self.basedir),
            timeout,
        )
        raise TimeoutError(msg) from timeout_ex
    logging.info(
        "StarterManager: Upgrade command [%s] exited: %s",
        str(self.basedir),
        str(ret),
    )
    if ret != 0:
        raise Exception("Upgrade process exited with non-zero reply")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def wait_for_upgrade_done_in_log(self, timeout=120):\n keep_going = True\n logging.info('Looking for \"Upgrading done\" in the log file.\\n')\n while keep_going:\n text = self.get_log_file()\n pos = text.find(\"Upgrading done.\")\n keep_going = pos == -1\n if keep_going:\n time.sleep(1)\n progress(\".\")\n timeout -= 1\n if timeout <= 0:\n raise TimeoutError(\"upgrade of leader follower not found on time\")\n for instance in self.all_instances:\n instance.wait_for_shutdown()", "async def wait_until_done(self) -> None:\n ...", "def wait_for_version_reply(self):\n frontends = self.get_frontends()\n for frontend in frontends:\n # we abuse this function:\n while frontend.get_afo_state() != AfoServerState.LEADER:\n progress(\".\")\n time.sleep(0.1)", "def wait(self):\n pass", "def wait(self):\n pass", "def waitUntilSuccess():", "def do_wait(self):\n pass", "def wait():\n pass", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def wait(self):\n self.Popen.wait()", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def wait(self):\n self.mainloop().wait()", "def test_nothing_to_upgrade(self, mock_click_echo):\n agent_config = self.load_agent_config(self.agent_name)\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Checking if there is a newer remote version of agent package '{agent_config.public_id}'...\"\n )\n mock_click_echo.assert_any_call(\n \"Package not found, continuing with normal upgrade.\"\n )\n mock_click_echo.assert_any_call(\"Everything is already up to date!\")", "def wait():\n time.sleep(1)", "def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple[\"Status\", dict]:", "def wait(self):\n time.sleep(0.010)", "def _wait_for_install(self, instance, ssh_options, wait_dir):\n wait_time = 3\n command = \"ls %s\" % wait_dir\n ssh_command = self._get_standard_ssh_command(instance, ssh_options, command)\n\n self.logger.info(\"Waiting for install with command %s\" % ssh_command)\n while True:\n retcode = subprocess.call(ssh_command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n if retcode == 0:\n break\n self.logger.debug(\"Sleeping for %d seconds...\" % wait_time)\n time.sleep(wait_time)", "def wait(self):\n\t\twhile 
True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def setup_complete():\n\n async def predicate(ctx:vbu.Context):\n if await fetch_guild_settings(ctx):\n return True\n raise CheckFailure(f'Your server hasn\\'t yet been set up. 
Use {ctx.prefix}setup')\n return commands.check(predicate)", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()", "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def test_nothing_to_upgrade(self, mock_click_echo):\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Updating AEA version specifier from ==0.1.0 to {compute_specifier_from_version(get_current_aea_version())}.\"\n )\n\n # test 'aea_version' of agent configuration is upgraded\n expected_aea_version_specifier = compute_specifier_from_version(\n get_current_aea_version()\n )\n agent_config = self.load_agent_config(self.current_agent_context)\n assert agent_config.aea_version == expected_aea_version_specifier\n assert agent_config.author == self.author\n assert agent_config.version == DEFAULT_VERSION", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def _wait_for_sync(self):\n\n if not self._inventory_mgr.block_until_inventory(timeout=self._mor_sync_timeout):\n raise RuntimeError(\"Did not sync inventory within {0} seconds\".format(self._mor_sync_timeout))\n\n if not self._metric_mgr.block_until_has_metrics(timeout=self._metric_sync_timeout):\n raise RuntimeError(\"Did not sync metrics within {0} seconds\".format(self._metric_sync_timeout))", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def wait_finish(self):\r\n self.proc.join()", "def wait_complete(self):\n self.join()", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def test_arbitrary(self):\n self.executor.add_command('apt_get')\n output, _error = self.executor.apt_get.update().batch()\n self.assertEqual(output, 'update finished successfully')", "def wait(self):\n self.event.wait()", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "async def async_setup(hass, config):\n\n config = config.get(DOMAIN, {})\n if config.get(CONF_REPORTING):\n huuid = await hass.async_add_job(_load_uuid, hass)\n else:\n huuid = None\n\n include_components = config.get(CONF_COMPONENT_REPORTING)\n\n async def check_new_version(now):\n \"\"\"Check if a new version is available and report if one is.\"\"\"\n result = await get_newest_version(hass, huuid, include_components)\n\n if result is None:\n return\n\n newest, releasenotes, android, apt = result\n\n # Load data from supervisor on hass.io\n if hass.components.hassio.is_hassio():\n newest = hass.components.hassio.get_homeassistant_version()\n\n # Validate version\n if StrictVersion(newest) > StrictVersion(current_version):\n _LOGGER.info(\"The latest available version is %s\", newest)\n info = 'Dostępna jest nowa wersja ' + newest + '. ' + releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Aktualizacja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": True,\n \"reinstall_android_app\": android,\n \"apt\": apt\n }\n )\n # add all entities to keep the order\n # hass.async_add_job(\n # hass.services.async_call(\n # 'group',\n # 'set', {\n # \"object_id\": \"dom_system_version\",\n # \"entities\": [\n # \"sensor.version_info\",\n # \"script.ais_update_system\",\n # \"camera.remote_access\",\n # \"input_boolean.ais_remote_access\",\n # \"sensor.ais_secure_android_id_dom\",\n # \"script.ais_scan_network_devices\",\n # \"script.ais_restart_system\",\n # \"script.ais_stop_system\"]}))\n\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Zainstaluj aktualizację',\n \"icon\": \"mdi:download\"\n }\n )\n\n else:\n info = 'Twój system jest aktualny, wersja ' + newest + '. 
'\n info += releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Wersja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": False,\n \"reinstall_android_app\": False,\n \"apt\": apt\n }\n )\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Sprawdź dostępność aktualizacji',\n \"icon\": \"mdi:refresh\"\n }\n )\n _LOGGER.info(\n \"You are on the latest version (%s) of Assystent domowy\", newest)\n\n # Update daily, start 1 hour after startup\n\n _dt = dt_util.utcnow() + timedelta(hours=1)\n event.async_track_utc_time_change(\n hass, check_new_version,\n hour=_dt.hour, minute=_dt.minute, second=_dt.second)\n\n # register services\n hass.services.async_register(\n DOMAIN, SERVICE_CHECK_VERSION, check_new_version)\n\n return True", "def test_wait_for_exits(main_container, version_container):\n assert (\n version_container.wait() == 0\n ), \"Container service (version) did not exit cleanly\"", "def wait_for_acquisition(self):\n self.lib.WaitForAcquisition()", "async def update(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are updating\n self.add_to_output(f\"Updating...\")\n # create ssh connection to miner\n try:\n conn = await self.get_connection(\"root\", \"admin\")\n # tell the user we are sending the update file\n self.add_to_output(\"Sending upgrade file...\")\n # send the update file\n await self.send_file(UPDATE_FILE_S9, \"/tmp/firmware.tar\")\n # install the update and collect the result\n result = await conn.run(f'sysupgrade /tmp/firmware.tar')\n self.add_to_output(result.stdout.strip())\n # tell the user the update completed\n self.add_to_output(f\"Update completed...\")\n except OSError:\n self.add_to_output(f\"Unknown error...\")", "def run(self):\n\n run_command(['apt-get', 'update'])\n run_command(['apt-get', 'install', '-y', 'unattended-upgrades'])\n run_command(['apt-get', 'upgrade', '-y'])", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in 
output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))", "def test_component_update_available_NEW(self):\n MockPopen.mock_stdout = 'Inst b (new from)'\n self.assertTrue(self.u.component_update_available())", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')", "def wait_completion(self):\r\n self.tasks.join()", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! \")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n 
rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= 
RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': 
user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def _wait_for_cf_stack_update_to_complete(self):\n cf_waiter = self.cf_client.get_waiter(\"stack_update_complete\")\n logger.info(\"Waiting for stack to get to Successful Update state....\")\n try:\n cf_waiter.wait(\n StackName=self.shared_resource_stack_name,\n WaiterConfig={\"Delay\": 10, \"MaxAttempts\": 6},\n )\n except Exception as e:\n logger.error(e)\n logger.error(\n \"Failed to use Stack with name {} \".format(self.shared_resource_stack_name)\n )\n raise Exception(\n f\"The provided CloudFormation Stack for Shared Resource is unstable. \"\n f\"Please debug the stack here: {self._get_cf_stack_events_link()}\"\n )", "def test_upgrade_required_mock(self):\n with patch(\n \"aea.cli.upgrade.ItemUpgrader.check_upgrade_is_required\",\n return_value=\"100.0.0\",\n ):\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=False,\n )\n assert result.exit_code == 0", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)", "def wait(self):\r\n self.jobs.join()", "def __await_helms_installation(self, job_id, expected_services_count):\n end_waiting = datetime.now().timestamp() + self.TIMEOUT_MIN * 60 * 1000\n curr_status = self.helm_results.get(job_id)\n while datetime.now().timestamp() <= end_waiting:\n curr_status = self.helm_results.get(job_id, {\"services\": []})\n if expected_services_count != len(curr_status[\"services\"]):\n time.sleep(1.)\n else:\n self.helm_results.pop(job_id)\n return curr_status\n self.helm_results.pop(job_id)\n return curr_status", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n 
logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def upgrade(ctx):\n tf_cmds = [\n [\"terraform\", \"init\", \"--upgrade\"],\n [\"terraform\", \"refresh\"],\n [\"terraform\", \"apply\", \"-auto-approve\"],\n ]\n\n if ctx.invoked_subcommand is None:\n if click.confirm('Do you want to run upgrade prechecks?'):\n ctx.invoke(precheck)\n else:\n print_warning_msg(f\"Skipping upgrade prechecks\")\n\n click.echo(\n \"Following commands will be run during upgrade\\n%s\" % (\n \"\\n\".join((map(\" \".join, tf_cmds)))\n ),\n )\n for cmd in tf_cmds:\n if click.confirm(\n 'Do you want to continue with %s?' %\n \" \".join(cmd),\n ):\n rc = execute_command(cmd)\n if rc != 0:\n print_error_msg(\"Upgrade Failed!!!\")\n return", "def wait(self, *args):\n # TODO -- say something\n if self.finished_places == 7:\n self.finished_places += 1\n return super(Up, self).wait(*args)", "def test_clean_wait(self):\n\n arg_parser = arguments.get_parser()\n\n args = arg_parser.parse_args([\n 'run',\n '-H', 'this',\n 'clean_test'\n ])\n run_cmd = commands.get_command(args.command_name)\n run_cmd.silence()\n run_cmd.run(self.pav_cfg, args)\n\n time.sleep(1)\n\n args = arg_parser.parse_args([\n 'clean'\n ])\n\n clean_cmd = commands.get_command(args.command_name)\n clean_cmd.silence()\n\n self.assertEqual(clean_cmd.run(self.pav_cfg, args), 0)", "async def wait_until_ready(self):\n await self._ready.wait()", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def level_check(self):\n\t\tif self.happiness > self.__get_data(\"happiness_level_up_requirement\") and \\\n\t\t\t self.level < self.level_max:\n\t\t\t# add a production line that gets the necessary upgrade material.\n\t\t\t# when the production finished, it calls level_up as callback.\n\t\t\tupgrade_material_prodline = self.session.db.get_settler_upgrade_material_prodline(self.level+1)\n\t\t\tif self.has_production_line(upgrade_material_prodline):\n\t\t\t\treturn # already waiting for res\n\t\t\tupgrade_material_production = SingleUseProduction(self.inventory, \\\n\t\t\t upgrade_material_prodline, callback = self.level_up)\n\t\t\t# drive the car out of the garage to make space for the building material\n\t\t\tfor res, amount in upgrade_material_production.get_consumed_resources().iteritems():\n\t\t\t\tself.inventory.add_resource_slot(res, abs(amount))\n\t\t\tself.add_production(upgrade_material_production)\n\t\t\tself.log.debug(\"%s: Waiting for material to upgrade from %s\", self, self.level)\n\t\telif self.happiness < self.__get_data(\"happiness_level_down_limit\"):\n\t\t\tself.level_down()\n\t\t\tself._changed()", "def test_new_upgrade_pending(\n mocker, state, slack, ouw_oc_map, ouw_ocm_map, upgrade_config, dt\n):\n dt.utcnow.return_value = upgrade_at - timedelta(hours=1)\n gso = mocker.patch(\n \"reconcile.openshift_upgrade_watcher._get_start_osd\", autospec=True\n )\n gso.return_value = 
upgrade_at.strftime(\"%Y-%m-%dT%H:%M:%SZ\"), upgrade_version\n ouw.notify_upgrades_start(\n ocm_map=ouw_ocm_map,\n oc_map=ouw_oc_map,\n clusters=[load_cluster(\"cluster1.yml\")],\n state=state,\n slack=slack,\n )\n assert slack.chat_post_message.call_count == 0\n assert state.add.call_count == 0", "def this_needs_work_test_hook_upgrade(self):\n self.do_test_hook_install(testee.upgrade_setup, True)", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def test_component_update_available_REMOVE(self):\n MockPopen.mock_stdout = 'Remv c (old PKG)\\nRemv d PKG'\n self.assertTrue(self.u.component_update_available())", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait(self):\n time.sleep(self.next())", "def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)", "def state_wait_exit(cfg, app, win):", "def wait(aws):\n\n aws_list = aws if isinstance(aws, list) else [aws]\n results = asyncio.get_event_loop().run_until_complete(asyncio.gather(\n *aws_list, return_exceptions=True))\n # If any of the cmds failed, re-raise the error.\n for result in results:\n if isinstance(result, Exception):\n raise result\n return results if isinstance(aws, list) else results[0]", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def upgrade(self):", "def upgrade(self):", "def wait(self, cycles):\n\t\tpass", "def test_upgrade_apply_from_previous(setup, platform, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def answer_waiting_call(self) -> None:", "def wait(self):\n return self.bot_client.send_command(_Command.Wait)", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "async def on_ready():\n print(f'{bot.user} has connected!')\n try:\n await pull_prev_info()\n except Exception as e:\n print(\"Error in starting function with pulling previous information:\")\n print(e)\n\n try:\n await update_tournament_list()\n except Exception as e:\n print(\"Error in starting function with updating tournament list:\")\n print(e)\n\n try:\n refresh_sheet.start()\n except Exception as e:\n print(\"Error in starting function with updating tournament list:\")\n print(e)\n\n post_something.start()\n cron.start()\n go_stylist.start()\n manage_welcome.start()\n store_variables.start()\n change_bot_status.start()\n update_member_count.start()", "def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()" ]
[ "0.77359796", "0.7126432", "0.7126432", "0.7126432", "0.7126432", "0.6678276", "0.66306156", "0.6630208", "0.66240835", "0.64890206", "0.6419722", "0.640707", "0.640707", "0.64049435", "0.63792473", "0.6337039", "0.63248074", "0.62579197", "0.6122265", "0.6120902", "0.61138004", "0.60820246", "0.60655546", "0.6057462", "0.6029344", "0.60255265", "0.5987153", "0.59747934", "0.59511006", "0.59248954", "0.59068155", "0.59068155", "0.5882927", "0.58785045", "0.5820724", "0.5805098", "0.58032817", "0.5782743", "0.5772905", "0.5765151", "0.5734717", "0.57235163", "0.57174253", "0.57165426", "0.5702999", "0.5701843", "0.56979305", "0.5688733", "0.56844723", "0.5679969", "0.5667569", "0.5664097", "0.56622195", "0.56497717", "0.56398547", "0.5628815", "0.56274575", "0.56267136", "0.55944073", "0.55927366", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.5592665", "0.55888724", "0.55885595", "0.5586996", "0.557441", "0.556692", "0.55579334", "0.55573916", "0.55534226", "0.55530196", "0.5548121", "0.5543734", "0.5525338", "0.55126625", "0.5509156", "0.5508744", "0.5500515", "0.5488492", "0.54787517", "0.54551506", "0.5451735", "0.5450927", "0.5439181", "0.54344165", "0.54344165", "0.54342127", "0.5431463", "0.5420494", "0.54187775", "0.5414403", "0.54128295", "0.53991723" ]
0.73207533
1
Tries to wait for the server to restart after the 'restore' command.
def wait_for_restore(self):
    for node in self.all_instances:
        if node.instance_type in [
            InstanceType.RESILIENT_SINGLE,
            InstanceType.SINGLE,
            InstanceType.DBSERVER,
        ]:
            node.detect_restore_restart()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def continue_server():\n update_server_status({'ready': True})", "async def async_restore(self):\n await self._client.restore()\n self.async_write_ha_state()", "def _restart(self):\n pass", "def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')", "def restart(self):\n\t\treturn self.reset().start()", "def test_restore_with_erlang_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n conn.start_couchbase()\n conn.disconnect()\n timeout_now = 600\n output = restore_result.result(timeout=timeout_now)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with erlang crash and restart within 180 seconds\")\n self.log.info(\"Restore succeeded with erlang crash and restart within 180 seconds\")", "def restart_salt():\n stop_salt()\n start_salt()", "def test_restore_with_memcached_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n output = restore_result.result(timeout=600)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with memcached crash and restart within 400 seconds\")\n self.log.info(\"Restore succeeded with memcached crash and restart within 400 seconds\")", "def test_backup_restore_after_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n rebalance.result()\n self.backup_cluster_validate()\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: 
self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")", "def restart(self) -> None:", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def restart(self):", "def restartSystem(self):\n # save retry count between reboots\n try:\n self.notifyPut('Restarting System...')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['retry_count'] = self.retry_count\n self.db.close()\n except Exception, e:\n self.logQ.put('{0} - Unable to save retry count'.format(e))\n \n try:\n subprocess.call(['SHUTDOWN', '/f', '/r'])\n except Exception, e:\n self.logQ.put('{0} - Unable to restart Windows'.format(e))\n return", "def request_shutdown(self, restart=False):", "def test_resume_restore(self):\n if not self.backupset.resume:\n self.fail(\"Resume must be True for this test\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.log.info(\"Start to flush bucket\")\n self._all_buckets_flush()\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n 
cli_command_location=self.cli_command_location,\n cb_version=self.cb_version,\n force_updates=self.backupset.force_updates,\n no_resume=True)\n state = \"\"\n while state not in (\"FINISHED\", \"EXECUTING\"):\n state = restore_result.state\n self._kill_cbbackupmgr()\n self.assertFalse(self._check_output(\"success\", restore_result.result()))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def repl_restart(restart: bool = True) -> None:", "def snap_restore_complete(mnode, volname, snapname):\n\n # Stopping volume before snap restore\n ret = volume_stop(mnode, volname)\n if not ret:\n g.log.error(\"Failed to stop volume %s before restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n ret, _, _ = snap_restore(mnode, snapname)\n if ret != 0:\n g.log.error(\"snapshot restore cli execution failed\")\n return False\n\n # Starting volume after snap restore\n ret = volume_start(mnode, volname)\n if not ret:\n g.log.error(\"Failed to start volume %s after restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n return True", "def restart(self):\n pass", "def restart():\n stop()\n start()", "def restart(self):\r\n pass", "def test_backup_restore_with_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_cluster_validate()\n count += 1\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n count += 1", "def test_restore_with_memcached_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n try:\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n output, error = self.backup_restore()\n self.assertTrue(self._check_output(\n \"Error restoring cluster: failed to connect\", output),\n \"Expected error message not thrown by Restore 180 seconds after memcached crash\")\n self.log.info(\"Expected error thrown by Restore 180 seconds after memcached crash\")\n except Exception as ex:\n self.fail(str(ex))\n finally:\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n self.sleep(30)", "def finish_maintenance(self, errors):\n if not self.can_restart:\n return\n\n try:\n self._shutdown()\n 
run(\" \".join(self.cmd_line_opts['argv']))\n self.client = pymongo.MongoClient(self.host, self.port)\n self._wait_secondaries_catch_up()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, 
seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def ready_for_commands(self, retries = 3):\n while retries:\n try:\n self.refresh()\n return True\n except Reset_Exception as e:\n pass\n except Max_Retry_Exception as e:\n pass\n finally:\n retries -= 1\n raise e", "async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')", "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def reboot(self,request):\n\t\tresult = True\n\t\tPopen(['/sbin/reboot']) # that's all\n\t\tself.finished(request.id,result)", "async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def reboot(self):\n self.resetStream()\n logger.info(\"Going to reboot %s\" % self)\n self.setMode(CLI_MODES.shell)\n self._session.sendline(\"reboot\")\n reboot_failed_tries = 3\n reboot_wait_tries = 3\n while True:\n i = self._session.expect([\n \"The system is going down for reboot\",\n \"System shutdown initiated\",\n \"Connection to [\\.\\d]* closed\",\n pexpect.EOF,\n \"Request failed\",\n pexpect.TIMEOUT,\n ], timeout=120)\n if i == 0 or i == 1:\n logger.info(\"Reboot initiated\")\n continue\n elif i == 2 or i == 3:\n logger.info(\"Machine Rebooted. Connection closed\")\n break\n elif i == 4:\n if reboot_failed_tries > 0:\n logger.info(\"Reboot failed. Trying again...\")\n self._session.sendline(\"reload force\")\n reboot_failed_tries -= 1\n continue\n elif i == 5:\n if reboot_wait_tries > 0:\n logger.warn(\"Waited for 120 secs, but machine did NOT reboot. 
Waiting for sometime more...\")\n self._session.sendline(\"reload force\")\n reboot_wait_tries -= 1\n continue\n else:\n logger.error(\"Machine did NOT reboot!!!\")\n return False\n # break to prevent infinite loop\n break\n\n self._session.logfile_read.flush()\n self._session.logfile_read = None\n sys.stdout.flush()\n self.disconnect()\n logger.debug(\"Waiting for 300secs..\")\n\n time.sleep(300)\n return self.waitTillReachable(180, timeout=1800)", "async def restart(ctx):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Restarting, please wait...`\")\n execv(\"python3 SchmuckBot\", argv)", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! \")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, 
self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on 
upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "async def attempt_reconnect(self):\n await deploy.reconnect()", "def at_server_reload(self):\n self.db.started = True", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "async def confirm_remote_startup(self):\n pass", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n 
time.sleep(10)\n test_rest.cluster.verify_data()", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def test_restore_backup():", "def state_wait_exit(cfg, app, win):", "def is_restarting(self) -> bool:\r\n return False", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n 
password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def test_relaunch_deployment_run(self):\n pass", "def test_workflows_restart(self):\n pass", "def at_server_shutdown(self):\n self.db.started = False", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "async def async_press(self) -> None:\n if self.entity_description.key == _RESTART_KEY:\n await self._device.async_reboot()\n else:\n await self._device.async_unpair_remotes()\n await self._device.async_config_remotes(RemoteConfig.OPEN)", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "async def async_restore(self):\n if self._state == STATE_UNAVAILABLE:\n return\n\n if not self._slave_mode:\n _LOGGER.debug(\"For %s RESTORE, current source: %s, restoring volume: %s, source: %s uri: %s, seek: %s, pos: %s\", self.name, self._source, self._snap_volume, self._snap_source, self._snap_uri, self._snap_seek, self._snap_playhead_position)\n if self._snap_state != STATE_UNKNOWN:\n self._state = self._snap_state\n\n self._playing_tts = False\n self._announce = False\n self._playhead_position = self._snap_playhead_position\n\n if self._snap_spotify:\n self._snap_spotify = False\n if not self._snap_spotify_volumeonly:\n await self.async_call_linkplay_httpapi(\"MCUKeyShortClick:{0}\".format(str(self._preset_key)), None)\n self._snapshot_active = False\n self._snap_spotify_volumeonly = False\n # await self.async_schedule_update_ha_state(True)\n\n elif self._snap_mass:\n self._snap_mass = False\n self._snapshot_active = False\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"snapshot_restore\"})\n\n elif self._snap_source != \"Network\":\n self._snapshot_active = False\n await self.async_select_source(self._snap_source)\n if self._snap_uri is None:\n await asyncio.sleep(.6)\n self._snap_source = None\n \n elif self._snap_uri is not None:\n self._playing_mediabrowser = self._snap_playing_mediabrowser\n self._media_source_uri = self._snap_media_source_uri\n self._media_uri = self._snap_uri\n self._nometa = self._snap_nometa\n if self._snap_state in [STATE_PLAYING, STATE_PAUSED]: # self._media_uri.find('tts_proxy') == -1\n await self.async_play_media(MEDIA_TYPE_URL, self._media_uri)\n self._snapshot_active = False\n self._snap_uri = None\n\n if self._snap_volume != 0:\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(self._snap_volume)), None)\n self._snap_volume = 0\n\n if self._snap_state in [STATE_PLAYING, STATE_PAUSED]:\n await asyncio.sleep(0.5)\n if self._snap_seek and 
self._snap_playhead_position > 0:\n _LOGGER.debug(\"Seekin'\")\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:seek:{0}\".format(str(self._snap_playhead_position)), None)\n if self._snap_state == STATE_PAUSED:\n await self.async_media_pause()\n\n self._snap_state = STATE_UNKNOWN\n self._snap_seek = False\n self._snap_playhead_position = 0\n\n else:\n return\n #await self._master.async_restore()", "def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)", "def wait_for_restore():\n while True:\n states = [\n # Wait till all actors are either \"ALIVE\" (retored),\n # or \"DEAD\" (cancelled. these actors are from other\n # finished test cases).\n a[\"state\"] == \"ALIVE\" or a[\"state\"] == \"DEAD\"\n for a in list_actors(filters=[(\"class_name\", \"=\", \"Actor\")])\n ]\n print(\"waiting ... \", states)\n if all(states):\n break\n # Otherwise, wait a bit.\n time.sleep(0.5)", "async def send_reset(self):\n try:\n await self._send_command([PrivateConstants.SYSTEM_RESET])\n except RuntimeError:\n exit(0) #keep this??", "def restart_db():\n print system(\"service postgresql restart \")\n time.sleep(POSTGRES_WAIT)", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "def start_system_restore(self):\n confirmation = input(\"Do you want to system restore? 
(Y or N)\\n\")\n if confirmation in ('Y', 'y'):\n return self.mycam.devicemgmt.StartSystemRestore()\n return None", "def do(self):\n device = self.target\n\n # We might have interrupted a long-running command such as a Configure\n # or a Scan, so we need to clean up from that.\n\n # Now totally deconfigure\n device._deconfigure()\n\n # and release all receptors\n device._remove_receptors_helper(device._receptors[:])\n\n message = \"Restart command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK,message)", "def reboot(self, *args, **kwargs):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Attempting to reset the Treerunner board\"\n \"\".format(log_tag))\n cmd = \"shutdown > /dev/null 2>&1\"\n self.exec_command_ssh(cmd, background=True)\n self.logger.info(\"{} Waiting for the Treerunner board to come\"\n \" back online\".format(log_tag))\n time.sleep(30)\n # Start the sshd server daemon\n self.start_sshd_server()", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def reboot(self):\n raise NotImplementedError", "def restart(cfg: 'Settings', server, countdown=None):\n\n try:\n server_path = find_server(cfg.parent_directory, server)\n except (ParentDirMissing, ServerNotFound, NoInvocation) as e:\n e.log_this()\n return\n\n if isUp(server):\n countdownSteps = [\n \"20m\",\n \"15m\",\n \"10m\",\n \"5m\",\n \"3m\",\n \"2m\",\n \"1m\",\n \"30s\",\n \"10s\",\n \"5s\",\n ]\n if countdown:\n if countdown not in countdownSteps:\n log.error(f'{countdown} is an undefined step, aborting!')\n availableSteps1 = ', '.join(countdownSteps[:5])\n availableSteps2 = ', '.join(countdownSteps[5:])\n log.info(\n '> Available countdown steps are:\\n'\n f'> {availableSteps1},\\n'\n f'> {availableSteps2}'\n )\n return False\n log.info(f'Restarting {server} with {countdown}-countdown.')\n indx = countdownSteps.index(countdown)\n cntd = countdownSteps[indx:]\n else:\n log.info(f'Restarting {server} with default 10min countdown.')\n cntd = countdownSteps[2:]\n steps = buildCountdownSteps(cntd)\n for step in steps:\n screenCmd(\n server,\n 'title @a times 20 40 20',\n f'title @a subtitle {{\\\"text\\\":\\\"in {step[0]} {step[2]}!\\\",\\\"italic\\\":true}}',\n 'title @a title {\\\"text\\\":\\\"Restarting\\\", \\\"bold\\\":true}',\n f'tellraw @a {{\\\"text\\\":\\\"[Restarting in {step[0]} {step[2]}!]\\\",\\\"color\\\":\\\"green\\\"}}',\n )\n sleep(step[1])\n screenCmd(server, 'save-all')\n sleep(15)\n screenCmd(server, 'stop')\n waiting = 6\n while isUp(server) and waiting > 0:\n waiting -= 1\n sleep(20)\n if isUp(server):\n log.warning(f'Restart failed, {server} appears not to have stopped!')\n\n log.warning(f'Terminating {server} process!')\n terminated = terminate(server)\n if not terminated:\n return\n\n log.info('Restart in progress...')\n invocation = get_invocation(server_path)\n log.info(f'Starting {server}')\n os.chdir(server_path)\n run(['screen', '-h', '5000', '-dmS', server, *invocation, 'nogui'])\n sleep(5)\n if isUp(server):\n log.info(f'Restart successful, {server} is now running!')\n # run_startup_commands(server)\n return True\n else:\n log.warning(f'Restart failed, {server} does not appear to have started!')\n return False\n else:\n log.warning(f'Restart cancelled, {server} is offline!')\n return False", "def reboot(self, node):", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def test_restore_with_erlang_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n 
self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n try:\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n output = restore_result.result(timeout=300)\n self.assertTrue(self._check_output(\n \"Error restoring cluster: Not all data was sent to Couchbase\", output),\n \"Expected error message not thrown by Restore 180 seconds after erlang crash\")\n self.log.info(\"Expected error thrown by Restore 180 seconds after erlang crash\")\n except Exception as ex:\n self.fail(str(ex))\n finally:\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)", "def changed(self, *args):\n log.debug(\"Scheduling for immediate restart.\")\n self.schedule('restart', 0)\n return CONTINUE", "def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])", "def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")", "def test_restore_finished(self):\n task = self._remote_task()\n task['state'] = tasks.TASK_SUCCEEDED\n graph = self._restore_graph([task])\n assert graph.tasks == []", "def REBshutdown(self):\n pass", "def restart(self):\n self.stop()\n self.start(init=False)", "def syncrepl_refreshdone(self):\n pass", "def message_restart():\n if dialog_yes_no(32014):\n xbmc.executebuiltin('RestartApp')", "def storage_reset(self):\n daos_srv_bin = os.path.join(self.daosbinpath, \"daos_server\")\n cmd = \"{} storage prepare -n --reset -f\".format(daos_srv_bin)\n result = pcmd(self._hosts, cmd)\n if len(result) > 1 or 0 not in result:\n raise ServerFailed(\"Error resetting NVMe storage\")", "def reconfigure(client, instance_name, command):\n\n # 'command' has 3 parts in a list (1 Command and 2 ARGs)\n exec_Id = client.exec_create(container=instance_name, cmd=command)\n\n exec_start_resp = client.exec_start(exec_Id, stream=True)\n\n # Using a 'single' generator response to solve issue of 'start_exec' returning control after 6 minutes\n for response in exec_start_resp:\n dlog.info(\"Reconfig Script execution response: {:}\".format(response))\n exec_start_resp.close()\n break", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def restart():\n run_commands('python manage.py supervisor restart all')", "def restart(self,check_status=False):\n if (uart.SerialTxEsc(self.device)):\n return 1\n # Restart the LM32 software\n # Todo\n # Bug: after running \"init boot\", the slave cannot sync to master\n print(uart.SerialTx(self.device,\"init boot\"))\n\n\n time.sleep(10)\n \n if (check_status):\n timeout = 0\n sync_state = \"IDLE\"\n while (not (\"TRACK_PHASE\" in sync_state)):\n print \"wait sync...\"\n timeout += 1\n sync_state = self.get_sync_state()\n if 
(timeout>80):\n print(\"The synchronization cannot be achieved.\")\n return 1\n else:\n timeout+=1\n print(\"Restart the device \"+str(self.role))\n return 0", "def restart(self):\n self.iic.set_flag(REG.CTRL_REG2.RST)\n time.sleep(0.01)\n self.conf = Configuration()", "def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass", "def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:", "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "async def shutdown_requested(self, restart=False) -> None:\n await self.shutdown_listener()", "def poll_bgp_restored(duthosts, timeout=900, delay=20):\n logger.info(\"Poll for BGP to recover.\")\n pytest_assert(wait_until(timeout, 10, 0, check_bgp_neighbors, duthosts),\n \"All BGP's are not established after config reload from original minigraph\")" ]
[ "0.69105613", "0.6523955", "0.63679016", "0.62714815", "0.6169232", "0.6149965", "0.61382735", "0.61187077", "0.61115396", "0.61071575", "0.606816", "0.6046619", "0.6045591", "0.6022971", "0.6000961", "0.59798926", "0.59677154", "0.5965609", "0.59406596", "0.5929384", "0.5921322", "0.5902289", "0.5885909", "0.5868154", "0.58384186", "0.5831974", "0.58264554", "0.582301", "0.5818666", "0.5801851", "0.57735634", "0.5767161", "0.57669854", "0.57547814", "0.575192", "0.5751279", "0.57466733", "0.5741296", "0.5738781", "0.5715294", "0.570336", "0.5701519", "0.56884825", "0.56715834", "0.5664674", "0.5652119", "0.56425494", "0.56267893", "0.5623395", "0.562269", "0.56199205", "0.5615335", "0.5600178", "0.5595606", "0.55886626", "0.5587312", "0.5584977", "0.5579026", "0.5573888", "0.5573701", "0.5567775", "0.5563445", "0.55598474", "0.55578166", "0.5556064", "0.55559933", "0.55528873", "0.55427706", "0.5539583", "0.55331784", "0.55314606", "0.55290234", "0.5521615", "0.5506186", "0.55008733", "0.5497249", "0.54954", "0.54923296", "0.5490707", "0.5490171", "0.5475439", "0.54696566", "0.54656565", "0.5456246", "0.54460794", "0.5434153", "0.54191303", "0.54124725", "0.54104227", "0.540939", "0.53969634", "0.5387222", "0.53838366", "0.5382483", "0.5382483", "0.5381436", "0.5372966", "0.5372966", "0.5371769", "0.5366676" ]
0.7891546
0
tries to wait for the server to restart after the 'restore' command
def tcp_ping_nodes(self, timeout=20.0):
    for node in self.all_instances:
        if node.instance_type in [
            InstanceType.RESILIENT_SINGLE,
            InstanceType.SINGLE,
            InstanceType.DBSERVER,
        ]:
            node.check_version_request(timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_restore(self):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.detect_restore_restart()", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def continue_server():\n update_server_status({'ready': True})", "async def async_restore(self):\n await self._client.restore()\n self.async_write_ha_state()", "def _restart(self):\n pass", "def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')", "def restart(self):\n\t\treturn self.reset().start()", "def test_restore_with_erlang_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n conn.start_couchbase()\n conn.disconnect()\n timeout_now = 600\n output = restore_result.result(timeout=timeout_now)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with erlang crash and restart within 180 seconds\")\n self.log.info(\"Restore succeeded with erlang crash and restart within 180 seconds\")", "def restart_salt():\n stop_salt()\n start_salt()", "def test_restore_with_memcached_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n output = restore_result.result(timeout=600)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with memcached crash and restart within 400 seconds\")\n self.log.info(\"Restore succeeded with memcached crash and restart within 400 seconds\")", "def test_backup_restore_after_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n rebalance.result()\n self.backup_cluster_validate()\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), 
self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")", "def restart(self) -> None:", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def restart(self):", "def restartSystem(self):\n # save retry count between reboots\n try:\n self.notifyPut('Restarting System...')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['retry_count'] = self.retry_count\n self.db.close()\n except Exception, e:\n self.logQ.put('{0} - Unable to save retry count'.format(e))\n \n try:\n subprocess.call(['SHUTDOWN', '/f', '/r'])\n except Exception, e:\n self.logQ.put('{0} - Unable to restart Windows'.format(e))\n return", "def request_shutdown(self, restart=False):", "def test_resume_restore(self):\n if not self.backupset.resume:\n self.fail(\"Resume must be True for this test\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.log.info(\"Start to flush bucket\")\n 
self._all_buckets_flush()\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version,\n force_updates=self.backupset.force_updates,\n no_resume=True)\n state = \"\"\n while state not in (\"FINISHED\", \"EXECUTING\"):\n state = restore_result.state\n self._kill_cbbackupmgr()\n self.assertFalse(self._check_output(\"success\", restore_result.result()))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def repl_restart(restart: bool = True) -> None:", "def snap_restore_complete(mnode, volname, snapname):\n\n # Stopping volume before snap restore\n ret = volume_stop(mnode, volname)\n if not ret:\n g.log.error(\"Failed to stop volume %s before restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n ret, _, _ = snap_restore(mnode, snapname)\n if ret != 0:\n g.log.error(\"snapshot restore cli execution failed\")\n return False\n\n # Starting volume after snap restore\n ret = volume_start(mnode, volname)\n if not ret:\n g.log.error(\"Failed to start volume %s after restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n return True", "def restart(self):\n pass", "def restart():\n stop()\n start()", "def restart(self):\r\n pass", "def test_backup_restore_with_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_cluster_validate()\n count += 1\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n count += 1", "def test_restore_with_memcached_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n try:\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n output, error = self.backup_restore()\n self.assertTrue(self._check_output(\n \"Error restoring cluster: failed to connect\", output),\n \"Expected error message not thrown by Restore 180 seconds after memcached crash\")\n self.log.info(\"Expected error thrown by Restore 180 seconds after memcached crash\")\n except Exception as ex:\n self.fail(str(ex))\n 
finally:\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n self.sleep(30)", "def finish_maintenance(self, errors):\n if not self.can_restart:\n return\n\n try:\n self._shutdown()\n run(\" \".join(self.cmd_line_opts['argv']))\n self.client = pymongo.MongoClient(self.host, self.port)\n self._wait_secondaries_catch_up()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = 
user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def ready_for_commands(self, retries = 3):\n while retries:\n try:\n self.refresh()\n return True\n except Reset_Exception as e:\n pass\n except Max_Retry_Exception as e:\n pass\n finally:\n retries -= 1\n raise e", "async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')", "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def reboot(self,request):\n\t\tresult = True\n\t\tPopen(['/sbin/reboot']) # that's all\n\t\tself.finished(request.id,result)", "async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def reboot(self):\n self.resetStream()\n logger.info(\"Going to reboot %s\" % self)\n self.setMode(CLI_MODES.shell)\n self._session.sendline(\"reboot\")\n reboot_failed_tries = 3\n reboot_wait_tries = 3\n while True:\n i = self._session.expect([\n \"The system is going down for reboot\",\n \"System shutdown initiated\",\n \"Connection to [\\.\\d]* closed\",\n pexpect.EOF,\n \"Request failed\",\n pexpect.TIMEOUT,\n ], timeout=120)\n if i == 0 or i == 1:\n logger.info(\"Reboot initiated\")\n continue\n elif i == 2 or i == 3:\n logger.info(\"Machine Rebooted. Connection closed\")\n break\n elif i == 4:\n if reboot_failed_tries > 0:\n logger.info(\"Reboot failed. Trying again...\")\n self._session.sendline(\"reload force\")\n reboot_failed_tries -= 1\n continue\n elif i == 5:\n if reboot_wait_tries > 0:\n logger.warn(\"Waited for 120 secs, but machine did NOT reboot. 
Waiting for sometime more...\")\n self._session.sendline(\"reload force\")\n reboot_wait_tries -= 1\n continue\n else:\n logger.error(\"Machine did NOT reboot!!!\")\n return False\n # break to prevent infinite loop\n break\n\n self._session.logfile_read.flush()\n self._session.logfile_read = None\n sys.stdout.flush()\n self.disconnect()\n logger.debug(\"Waiting for 300secs..\")\n\n time.sleep(300)\n return self.waitTillReachable(180, timeout=1800)", "async def restart(ctx):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Restarting, please wait...`\")\n execv(\"python3 SchmuckBot\", argv)", "def test_backup_restore_after_online_upgrade(self):\n if self.initial_version[:1] == \"5\" and self.upgrade_versions[0][:1] >= \"7\":\n self.log.error(\"\\n\\n\\n*** ERROR: Direct upgrade from {0} to {1} does not support.\\\n Test will skip\\n\\n\"\\\n .format(self.initial_version[:5], self.upgrade_versions[0][:5]))\n return\n servers = copy.deepcopy(self.servers)\n self.vbuckets = self.initial_vbuckets\n if len(servers) != 4:\n self.fail(\"\\nThis test needs exactly 4 nodes to run! \")\n\n self._install(servers)\n count = 0\n nodes_fail_to_install = []\n for server in servers:\n ready = RestHelper(RestConnection(server)).is_ns_server_running(60)\n if ready:\n count += 1\n else:\n nodes_fail_to_install.append(server.ip)\n if count < len(servers):\n self.fail(\"Some servers may not install Couchbase server: {0}\"\\\n .format(nodes_fail_to_install))\n\n if not self.disable_diag_eval_on_non_local_host:\n self.enable_diag_eval_on_non_local_hosts()\n cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,\n self.master.rest_username,\n self.master.rest_password)\n cmd += '-d \"path_config:component_path(bin).\"'\n bin_path = subprocess.check_output(cmd, shell=True)\n try:\n bin_path = bin_path.decode()\n except AttributeError:\n pass\n if \"bin\" not in bin_path:\n self.fail(\"Check if cb server install on %s\" % self.master.ip)\n else:\n self.cli_command_location = bin_path.replace('\"', '') + \"/\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],\n [servers[int(self.nodes_init) - 1]], [])\n rebalance.result()\n self.sleep(15)\n self.add_built_in_server_user()\n rest = RestConnection(self.master)\n cb_version = rest.get_nodes_version()\n initial_compression_mode = \"off\"\n if 5.5 > float(cb_version[:3]):\n self.compression_mode = initial_compression_mode\n\n rest.create_bucket(bucket='default', ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.buckets = rest.get_buckets()\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n \"\"\" create index \"\"\"\n if self.create_gsi:\n if \"5\" > rest.get_nodes_version()[:1]:\n if self.gsi_type == \"forestdb\":\n self.fail(\"Need to set param self.gsi_type=memory_optimized\")\n rest.set_indexer_storage_mode(storageMode=\"memory_optimized\")\n else:\n rest.set_indexer_storage_mode(storageMode=\"plasma\")\n self.create_indexes()\n self.backup_create()\n if self.backupset.number_of_backups > 1:\n self.log.info(\"Start doing multiple backup\")\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n else:\n self.backup_cluster_validate()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, 
self.backupset.number_of_backups + 1)\n self.sleep(5)\n self.backup_list()\n\n \"\"\" Start to online upgrade using swap rebalance \"\"\"\n self.initial_version = self.upgrade_versions[0]\n if self.force_version_upgrade:\n self.initial_version = self.force_version_upgrade\n self.sleep(self.sleep_time,\n \"Pre-setup of old version is done. Wait for online upgrade to: \"\n \"{0} version\".format(self.initial_version))\n self.product = 'couchbase-server'\n self._install(servers[2:])\n self.sleep(self.sleep_time,\n \"Installation of new version is done. Wait for rebalance\")\n self.log.info(\n \"Rebalanced in upgraded nodes and rebalanced out nodes with old version\")\n add_node_services = [self.add_node_services]\n if \"-\" in self.add_node_services:\n add_node_services = self.add_node_services.split(\"-\")\n\n self.cluster.rebalance(servers, servers[2:], servers[:2],\n services=add_node_services)\n self.sleep(15)\n self.backupset.cluster_host = servers[2]\n \"\"\" Upgrade is done \"\"\"\n self.log.info(\"** Upgrade is done **\")\n healthy = False\n timeout = 0\n while not healthy:\n healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()\n if not healthy:\n if timeout == 120:\n self.fail(\"Node %s is not ready after 2 mins\" % self.backupset.cluster_host)\n else:\n self.sleep(5, \"Wait for server up \")\n timeout += 5\n else:\n healthy = True\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n servers[2].ip))\n RbacBase().create_user_source(testuser, 'builtin', servers[2])\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')\n self.log.info(status)\n if self.backupset.number_of_backups_after_upgrade:\n self.backupset.number_of_backups += \\\n self.backupset.number_of_backups_after_upgrade\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n self.add_built_in_server_user(node=servers[2])\n for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):\n self.log.info(\"_backup_restore_with_ops #{0} started...\".format(i))\n validate_dir_struct = True\n if i > 2:\n validate_dir_struct = False\n self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,\n validate_directory_structure=validate_dir_struct)\n self.backup_list()\n\n \"\"\" merged after upgrade \"\"\"\n if self.after_upgrade_merged:\n self.backupset.start = 1\n self.backupset.end = len(self.backups)\n self.backup_merge_validate()\n self.backup_list()\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(servers[2]).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n if self.bucket_flush:\n self.log.info(\"Start to flush bucket\")\n rest = RestConnection(servers[2])\n rest.flush_bucket()\n else:\n self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)\n \"\"\" Re-create default bucket on 
upgrade cluster \"\"\"\n RestConnection(servers[2]).create_bucket(bucket='default',\n ramQuotaMB=512,\n compressionMode=self.compression_mode)\n self.sleep(5)\n self.total_buckets = len(self.buckets)\n\n if self.after_upgrade_merged:\n self.backupset.end = 1\n\n \"\"\" restore back to cluster \"\"\"\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n if self.create_gsi:\n self.verify_gsi()", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "async def attempt_reconnect(self):\n await deploy.reconnect()", "def at_server_reload(self):\n self.db.started = True", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "async def confirm_remote_startup(self):\n pass", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n 
time.sleep(10)\n test_rest.cluster.verify_data()", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def test_restore_backup():", "def state_wait_exit(cfg, app, win):", "def is_restarting(self) -> bool:\r\n return False", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n 
password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def test_relaunch_deployment_run(self):\n pass", "def test_workflows_restart(self):\n pass", "def at_server_shutdown(self):\n self.db.started = False", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "async def async_press(self) -> None:\n if self.entity_description.key == _RESTART_KEY:\n await self._device.async_reboot()\n else:\n await self._device.async_unpair_remotes()\n await self._device.async_config_remotes(RemoteConfig.OPEN)", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "async def async_restore(self):\n if self._state == STATE_UNAVAILABLE:\n return\n\n if not self._slave_mode:\n _LOGGER.debug(\"For %s RESTORE, current source: %s, restoring volume: %s, source: %s uri: %s, seek: %s, pos: %s\", self.name, self._source, self._snap_volume, self._snap_source, self._snap_uri, self._snap_seek, self._snap_playhead_position)\n if self._snap_state != STATE_UNKNOWN:\n self._state = self._snap_state\n\n self._playing_tts = False\n self._announce = False\n self._playhead_position = self._snap_playhead_position\n\n if self._snap_spotify:\n self._snap_spotify = False\n if not self._snap_spotify_volumeonly:\n await self.async_call_linkplay_httpapi(\"MCUKeyShortClick:{0}\".format(str(self._preset_key)), None)\n self._snapshot_active = False\n self._snap_spotify_volumeonly = False\n # await self.async_schedule_update_ha_state(True)\n\n elif self._snap_mass:\n self._snap_mass = False\n self._snapshot_active = False\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"snapshot_restore\"})\n\n elif self._snap_source != \"Network\":\n self._snapshot_active = False\n await self.async_select_source(self._snap_source)\n if self._snap_uri is None:\n await asyncio.sleep(.6)\n self._snap_source = None\n \n elif self._snap_uri is not None:\n self._playing_mediabrowser = self._snap_playing_mediabrowser\n self._media_source_uri = self._snap_media_source_uri\n self._media_uri = self._snap_uri\n self._nometa = self._snap_nometa\n if self._snap_state in [STATE_PLAYING, STATE_PAUSED]: # self._media_uri.find('tts_proxy') == -1\n await self.async_play_media(MEDIA_TYPE_URL, self._media_uri)\n self._snapshot_active = False\n self._snap_uri = None\n\n if self._snap_volume != 0:\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(self._snap_volume)), None)\n self._snap_volume = 0\n\n if self._snap_state in [STATE_PLAYING, STATE_PAUSED]:\n await asyncio.sleep(0.5)\n if self._snap_seek and 
self._snap_playhead_position > 0:\n _LOGGER.debug(\"Seekin'\")\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:seek:{0}\".format(str(self._snap_playhead_position)), None)\n if self._snap_state == STATE_PAUSED:\n await self.async_media_pause()\n\n self._snap_state = STATE_UNKNOWN\n self._snap_seek = False\n self._snap_playhead_position = 0\n\n else:\n return\n #await self._master.async_restore()", "def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)", "def wait_for_restore():\n while True:\n states = [\n # Wait till all actors are either \"ALIVE\" (retored),\n # or \"DEAD\" (cancelled. these actors are from other\n # finished test cases).\n a[\"state\"] == \"ALIVE\" or a[\"state\"] == \"DEAD\"\n for a in list_actors(filters=[(\"class_name\", \"=\", \"Actor\")])\n ]\n print(\"waiting ... \", states)\n if all(states):\n break\n # Otherwise, wait a bit.\n time.sleep(0.5)", "async def send_reset(self):\n try:\n await self._send_command([PrivateConstants.SYSTEM_RESET])\n except RuntimeError:\n exit(0) #keep this??", "def restart_db():\n print system(\"service postgresql restart \")\n time.sleep(POSTGRES_WAIT)", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "def start_system_restore(self):\n confirmation = input(\"Do you want to system restore? 
(Y or N)\\n\")\n if confirmation in ('Y', 'y'):\n return self.mycam.devicemgmt.StartSystemRestore()\n return None", "def do(self):\n device = self.target\n\n # We might have interrupted a long-running command such as a Configure\n # or a Scan, so we need to clean up from that.\n\n # Now totally deconfigure\n device._deconfigure()\n\n # and release all receptors\n device._remove_receptors_helper(device._receptors[:])\n\n message = \"Restart command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK,message)", "def reboot(self, *args, **kwargs):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Attempting to reset the Treerunner board\"\n \"\".format(log_tag))\n cmd = \"shutdown > /dev/null 2>&1\"\n self.exec_command_ssh(cmd, background=True)\n self.logger.info(\"{} Waiting for the Treerunner board to come\"\n \" back online\".format(log_tag))\n time.sleep(30)\n # Start the sshd server daemon\n self.start_sshd_server()", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def reboot(self):\n raise NotImplementedError", "def restart(cfg: 'Settings', server, countdown=None):\n\n try:\n server_path = find_server(cfg.parent_directory, server)\n except (ParentDirMissing, ServerNotFound, NoInvocation) as e:\n e.log_this()\n return\n\n if isUp(server):\n countdownSteps = [\n \"20m\",\n \"15m\",\n \"10m\",\n \"5m\",\n \"3m\",\n \"2m\",\n \"1m\",\n \"30s\",\n \"10s\",\n \"5s\",\n ]\n if countdown:\n if countdown not in countdownSteps:\n log.error(f'{countdown} is an undefined step, aborting!')\n availableSteps1 = ', '.join(countdownSteps[:5])\n availableSteps2 = ', '.join(countdownSteps[5:])\n log.info(\n '> Available countdown steps are:\\n'\n f'> {availableSteps1},\\n'\n f'> {availableSteps2}'\n )\n return False\n log.info(f'Restarting {server} with {countdown}-countdown.')\n indx = countdownSteps.index(countdown)\n cntd = countdownSteps[indx:]\n else:\n log.info(f'Restarting {server} with default 10min countdown.')\n cntd = countdownSteps[2:]\n steps = buildCountdownSteps(cntd)\n for step in steps:\n screenCmd(\n server,\n 'title @a times 20 40 20',\n f'title @a subtitle {{\\\"text\\\":\\\"in {step[0]} {step[2]}!\\\",\\\"italic\\\":true}}',\n 'title @a title {\\\"text\\\":\\\"Restarting\\\", \\\"bold\\\":true}',\n f'tellraw @a {{\\\"text\\\":\\\"[Restarting in {step[0]} {step[2]}!]\\\",\\\"color\\\":\\\"green\\\"}}',\n )\n sleep(step[1])\n screenCmd(server, 'save-all')\n sleep(15)\n screenCmd(server, 'stop')\n waiting = 6\n while isUp(server) and waiting > 0:\n waiting -= 1\n sleep(20)\n if isUp(server):\n log.warning(f'Restart failed, {server} appears not to have stopped!')\n\n log.warning(f'Terminating {server} process!')\n terminated = terminate(server)\n if not terminated:\n return\n\n log.info('Restart in progress...')\n invocation = get_invocation(server_path)\n log.info(f'Starting {server}')\n os.chdir(server_path)\n run(['screen', '-h', '5000', '-dmS', server, *invocation, 'nogui'])\n sleep(5)\n if isUp(server):\n log.info(f'Restart successful, {server} is now running!')\n # run_startup_commands(server)\n return True\n else:\n log.warning(f'Restart failed, {server} does not appear to have started!')\n return False\n else:\n log.warning(f'Restart cancelled, {server} is offline!')\n return False", "def reboot(self, node):", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def test_restore_with_erlang_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n 
self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n try:\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n output = restore_result.result(timeout=300)\n self.assertTrue(self._check_output(\n \"Error restoring cluster: Not all data was sent to Couchbase\", output),\n \"Expected error message not thrown by Restore 180 seconds after erlang crash\")\n self.log.info(\"Expected error thrown by Restore 180 seconds after erlang crash\")\n except Exception as ex:\n self.fail(str(ex))\n finally:\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)", "def changed(self, *args):\n log.debug(\"Scheduling for immediate restart.\")\n self.schedule('restart', 0)\n return CONTINUE", "def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])", "def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")", "def test_restore_finished(self):\n task = self._remote_task()\n task['state'] = tasks.TASK_SUCCEEDED\n graph = self._restore_graph([task])\n assert graph.tasks == []", "def REBshutdown(self):\n pass", "def restart(self):\n self.stop()\n self.start(init=False)", "def syncrepl_refreshdone(self):\n pass", "def message_restart():\n if dialog_yes_no(32014):\n xbmc.executebuiltin('RestartApp')", "def storage_reset(self):\n daos_srv_bin = os.path.join(self.daosbinpath, \"daos_server\")\n cmd = \"{} storage prepare -n --reset -f\".format(daos_srv_bin)\n result = pcmd(self._hosts, cmd)\n if len(result) > 1 or 0 not in result:\n raise ServerFailed(\"Error resetting NVMe storage\")", "def reconfigure(client, instance_name, command):\n\n # 'command' has 3 parts in a list (1 Command and 2 ARGs)\n exec_Id = client.exec_create(container=instance_name, cmd=command)\n\n exec_start_resp = client.exec_start(exec_Id, stream=True)\n\n # Using a 'single' generator response to solve issue of 'start_exec' returning control after 6 minutes\n for response in exec_start_resp:\n dlog.info(\"Reconfig Script execution response: {:}\".format(response))\n exec_start_resp.close()\n break", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def restart():\n run_commands('python manage.py supervisor restart all')", "def restart(self,check_status=False):\n if (uart.SerialTxEsc(self.device)):\n return 1\n # Restart the LM32 software\n # Todo\n # Bug: after running \"init boot\", the slave cannot sync to master\n print(uart.SerialTx(self.device,\"init boot\"))\n\n\n time.sleep(10)\n \n if (check_status):\n timeout = 0\n sync_state = \"IDLE\"\n while (not (\"TRACK_PHASE\" in sync_state)):\n print \"wait sync...\"\n timeout += 1\n sync_state = self.get_sync_state()\n if 
(timeout>80):\n print(\"The synchronization cannot be achieved.\")\n return 1\n else:\n timeout+=1\n print(\"Restart the device \"+str(self.role))\n return 0", "def restart(self):\n self.iic.set_flag(REG.CTRL_REG2.RST)\n time.sleep(0.01)\n self.conf = Configuration()", "def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass", "def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:", "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "async def shutdown_requested(self, restart=False) -> None:\n await self.shutdown_listener()", "def poll_bgp_restored(duthosts, timeout=900, delay=20):\n logger.info(\"Poll for BGP to recover.\")\n pytest_assert(wait_until(timeout, 10, 0, check_bgp_neighbors, duthosts),\n \"All BGP's are not established after config reload from original minigraph\")" ]
[ "0.7891546", "0.69105613", "0.6523955", "0.63679016", "0.62714815", "0.6169232", "0.6149965", "0.61382735", "0.61187077", "0.61115396", "0.61071575", "0.606816", "0.6046619", "0.6045591", "0.6022971", "0.6000961", "0.59798926", "0.59677154", "0.5965609", "0.59406596", "0.5929384", "0.5921322", "0.5902289", "0.5885909", "0.5868154", "0.58384186", "0.5831974", "0.58264554", "0.582301", "0.5818666", "0.5801851", "0.57735634", "0.5767161", "0.57669854", "0.57547814", "0.575192", "0.5751279", "0.57466733", "0.5741296", "0.5738781", "0.5715294", "0.570336", "0.5701519", "0.56884825", "0.56715834", "0.5664674", "0.5652119", "0.56425494", "0.56267893", "0.5623395", "0.562269", "0.56199205", "0.5615335", "0.5600178", "0.5595606", "0.55886626", "0.5587312", "0.5584977", "0.5579026", "0.5573888", "0.5573701", "0.5567775", "0.5563445", "0.55598474", "0.55578166", "0.5556064", "0.55559933", "0.55528873", "0.55427706", "0.5539583", "0.55331784", "0.55314606", "0.55290234", "0.5521615", "0.5506186", "0.55008733", "0.5497249", "0.54954", "0.54923296", "0.5490707", "0.5490171", "0.5475439", "0.54696566", "0.54656565", "0.5456246", "0.54460794", "0.5434153", "0.54191303", "0.54124725", "0.54104227", "0.540939", "0.53969634", "0.5387222", "0.53838366", "0.5382483", "0.5382483", "0.5381436", "0.5372966", "0.5372966", "0.5371769", "0.5366676" ]
0.0
-1
restart the starter instance after we killed it eventually, maybe command manual upgrade (and wait for exit)
def respawn_instance(self, version, moreargs=None, wait_for_logfile=True):\n        assert version is not None\n        self.cfg.version = version\n        args = [self.cfg.bin_dir / "arangodb"] + self.hotbackup_args + self.default_starter_args + self.arguments\n        if moreargs is not None:\n            args.extend(moreargs)\n        logging.info("StarterManager: respawning instance %s", str(args))\n        self.instance = psutil.Popen(args)\n        self.pid = self.instance.pid\n        self.ppid = self.instance.ppid()\n        print("respawned with PID:" + str(self.instance.pid))\n        if wait_for_logfile:\n            self.wait_for_logfile()\n            self.wait_for_port_bind()\n        else:\n            print("Waiting for starter to exit")\n            print("Starter exited %d" % self.instance.wait())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def _restart(self):\n pass", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def restart():\n run_commands('python manage.py supervisor restart all')", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def restart_salt():\n stop_salt()\n start_salt()", "def restart(self) -> None:", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def restart(self):\n pass", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)", "def restart(self):", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def restart(self):\r\n pass", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')", "def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)", "def restart():\n stop()\n start()", "def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def supervisor_restart():\n log('restart supervisor', yellow)\n sudo('/etc/init.d/supervisor stop')\n sudo('/etc/init.d/supervisor start')\n # sudo('/etc/init.d/supervisor restart')", "def restart_from_helper ( self, ):\r\n self.no_helper_restarts += 1\r\n self.logger.info( \"restart_from_helper\" )\r\n\r\n self.restart()", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def restart(self):\n self.stop()\n self.start(init=False)", "async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n 
await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))", "def restart_scrapy_daemon():\n global REPO_BASE_PATH\n logger.info('Scrapy daemon restarting...')\n arguments = ['python'] + [REPO_BASE_PATH+'/deploy/sqs_ranking_spiders/scrapy_daemon.py'] + sys.argv[1:]\n if 'restarted' not in arguments:\n arguments += ['restarted']\n else:\n logger.error('Error while restarting scrapy daemon. '\n 'Already restarted.')\n return\n logging.info('Starting %s with args %s' % (sys.executable, arguments))\n os.execv(sys.executable, arguments)", "def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])", "def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True", "def repl_restart(restart: bool = True) -> None:", "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "async def restart(ctx):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Restarting, please wait...`\")\n execv(\"python3 SchmuckBot\", argv)", "def restart():\n log('reiniciando servicos', yellow)\n nginx_stop()\n nginx_start()\n nginx_restart()\n nginx_reload()\n supervisor_stop()\n supervisor_start()", "def reboot(self):\n raise NotImplementedError", "def reboot_instance(InstanceId=None):\n pass", "def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])", "def restart(reason, *args, **kwargs):\n logging.info(\"Restarting: %s\" % reason)\n os.execv(sys.argv[0], sys.argv)", "def restart(self):\n\n self.stop()\n self.start()", "def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def request_shutdown(self, restart=False):", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def restartSystem(self):\n # save retry count between reboots\n try:\n self.notifyPut('Restarting System...')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['retry_count'] = self.retry_count\n self.db.close()\n except Exception, e:\n self.logQ.put('{0} - Unable to save retry count'.format(e))\n \n try:\n subprocess.call(['SHUTDOWN', '/f', '/r'])\n except Exception, e:\n self.logQ.put('{0} - Unable to restart Windows'.format(e))\n return", "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return 
self._rpc_failure(\"Restart disallowed by configuration\")", "def restart_db():\n print system(\"service postgresql restart \")\n time.sleep(POSTGRES_WAIT)", "def service_restart(appname):\n sudo('service {} restart'.format(appname))", "def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")", "def reboot():\n sudo('/mnt/apps/bin/restart-all-apache.sh')", "def restart(self):\n\t\treturn self.reset().start()", "def test_relaunch_deployment_run(self):\n pass", "def restart_celery():\n os.system('flask kill_celery')\n os.system('flask celery')", "def django_restart_shell():\r\n \r\n singles = wingapi.gApplication.fSingletons\r\n shell = singles.fGuiMgr.ShowPanel('python-shell', flash=True, grab_focus=True)\r\n if shell is not None:\r\n shell.fOwner.ScheduleRestart()", "def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])", "def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"", "def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def restart(self):\n self.__init__()\n return", "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def _restart_environment_episode(self, force_environment_reset=False) -> None:\n raise NotImplementedError(\"\")", "def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)", "def restart(self, sync=True):\n self.shutdown(sync=True)\n self.power_on(sync)", "def mysql_restart():\n log('restart mysql', yellow)\n sudo('/etc/init.d/mysql restart')", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def restart_all():\n\n restart_nginx()\n restart_supervisor()", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "def restartFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('start fluidinfo-api-node PORT=%d' % port)\n with settings(warn_only=True):\n sudo('kill -USR1 $(cat /var/run/nginx.pid)')", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "def restart(self):\n self.km.restart_kernel(now=True)", "def restart_worker_sig_handler(signum, frame):\n worker.logger.warn(\"Restarting celeryd (%s)\" % (\n \" \".join(sys.argv)))\n worker.stop()\n os.execv(sys.executable, [sys.executable] + sys.argv)", "async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n 
self._restarting = True", "def test_workflows_restart(self):\n pass", "def reboot(self):\n self.gripper_io.set_signal_value(\"reboot\", True)", "def restart(self):\n self._start_time = None\n self.start()", "async def terminate(self, restart=False) -> None:\n pass", "def restart_llap(self, env):\n Logger.info(\"Custom Command to retart LLAP\")\n import params\n env.set_params(params)\n\n if params.security_enabled:\n self.do_kinit()\n\n self._llap_stop(env)\n self._llap_start(env)", "def finish_maintenance(self, errors):\n if not self.can_restart:\n return\n\n try:\n self._shutdown()\n run(\" \".join(self.cmd_line_opts['argv']))\n self.client = pymongo.MongoClient(self.host, self.port)\n self._wait_secondaries_catch_up()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()", "def reload_test(test_name):\n sudo(\"restart %s\" % test_name)", "def reboot(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n f.write(\"REBOOT\")\n log.info(\"Reboot ...\")", "def restart(self, device, platform, version, app, package, activity):\n self.quit()\n self.initialize(device, platform, version, app, package, activity)", "def is_restarting(self) -> bool:\r\n return False", "def _restart(self):\n\n daemon_prefix = ConfigUtil().get_prefix_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n\n if daemon_prefix:\n if self._daemon_id in self._pyswitchlib_conf:\n daemon_prefixes = self._pyswitchlib_conf[self._daemon_id].split(':')\n\n if len(daemon_prefixes) > 1:\n daemon_prefixes.remove(daemon_prefix)\n daemon_prefixes.insert(0, daemon_prefix)\n\n self._pyswitchlib_conf[self._daemon_id] = ':'.join(daemon_prefixes)\n ConfigFileUtil().write(filename=pyswitchlib_conf_file, conf_dict=self._pyswitchlib_conf)\n\n super(PySwitchLibApiDaemonRunner, self)._restart()", "def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:", "def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()", "def force_restart(self):\n\n logging.info(\"Force restarting the experiment...\")\n\n save_dir = os.path.join(self._dir_name)\n create_folder(save_dir)\n rmtree(save_dir)\n\n if self._logger is not None:\n self._logger.force_restart()", "async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')", "def restart_kernel(self, now=False, **kw):", "def shutdown_kernel(self, now=False, restart=False):", "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? 
\" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def restart_celery():\n puts(yellow(\"Restart celery worker\"))\n with prefix('source %s' % in_rwd('bin/activate')):\n sudo('supervisorctl restart celery-worker', user=env.app_user)\n time.sleep(1)\n sudo('supervisorctl status', user=env.app_user)", "def restart(service_name):\n start_time = time.time()\n\n print(_green(\"Started...\"))\n require('environment', provided_by=('staging', 'production'))\n\n try:\n fabconf, env_config = parse_ini('appserver', check_all=False)\n except Exception as e:\n print(_red('Exception parsing config file: {}'.format(str(e))))\n exit()\n env.user = fabconf['SERVER_USERNAME']\n env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']\n\n from recipes.default_appserver import restart_services as recipe\n command = recipe['%s' % service_name]\n from misc import _oven\n\n conn = boto.connect_ec2(ec2_key, ec2_secret)\n reservations = conn.get_all_instances()\n instances = [i for r in reservations for i in r.instances]\n for instance in instances:\n tags = instance.tags\n if instance.state == 'running' and 'Env' in tags:\n if tags['Env'] == env.environment and tags['Name'] == 'AppServer':\n print(_yellow('Restarting service on instance: %s' %\n instance.id))\n env.host_string = instance.public_dns_name\n env.user = fabconf['SERVER_USERNAME']\n env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']\n _oven(command)\n\n end_time = time.time()\n print(_green(\"Runtime: %f minutes\" % ((end_time - start_time) / 60)))\n print(_green(env.host_string))" ]
[ "0.762096", "0.7359066", "0.7356461", "0.7342839", "0.7295235", "0.7272434", "0.7151316", "0.7120452", "0.703074", "0.7012595", "0.6993744", "0.69693404", "0.6928476", "0.6927464", "0.692301", "0.6919504", "0.6894615", "0.6894615", "0.68921846", "0.68630123", "0.6854615", "0.68424237", "0.68396354", "0.6814391", "0.68116176", "0.67492104", "0.6730589", "0.6717123", "0.6682467", "0.6667437", "0.6662105", "0.66578704", "0.66565436", "0.6645399", "0.66407335", "0.663596", "0.6614992", "0.6602461", "0.6598894", "0.65866923", "0.6584054", "0.65786654", "0.6561553", "0.6498194", "0.64752716", "0.6450918", "0.6448761", "0.6423032", "0.6422526", "0.64189744", "0.6411652", "0.6408609", "0.6408609", "0.6391396", "0.6356984", "0.6336072", "0.6325507", "0.6304361", "0.6299866", "0.62759316", "0.62674665", "0.6264847", "0.6246346", "0.6244206", "0.62129974", "0.6210488", "0.61997515", "0.6196693", "0.6193862", "0.6192817", "0.6188734", "0.6186602", "0.6179598", "0.61682284", "0.61679745", "0.6165131", "0.61575246", "0.615527", "0.61514527", "0.6140735", "0.6119412", "0.61173147", "0.61154795", "0.6111772", "0.6068992", "0.606098", "0.60554624", "0.6051779", "0.60385674", "0.6036324", "0.6034234", "0.6030845", "0.60286117", "0.60275066", "0.6018069", "0.60069233", "0.6004065", "0.6002696", "0.5999692", "0.5991148", "0.5984734" ]
0.0
-1
wait for the SUT reply with a 200 to /_api/version
def wait_for_version_reply(self):\n        frontends = self.get_frontends()\n        for frontend in frontends:\n            # we abuse this function:\n            while frontend.get_afo_state() != AfoServerState.LEADER:\n                progress(".")\n                time.sleep(0.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gets_to_version_page(self):\n\n response = self.client.get('/version')\n\n self.assertEqual(response.status_code, 200)", "def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'])", "def test_status(self):\n resp = self.client.get('/status')\n json_resp = json.loads(resp.data.decode('utf-8'))\n self.assertEqual(json_resp['status'], 200)\n self.assertEqual(len(json_resp['version']), 7)", "def test_status_ok(api_client):\n response = api_client.get()\n assert response.ok", "def test_ready(client):\n response = client.get('/api/ready')\n assert response.json == \"Ready\" \n assert response.status_code == 200", "def test_get_version(mocker):\n client = wsgi.application.test_client(mocker)\n\n url = '/api/v0/version'\n\n response = client.get(url)\n\n output = {\n \"message\": f\"AIOPS Publisher Version {wsgi.VERSION}\",\n \"status\": \"OK\",\n \"version\": wsgi.VERSION\n }\n assert response.get_json() == output\n assert response.status_code == 200", "def test_check_version(mock_send_message):\n A1sim.check_version(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get ric version',\n (f\"{BASE_URL}/counter/interface\"))", "def test_api_versioning(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=views_api.CORE_API_DEFAULT_VERSION,\n )\n self.assertEqual(response.status_code, 200)", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_1():\n\tassert api_call().status_code == 200", "def waitUntilSuccess():", "async def test_stable_version_beta_week(\n aresponses, event_loop, hassio_response_beta_week\n):\n aresponses.add(\n \"s3.amazonaws.com\",\n \"/hassio-version/stable.json\",\n \"get\",\n aresponses.Response(\n text=json.dumps(hassio_response_beta_week), status=200, headers=HEADERS\n ),\n )\n\n async with aiohttp.ClientSession(loop=event_loop) as session:\n haversion = HassioVersion(event_loop, session)\n await haversion.get_version()\n assert haversion.version == STABLE_VERSION_BETA_WEEK", "async def test_api_status(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(\"/api/\")\n assert resp.status == HTTPStatus.OK\n json = await resp.json()\n assert json[\"message\"] == \"API running.\"", "def test_api_version(self, method):\n self.client = trovebox.Trovebox(host=self.test_host, **self.test_oauth)\n self.client.configure(api_version=1)\n self._register_uri(method,\n uri=\"http://%s/v1/%s\" % (self.test_host,\n self.test_endpoint))\n GetOrPost(self.client, method).call(self.test_endpoint)", "async def test_beta_version_beta_week(\n aresponses, event_loop, hassio_beta_response_beta_week\n):\n aresponses.add(\n \"s3.amazonaws.com\",\n \"/hassio-version/beta.json\",\n \"get\",\n aresponses.Response(\n text=json.dumps(hassio_beta_response_beta_week), status=200, headers=HEADERS\n ),\n )\n\n async with aiohttp.ClientSession(loop=event_loop) as session:\n haversion = HassioVersion(event_loop, session, \"beta\")\n await haversion.get_version()\n assert haversion.version == BETA_VERSION_BETA_WEEK", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_response_200_on_get(self):\n pass", "def testSimpleEchoMethodReturnsVersion(self):\n body = 
dumps({'id': 100, 'jsonrpc': '2.0', 'method': 'pass',\n 'params': [39, 'steps']})\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual('2.0', response['jsonrpc'])", "def test_status(self):\n status_resp = http.get(urljoin(self.uri, '/api/status'))\n for k in status_resp.json().keys():\n if k.endswith('_version'):\n self.assertEqual(status_resp[k].count('.'), 2)", "def test_server_runnin(self, client):\n\n res = client.get('/')\n assert res.status_code == 200\n assert res.json['message'] == 'Server running'\n assert res.json['status'] == 2000", "def test_api_version(self):\n from supvisors.rpcinterface import API_VERSION, RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertEqual(API_VERSION, rpc.get_api_version())", "def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def test_response_ok():\n\t\n\t# Send GET request to API given endpoint and store the response.\n\tresponse = get_items()\n\n\t# Confirm that the request-response cycle completed successfully.\n\t#assert_true(response.ok)\n\tif ('None' in response): print(\"Failed calling REST API: {}\".format(response))\n\telse: print(\"TC Passed, Response OK: {}\".format(response))", "def test_ready_post(self):\n response = self.client.open(\n '/ready',\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_api(self):\n # Get metadata list\n _logger.info('Get sequencerun API')\n response = self.client.get('/sequencerun/')\n self.assertEqual(response.status_code, 200, 'Ok status response is expected')\n\n _logger.info('Check if API return result')\n result_response = response.data['results']\n self.assertGreater(len(result_response), 0, 'A result is expected')\n\n _logger.info('Check if unique data has a single entry')\n response = self.client.get('/sequencerun/?msg_attr_action=statuschanged')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 1, 'Single result is expected for unique data')\n\n _logger.info('Check Invalid keyword')\n response = self.client.get('/sequencerun/?foo=bar')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 0, 'No results are expected for unrecognized query parameter')", "async def test_stable_version_pagination(aresponses):\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/page1\", False), status=200, headers=HEADERS\n ),\n )\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags/page2\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/page2\", False), status=200, headers=HEADERS\n ),\n )\n async with aiohttp.ClientSession() as session:\n haversion = HaVersion(\n session=session,\n source=HaVersionSource.CONTAINER,\n )\n await haversion.get_version()\n assert haversion.version == STABLE_VERSION", "def test_success(self):\n result = self.test_client.success\n\n assert result == 1", "def test_GetVersion(self):\n ret = wrap_xmlrpc_call(\n self.am_client.GetVersion, [], {}, settings.TIMEOUT)\n 
self.assertEqual(ret['geni_api'], 1)", "def test_version_successful(self):\n\n url = '/%s/job-types/job-type-for-view-test/' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n for entry in result['results']:\n expected = None\n if entry['id'] == self.job_type4.id:\n expected = self.job_type4\n elif entry['id'] == self.job_type5.id:\n expected = self.job_type5\n elif entry['id'] == self.job_type6.id:\n expected = self.job_type6\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['name'], expected.name)\n self.assertEqual(entry['version'], expected.version)\n self.assertEqual(entry['title'], expected.get_title())\n self.assertEqual(entry['description'], expected.get_description())\n self.assertEqual(entry['icon_code'], expected.icon_code)\n self.assertEqual(entry['is_published'], expected.is_published)\n self.assertEqual(entry['is_active'], expected.is_active)\n self.assertEqual(entry['is_paused'], expected.is_paused)\n self.assertEqual(entry['is_system'], expected.is_system)\n self.assertEqual(entry['max_scheduled'], expected.max_scheduled)\n self.assertEqual(entry['revision_num'], expected.revision_num)\n self.assertEqual(entry['docker_image'], expected.docker_image)", "def sendVersionCheck(self):\r\n\r\n res = self.sender.sendVersionCheck()\r\n time.sleep(self.msgWaitingTime)\r\n return res", "def user_should_get_an_ok_response():\n assert web_app.validate_reponse()", "async def do_version():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n version = data['version']['name']\n await bot.send_message(c, f'Minecraft version {version}')", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def testSimpleFailingMethodReturnsVersion(self):\n body = dumps({'id': 100, 'jsonrpc': '2.0', 'method': 'fail',\n 'params': [39, 'steps']})\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual('2.0', response['jsonrpc'])", "def handle_request_version(self, msg):\n\t\tres = Response(msg)\n\t\tres.body['version'] = VERSION\n\t\tself.finished(msg.id, res)", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def test_get_oapi_version(self):\n pass", "def assert200(self, response):\n self.assertEqual(response.status_code, 200)", "def ping():\n\treturn HTTPResponse(status=200)", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_view_success_code(self):\n response = self.client.get(self.get_url(self.htsv.pk))\n self.assertEqual(response.status_code, 200)", "def test_get_version(self):\n pass", "def test_site_get_status_200_code(api_client):\n r = api_client.get(path=\"/\")\n assert r.status_code == 200", "async def test_beta_version_pagination(aresponses):\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags\",\n \"get\",\n aresponses.Response(\n 
text=fixture(\"container/beta_week_page1\", False),\n status=200,\n headers=HEADERS,\n ),\n )\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags/page2\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/beta_week_page2\", False),\n status=200,\n headers=HEADERS,\n ),\n )\n async with aiohttp.ClientSession() as session:\n haversion = HaVersion(\n session=session,\n source=HaVersionSource.CONTAINER,\n channel=HaVersionChannel.BETA,\n )\n await haversion.get_version()\n assert haversion.version == BETA_VERSION", "def checkResponseOK(response):\n assert response['result'] == 'OK'", "def assertHttpOK(self, response):\r\n self.assertEqual(response.status_code, 200)", "def version():\n response = make_response('{\"version\" : %s }' % app.config.get('VERSION'), 200)\n response.content_type = \"application/json\"\n return response", "def test_status_home(self):\n self.assertEqual(200, self.response.status_code)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def 
test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_get_status(self):\n resp = self.build_api.getStatus().json()\n assert 'status' in resp\n assert 'message' in resp", "def assertHttpAccepted(self, resp):\r\n return self.assertEqual(resp.status_code, 202)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_service_api_get(service_app):\n response = service_app.get('/')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert json.loads(response.data) == {'description': 'service is up', 'status': 200}", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def test_version_auto_ok(self, m_get, k8sconfig):\n\n # This is a genuine K8s response from Minikube.\n response = {\n 'major': '1', 'minor': '10',\n 'gitVersion': 'v1.10.0',\n 'gitCommit': 'fc32d2f3698e36b93322a3465f63a14e9f0eaead',\n 'gitTreeState': 'clean',\n 'buildDate': '2018-03-26T16:44:10Z',\n 'goVersion': 'go1.9.3',\n 'compiler': 'gc', 'platform': 'linux/amd64'\n }\n m_get.return_value = (response, None)\n\n # Create vanilla `Config` instance.\n m_client = mock.MagicMock()\n k8sconfig = k8sconfig._replace(client=m_client)\n\n # Test function must contact the K8s API and return a `Config` tuple\n # with the correct version number.\n config2, err = k8s.version(k8sconfig)\n assert err is False\n assert isinstance(config2, K8sConfig)\n assert config2.version == \"1.10\"\n\n # Test function must have called out to `get` to retrieve the\n # version. 
Here we ensure it called the correct URL.\n m_get.assert_called_once_with(m_client, f\"{k8sconfig.url}/version\")\n assert not m_client.called\n\n # The return `Config` tuple must be identical to the input except for\n # the version number because \"k8s.version\" will have overwritten it.\n assert k8sconfig._replace(version=None) == config2._replace(version=None)\n del config2, err\n\n # Repeat the test for a Google idiosyncracy which likes to report the\n # minor version as eg \"11+\".\n response[\"minor\"] = \"11+\"\n m_get.return_value = (response, None)\n config, err = k8s.version(k8sconfig)\n assert config.version == \"1.11\"", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "async def manage_version():\n\n try:\n repo = git.Repo(search_parent_directories=True)\n version = repo.git.describe('--tags')\n except Exception:\n version = \"v0.0.0\"\n\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n creation_time = time.ctime(os.path.getmtime(base_dir))\n\n response = {'version': version, 'deployedOn': creation_time}\n return OK(response)", "def assertHttpOK(self, resp):\r\n return self.assertEqual(resp.status_code, 200)", "def test_root_response():\n request, response = app.test_client.get(\"/info.json\")\n assert response.status == 200", "async def test_command_status(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command/368630\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command-id.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.command_status(368630)\n\n assert response\n assert isinstance(response, models.CommandItem)", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_status_code(self):\n assert self.list_response.status_code == 200", "async def test_update_with_failed_get(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": \"text/xml\"},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n }\n },\n )\n 
await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == STATE_UNKNOWN\n assert \"REST xml result could not be parsed\" in caplog.text\n assert \"Empty reply\" in caplog.text", "def ping_response():\n\n return Response(\"ok\", status=200)", "async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield TriggerEvent(api_call_result)", "def test_connection():\n response = echo_client(\"GET webroot/sample.txt HTTP/1.1\")\n print response\n assert \"HTTP/1.1 200 OK\" in response", "def test_check_version(self, mock_server_status):\n # Configure the mock to return a response with an OK status code.\n mock_server_status.return_value.ok = True\n\n # mock the response from misc.server_status()\n mock_server_status.return_value = Mock(ok=True)\n mock_server_status.return_value.json.return_value = self.__server_status\n\n self.assertIsInstance(self.conn._Connection__check_version(), bool)\n assert_true(mock_server_status.called)\n self.assertEqual(self.conn._Connection__web_version, self.__web_version)\n self.assertEqual(self.conn._Connection__iserver_version,\n self.__iserver_version)", "def test_post(self): \n self.assertEqual(200, self.resp.status_code)", "async def test_api_core_state(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(\"/api/core/state\")\n assert resp.status == HTTPStatus.OK\n json = await resp.json()\n assert json[\"state\"] == \"RUNNING\"", "def test_version_is_active(self):\n\n url = '/%s/job-types/job-type-for-view-test/?is_active=false' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def test_run_success():\n httpretty.register_uri(httpretty.GET, URL, body='<html></html>', status=200)\n with mock.patch('httsleep.main.sleep') as mock_sleep:\n httsleep = HttSleep(URL, {'status_code': 200})\n resp = httsleep.run()\n assert resp.status_code == 200\n assert not mock_sleep.called" ]
[ "0.68522596", "0.6699962", "0.6616843", "0.66101503", "0.6607136", "0.6591218", "0.65614545", "0.6492159", "0.6463067", "0.6431189", "0.639559", "0.6369794", "0.62930536", "0.6274485", "0.6269045", "0.62181973", "0.6199127", "0.6190027", "0.6177709", "0.6155891", "0.6152194", "0.61470896", "0.61210364", "0.6003518", "0.59958774", "0.5993836", "0.59821296", "0.5979357", "0.5975538", "0.5972976", "0.5969859", "0.595831", "0.5955177", "0.5955177", "0.595486", "0.5941247", "0.5941051", "0.59188074", "0.5889079", "0.58492374", "0.58483267", "0.58414584", "0.5840579", "0.58358955", "0.58354527", "0.5826162", "0.5822606", "0.58160776", "0.58109343", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.5797283", "0.57873464", "0.57866204", "0.5785176", "0.5785176", "0.5785176", "0.5785176", "0.5785176", "0.578455", "0.5784088", "0.57799417", "0.57779163", "0.57694876", "0.57574415", "0.575423", "0.57540596", "0.5751211", "0.574401", "0.574401", "0.5742323", "0.5730685", "0.57239383", "0.57025665", "0.5702003", "0.56975347", "0.5695528", "0.5685531", "0.567747", "0.5675439", "0.5661109" ]
0.6028883
23
use arangosh to run a command on the frontend arangod
def execute_frontend(self, cmd, verbose=True):\n        return self.arangosh.run_command(cmd, verbose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_command(self, args):\n pass", "def command():\n pass", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cmd_abor(args):", "def airflow_commands():\n pass", "def cli():\n pass", "def execute():", "def cli(ctx):", "def cli(ctx):", "def cli():\r\n pass", "def execute(self, args):", "def cli() -> None:", "def cli() -> None:", "def cli():\n return", "def cli():\n ...", "def execute(self, isd_as: ISD_AS, cmd: str, *args: str) -> str:\n pass", "def cli():\n\n pass", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def commands():", "def cmd_appe(args):", "def cli(self, env):\n raise NotImplementedError", "def command(arguments):\n os.system(\"barrnap --kingdom {} {} > {}\".format(arguments.kingdom, arguments.input, arguments.output))", "def cmd(self):", "def do_command(self, args):\n chk_arg_count(args, 0)\n sys.stdout.write('temare %s\\n' % (version.__version__, ))", "def cli(_):\n pass", "def cli(_):\n pass", "def cli(ctx):\n #TODO", "def main():\n frontend_query(PATH, USER)", "def octopus_run(self, msg, args):\r\n return self.templates.run(msg, args)", "def arun(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.detached = True\n connecter.run(user_cmd)", "def execute():\n pass", "def runInKatana(self, snappyPaths):\n # RUN SOME COMMAND IN KATANA\n return \"Done\"", "def _cli():\n pass", "def main():\n\tcli = Cli()\n\tcli.run()", "def octopus_predefined_run(self, msg, args):\r\n return self.predefined.run(msg, args)" ]
[ "0.6615906", "0.6522063", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.63606685", "0.62926817", "0.62446445", "0.6232357", "0.62033004", "0.62033004", "0.61711955", "0.6168619", "0.6130842", "0.6130842", "0.61223966", "0.6117348", "0.6108645", "0.607995", "0.60570997", "0.60570997", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.602978", "0.6002356", "0.5969504", "0.5944244", "0.5939088", "0.59197867", "0.5890861", "0.5881166", "0.5881166", "0.58724564", "0.5869408", "0.5862683", "0.58452004", "0.5825105", "0.58210325", "0.5811262", "0.58076894", "0.5785258" ]
0.79413956
0
get the port of the arangod which is the coordinator, etc.
def get_frontend_port(self): if self.frontend_port: return self.frontend_port return self.get_frontend().port
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_port(self):\n return self.__port", "def get_port(self):\n return self.port", "def port(self) -> int:", "def get_port(self):\n return self.__port", "def port():", "def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"port\")", "def get_port(self):\n \n return self._port", "def get_port(self) -> int:\n return self._port", "def getPort(self):\n return self._port", "def get_port(self) -> int:\n return int(self.socket.getsockname()[1])", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def getPort(self):\n return self._port", "def Port(self) -> int:", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self):\n return self._host[CONF_PORT]", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"port\")", "def get_serverport(cobj):\n pass", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def _get_nport(self):\n return self.__nport", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return self.proto.port", "def port(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def remote_getPort(self):\r\n return int(self._fwdPort)", "def port(self) -> int:\n return self._port", "def external_port(self):\r\n return self._external_port", "def port(self):\r\n _, port = self.server_address\r\n return port", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self):\n\n return self._port", "def get_manager_rest_service_port():\n return int(os.environ[MANAGER_REST_PORT_KEY])", "def port(self):\n # This property is not 100% needed, but is included instead of making the raw variable public to prevent people from accidentally overwriting the port and screwing up this representative value\n return self._port", "def port1(self):\n return self._port1", "def internal_port(self):\r\n return self._internal_port", "def comm_port(self):\r\n return self._comm_port", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self):\n _, port = self.server_address\n return port", "def get_res_port():\n return get_port() + 1", "def getCurPort(self):\n cmd_string = '?6'\n data = self.sendRcv(cmd_string)\n with self._syringeErrorHandler():\n try:\n port = int(data)\n except ValueError:\n raise SyringeError(7, self.__class__.ERROR_DICT)\n self.state['port'] = port\n return port", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_get_port(self)", "def get_open_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((\"\", 0))\n o_port = sock.getsockname()[1]\n sock.close()\n return o_port", "def get_port(self, conf, dpid, port_id):\n\t\tpass", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVEN_PORT'))\n\t\texcept:\n\t\t\treturn 1883", "def port(self):\n\n return self.config.dict[\"fhdhr\"][\"port\"]", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._primary_port_prop)", "def port2(self):\n return self._port2", "def remoteport(self) :\n\t\ttry :\n\t\t\treturn self._remoteport\n\t\texcept Exception as e:\n\t\t\traise e", "def port_number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port_number\")", "def envPort(self):\r\n return self._envPort", "def receiver_port(self):\n return self._receiver_port", "def PortNumber(self):\n\t\treturn self._get_attribute('portNumber')", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_get_port(self)", "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def get_port(self):\n if self._port is None:\n self._port = Port(port_no=self._port_no,\n label=self._label,\n type=Port.PON_OLT,\n admin_state=AdminState.ENABLED,\n oper_status=OperStatus.ACTIVE)\n # TODO: For now, no way to report the proper ADMIN or OPER status\n # admin_state=self._admin_state,\n # oper_status=self._oper_status)\n return self._port", "def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def port(self):\n if self._state == JobState.RUNNING:\n return self._process.port\n return None", "def get_host_port(self) -> int:\n return 
self.config_dict.get(\"host_port\", 0)", "def container_port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"container_port\")", "def localport(self) :\n\t\ttry :\n\t\t\treturn self._localport\n\t\texcept Exception as e:\n\t\t\traise e", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def masterPort(self):\r\n return self._masterPort", "def get_my_port(self):\n if self.starter_port is not None:\n return self.starter_port\n\n where = -1\n tries = 10\n while where == -1 and tries:\n tries -= 1\n lfcontent = self.get_log_file()\n where = lfcontent.find(\"ArangoDB Starter listening on\")\n if where != -1:\n where = lfcontent.find(\":\", where)\n if where != -1:\n end = lfcontent.find(\" \", where)\n port = lfcontent[where + 1 : end]\n self.starter_port = port\n assert int(port), \"port cannot be converted to int!\"\n return port\n logging.info(\"retrying logfile\")\n time.sleep(1)\n message = \"could not get port form: \" + self.log_file\n logging.error(message)\n raise Exception(message)", "def internet_port(self) -> str:\n return pulumi.get(self, \"internet_port\")", "def http_port(self):\r\n return self._http_port", "def get_port_number():\n try:\n return os.environ[\"PORT\"]\n except Exception:\n return None", "def port(self):\n if self._server_thread is None:\n raise RuntimeError('Server not started.')\n return self._port" ]
[ "0.78841996", "0.78097165", "0.7728278", "0.7653651", "0.7631629", "0.7624739", "0.7596705", "0.750416", "0.7466592", "0.74258894", "0.74075156", "0.7370084", "0.7345154", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7284412", "0.7284412", "0.7284412", "0.7265361", "0.7265361", "0.7265361", "0.7265361", "0.7265361", "0.72579724", "0.72378796", "0.72378796", "0.7223743", "0.71973616", "0.71928716", "0.71539015", "0.7137715", "0.7137715", "0.7137715", "0.7137715", "0.7137715", "0.7137715", "0.7137715", "0.7136779", "0.7080483", "0.70733976", "0.70733976", "0.70733976", "0.70733976", "0.70733976", "0.70733976", "0.70733976", "0.70640296", "0.70564824", "0.70442766", "0.7041894", "0.70352685", "0.70352685", "0.70249873", "0.70232093", "0.69816643", "0.6970903", "0.6967671", "0.6960273", "0.69279915", "0.69279915", "0.69279915", "0.69279915", "0.6916263", "0.6883149", "0.6866938", "0.686203", "0.6850571", "0.6841521", "0.682182", "0.6817266", "0.67670614", "0.67636126", "0.67505276", "0.6732453", "0.67246807", "0.6713266", "0.6706812", "0.66990614", "0.6696921", "0.66964877", "0.66964877", "0.6688084", "0.6687468", "0.6687468", "0.6675628", "0.66568035", "0.66456544", "0.6644217", "0.66395944", "0.6639384", "0.6617132", "0.65838724", "0.6580743", "0.6569119", "0.65643644", "0.6548272" ]
0.6660332
89
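A minimal, runnable sketch of the cache-or-delegate pattern in the get_frontend_port document above. The Frontend stub and the 8529 fallback port are assumptions made for illustration; only the get_frontend_port body itself is taken from the record, and the real get_frontend() presumably queries the running deployment.

    class Frontend:
        """Hypothetical stand-in for the real frontend instance."""
        def __init__(self, port):
            self.port = port

    class Starter:
        def __init__(self, frontend_port=None):
            self.frontend_port = frontend_port  # cached port, may be None

        def get_frontend(self):
            return Frontend(8529)  # assumed default; the real code asks the instance

        def get_frontend_port(self):
            # Prefer the cached value; otherwise delegate to the frontend object.
            if self.frontend_port:
                return self.frontend_port
            return self.get_frontend().port

    assert Starter(9999).get_frontend_port() == 9999
    assert Starter().get_frontend_port() == 8529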
find out my frontend port
def get_my_port(self):
        if self.starter_port is not None:
            return self.starter_port

        where = -1
        tries = 10
        while where == -1 and tries:
            tries -= 1
            lfcontent = self.get_log_file()
            where = lfcontent.find("ArangoDB Starter listening on")
            if where != -1:
                where = lfcontent.find(":", where)
                if where != -1:
                    end = lfcontent.find(" ", where)
                    port = lfcontent[where + 1 : end]
                    self.starter_port = port
                    assert int(port), "port cannot be converted to int!"
                    return port
            logging.info("retrying logfile")
            time.sleep(1)
        message = "could not get port from: " + str(self.log_file)
        logging.error(message)
        raise Exception(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frontend_port(self):\n if self.frontend_port:\n return self.frontend_port\n return self.get_frontend().port", "def port():", "def port(self) -> int:", "def port(self):\r\n _, port = self.server_address\r\n return port", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def _browser_server_port() -> int:\n return int(get_option(\"server.port\"))", "def port(self):\n return 5000", "def nscaweb_port(self):\n return self.__get_option('nscaweb_port')", "def get_serverport(cobj):\n pass", "def _get_port(self):\n return self.__port", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def http_port(self):\r\n return self._http_port", "def port(self):\n return self._host[CONF_PORT]", "def port(self):\n _, port = self.server_address\n return port", "def get_port_number():\n try:\n return os.environ[\"PORT\"]\n except Exception:\n return None", "def port(self):\n\n return self.config.dict[\"fhdhr\"][\"port\"]", "def get_port(self):\n return self.port", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('FLASK_DRIVER_PORT'))\n\t\texcept:\n\t\t\treturn 5000", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def get_res_port():\n return get_port() + 1", "def getHost():", "def getHost():", "def Port(self) -> int:", "def port(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"port\")", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def get_manager_rest_service_port():\n return int(os.environ[MANAGER_REST_PORT_KEY])", "def api_endpoint_port():\n return 5000", "def intranet_port(self) -> str:\n return pulumi.get(self, \"intranet_port\")", "def adminPort(self):\n return 55000", "def getWebPort(self):\n port = None\n if hasattr(self, 'web'):\n port = self.web.get('port', None)\n\n if port is not None:\n # Make sure it is an int.\n return int(port)\n else:\n return None", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n 
return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def get_tcp_port() -> int:\n Config.__get()\n assert Config.__config is not None\n return int(Config.__config.get(\"wsgi\", \"tcp_port\", fallback=\"8000\").strip())", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def bundle_http_port():\n return int(os.getenv('BUNDLE_HTTP_PORT', 9000))", "def internet_port(self) -> str:\n return pulumi.get(self, \"internet_port\")", "def port(self):\n if self._server_thread is None:\n raise RuntimeError('Server not started.')\n return self._port", "def get_port(self) -> int:\n return int(self.socket.getsockname()[1])", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def port(self):\n return f'ListenPort = {self._peer.port}'", "def port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def get_port(self):\n return self.__port", "def external_port(self):\r\n return self._external_port", "def port(self) -> int:\n return self.proto.port", "def server_port(self) -> Optional[int]:\n return pulumi.get(self, \"server_port\")", "def get_host_port(self) -> int:\n return self.config_dict.get(\"host_port\", 0)", "def server_address():\n return '127.0.0.1'", "def server_port(self):\n return self._server_port", "def server_port(self):\n return self._server_port", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def port_show(switch, port):\n print client.port.show(switch, port)", "def nscaweb_host(self):\n return self.__get_option('nscaweb_host')", "def _get_http_port(self):\n http_port = None\n try:\n http_port = self.query('pref', 'httpport', '?')\n if not http_port:\n _LOGGER.error(\n \"Unable to read data from server %s:%s\",\n self.host,\n self.port)\n return\n return http_port\n except ConnectionError as ex:\n _LOGGER.error(\n \"Failed to connect to server %s:%s - %s\",\n self.host,\n self.port,\n ex)\n return", "def get_server():\n pass", "def localhost(self):\n return self.__get_option('localhost')", "def port(self):\n # This property is not 100% needed, but is included instead of making the raw variable public to prevent people from accidentally overwriting the port and screwing up this representative value\n return self._port", "def _current_ip_port(is_secure: bool, host: str, url: str) -> str:\n\n protocol = 'https://' if is_secure else 'http://'\n web_url = protocol + host\n return web_url + url", "def getPort(self):\n return self._port", "def port(self) -> int:\n return self._port", "def get_port(self):\n \n return self._port", "def localport(self) :\n\t\ttry :\n\t\t\treturn self._localport\n\t\texcept Exception as e:\n\t\t\traise e", "def port(self):\n\n return self._port", "def get_port(self) -> int:\n return self._port", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return 
s.getsockname()[1]", "def envPort(self):\r\n return self._envPort", "def ws_port(self):\r\n return self._ws_port", "def internal_port(self):\r\n return self._internal_port", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def getDbPort():\n\n if \"DB_PORT\" in controller.CONF.keys():\n return controller.CONF[\"DB_PORT\"]\n\n return basedefs.DB_PORT", "def _find_free_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('localhost', 0))\n _, port = sock.getsockname()\n sock.close()\n\n return port", "def cloud_port(self):\n return self._cloud_port" ]
[ "0.77191985", "0.76183486", "0.7364856", "0.7049408", "0.70435005", "0.7038148", "0.69965345", "0.6989152", "0.6982898", "0.68437135", "0.6827302", "0.6827302", "0.6827302", "0.6827302", "0.6827302", "0.68235147", "0.6803366", "0.67924094", "0.678412", "0.6752687", "0.672235", "0.67013574", "0.6690418", "0.6690418", "0.6690418", "0.6690418", "0.66873705", "0.66873705", "0.66873705", "0.66873705", "0.66873705", "0.66873705", "0.66873705", "0.6676597", "0.6676597", "0.6665873", "0.66626054", "0.66626054", "0.66344714", "0.66342694", "0.6631219", "0.6631219", "0.6631219", "0.6631219", "0.6631219", "0.6631219", "0.66216284", "0.66157067", "0.6564243", "0.65346247", "0.6518643", "0.6505886", "0.6505886", "0.6505886", "0.6505886", "0.6505886", "0.6505886", "0.6505886", "0.65046895", "0.6503741", "0.6503741", "0.65031856", "0.64682806", "0.64576197", "0.6444877", "0.6439364", "0.6428501", "0.64082", "0.6389328", "0.6389328", "0.6389328", "0.6385221", "0.63710487", "0.6362609", "0.63596725", "0.6350287", "0.6349304", "0.6335585", "0.6335585", "0.6317385", "0.63125336", "0.6308669", "0.63028413", "0.6289668", "0.6288539", "0.62806106", "0.6262651", "0.6255526", "0.62554616", "0.6235833", "0.62306136", "0.6228263", "0.6220294", "0.6200884", "0.6186478", "0.6168704", "0.61683553", "0.61572534", "0.6153677", "0.6153467", "0.61446947" ]
0.0
-1
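The get_my_port document above is a retry loop around a substring scan of the starter log. Below is a self-contained sketch of just the extraction step; the banner text comes from the record, while the sample log line is invented. The real method wraps this scan in a ten-try loop with time.sleep(1) between reads, since the banner may not have been written yet.

    def extract_starter_port(log_text):
        # Same scan as get_my_port: locate the banner, then take the
        # characters between the following ':' and the next space.
        where = log_text.find("ArangoDB Starter listening on")
        if where == -1:
            return None
        where = log_text.find(":", where)
        if where == -1:
            return None
        end = log_text.find(" ", where)
        return log_text[where + 1 : end]

    sample = "INFO ArangoDB Starter listening on 0.0.0.0:9528 (...)"
    assert extract_starter_port(sample) == "9528"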
get the port of a syncmaster arangosync
def get_sync_master_port(self): self.sync_master_port = None pos = None sm_port_text = "Starting syncmaster on port" sw_text = "syncworker up and running" worker_count = 0 logging.info("detecting sync master port") while worker_count < 3 and self.is_instance_running(): progress("%") lfs = self.get_log_file() npos = lfs.find(sw_text, pos) if npos >= 0: worker_count += 1 pos = npos + len(sw_text) else: time.sleep(1) lfs = self.get_log_file() pos = lfs.find(sm_port_text) pos = lfs.find(sm_port_text, pos + len(sm_port_text)) pos = lfs.find(sm_port_text, pos + len(sm_port_text)) if pos >= 0: pos = pos + len(sm_port_text) + 1 self.sync_master_port = int(lfs[pos : pos + 4]) return self.sync_master_port
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def masterPort(self):\r\n return self._masterPort", "def port(self) -> int:", "def get_slave_port():\n return 9901", "def _get_port(self):\n return self.__port", "def port():", "def get_serverport(cobj):\n pass", "def Port(self) -> int:", "def get_res_port():\n return get_port() + 1", "def get_port(self):\n return self.port", "def get_cmd_port(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetCmdPort', self.handle)", "def _get_nport(self):\n return self.__nport", "def get_port(self):\n return self.__port", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def get_port(self) -> int:\n return int(self.socket.getsockname()[1])", "def get_port(self) -> int:\n return self._port", "def getCurPort(self):\n cmd_string = '?6'\n data = self.sendRcv(cmd_string)\n with self._syringeErrorHandler():\n try:\n port = int(data)\n except ValueError:\n raise SyringeError(7, self.__class__.ERROR_DICT)\n self.state['port'] = port\n return port", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVEN_PORT'))\n\t\texcept:\n\t\t\treturn 1883", "def get_port(self):\n \n return self._port", "def external_port(self):\r\n return self._external_port", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def getPort(self):\n return self._port", "def comm_port(self):\r\n return self._comm_port", "def get_manager_rest_service_port():\n return int(os.environ[MANAGER_REST_PORT_KEY])", "def port(self):\r\n _, port = self.server_address\r\n return port", "def remote_getPort(self):\r\n return int(self._fwdPort)", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVER_PORT'))\n\t\texcept:\n\t\t\treturn 1883", "def getPort(self):\n return self._port", "def mod_func(port):\n\n print(port)\n return port", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"port\")", "def port(self):\n return self._host[CONF_PORT]", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"port\")", "def remoteport(self) :\n\t\ttry :\n\t\t\treturn self._remoteport\n\t\texcept Exception as e:\n\t\t\traise e", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def port(self):\n return self._port", "def 
port(self):\n return self._port", "def port(self):\n return self._port", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def get_open_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((\"\", 0))\n o_port = sock.getsockname()[1]\n sock.close()\n return o_port", "def port(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return self.proto.port", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def receiver_port(self):\n return self._receiver_port", "def port1(self):\n return self._port1", "def adminPort(self):\n return 55000", "def cmd_port(args):", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self):\n _, port = self.server_address\n return port", "def localport(self) :\n\t\ttry :\n\t\t\treturn self._localport\n\t\texcept Exception as e:\n\t\t\traise e", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_get_port(self)", "def port(self):\n # This property is not 100% needed, but is included instead of making the raw variable public to prevent people from accidentally overwriting the port and screwing up this representative value\n return self._port", "def port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._primary_port_prop)", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def _grab_port(self):\r\n port = \"\"\r\n while self._char != -1 and self._char in \"0123456789\":\r\n port += self._char\r\n self._get_char()\r\n if len(port) == 0:\r\n self._error(\"port empty\")\r\n return int(port)", "def port(self):\n return f'ListenPort = {self._peer.port}'", "def get_vnc_port(self):\n\t\troot = self.get_xml()\n\t\t# get the VNC port\n\t\tgraphics = root.find('./devices/graphics')\n\t\tport = graphics.get('port')\n\t\treturn port", "def get_irc_port(self):\n if self.get_tls():\n return 6697\n else:\n return 6667", "def get_host_port(self) -> int:\n return self.config_dict.get(\"host_port\", 0)", "def get_safe_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((LOCALHOST, 0))\n port = sock.getsockname()[1]\n sock.close()\n return port", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return self._port", "def get_my_port(self):\n if self.starter_port is not None:\n return self.starter_port\n\n where = -1\n tries = 10\n while where == -1 and tries:\n tries -= 1\n lfcontent = self.get_log_file()\n where = lfcontent.find(\"ArangoDB Starter listening on\")\n if where != -1:\n where = lfcontent.find(\":\", where)\n if where != -1:\n end = lfcontent.find(\" \", where)\n port = lfcontent[where + 1 : end]\n self.starter_port = port\n assert int(port), \"port cannot be converted to int!\"\n return port\n logging.info(\"retrying logfile\")\n time.sleep(1)\n message = \"could not get port form: \" + self.log_file\n logging.error(message)\n raise Exception(message)", "def get_address(self):\n \n return tuple('localhost',self._port)", "def get_address(self):\n \n return tuple('localhost',self._port)", "def get_local_port(self):\n info = self._get_controller_info(self._get_controller_name())\n LOGGER.info('Local controller is at %s on %s', info[0], info[1])\n return int(info[1])", "def port(self):\n if self._server_thread is None:\n raise RuntimeError('Server not started.')\n return self._port", "def get_port(project_path):\n\n config = ConfigParser.ConfigParser()\n config_path = os.path.abspath(os.path.join(project_path, 'config.ini'))\n config.read(config_path)\n return config.get('SELENIUMSERVER', 'hub_port')", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_get_port(self)", "def get_platform_serial_port():\n return get_system_serial_port(platform.system())" ]
[ "0.70510185", "0.70510185", "0.70024353", "0.6852463", "0.68338513", "0.679988", "0.6775076", "0.6759334", "0.66923726", "0.6612166", "0.6540162", "0.6490325", "0.64330757", "0.6365352", "0.63405347", "0.6295224", "0.61943614", "0.6190745", "0.6190299", "0.6183827", "0.6175898", "0.6159054", "0.6159054", "0.6159054", "0.61576355", "0.61543393", "0.61479646", "0.6140313", "0.6073863", "0.60477436", "0.604585", "0.60399747", "0.60006946", "0.598306", "0.59812105", "0.59812105", "0.5979852", "0.5979144", "0.59697646", "0.59697646", "0.59675336", "0.5967527", "0.59480554", "0.59480554", "0.59480554", "0.59480554", "0.59480554", "0.59398043", "0.59398043", "0.59398043", "0.59398043", "0.59398043", "0.59398043", "0.59313756", "0.5918366", "0.5906375", "0.5896011", "0.58849734", "0.58734787", "0.5857285", "0.58533597", "0.58469814", "0.58443403", "0.58443403", "0.58443403", "0.58443403", "0.58443403", "0.58443403", "0.58443403", "0.5840443", "0.5798051", "0.5786391", "0.5779621", "0.57671833", "0.5761225", "0.5761225", "0.5761225", "0.5761225", "0.5761225", "0.5761225", "0.5761225", "0.57235533", "0.57179487", "0.5710954", "0.57073385", "0.56982726", "0.56874037", "0.5678615", "0.5678615", "0.5678615", "0.5678615", "0.56782955", "0.5675506", "0.567468", "0.567468", "0.56709576", "0.56675524", "0.5667365", "0.5654745", "0.56492746" ]
0.7340527
0
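get_sync_master_port above first waits until three "syncworker up and running" lines appear (while polling is_instance_running()), then reads a four-digit port after the third "Starting syncmaster on port" banner. A sketch of that second step, keeping the record's four-digit-port assumption; the sample log is invented.

    def third_syncmaster_port(log_text):
        marker = "Starting syncmaster on port"
        pos = -1
        for _ in range(3):                        # skip to the third banner
            pos = log_text.find(marker, pos + 1)
            if pos == -1:
                return None
        start = pos + len(marker) + 1             # +1 skips the space
        return int(log_text[start : start + 4])   # assumes a 4-digit port

    log = " | ".join("Starting syncmaster on port %d" % p
                     for p in (8629, 8639, 8649))
    assert third_syncmaster_port(log) == 8649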
fetch the logfile of this starter
def get_log_file(self): return self.log_file.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def getLogs():", "def getLogs():", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_main_log(self) -> Any:\n return self.logger", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"", "def getLog():\n # assign a current working directory + '/logs' to log_dir variable (platform independent)\n log_dir = os.path.join(os.getcwd(), \"logs\")\n # or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"logs\")\n # or --> user directory: log_dir = os.path.join(os.path.expanduser(\"~\"), \"logs\")\n\n try:\n # if logs directory(!) doesn't exist, create it\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n # open log file with prefix and timestamp (platform independent) in Append mode\n log = open(os.path.join(log_dir, \"rfaRunner_\" + getCurTime(\"%Y%m%d_%H-%M\") + \".log\"), \"a\")\n return log\n except (OSError, IOError):\n # return -1 in case of exception\n return -1", "def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def getLog(self):\n pass", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def get_terraform_install_log_file(self):\n return self.terraform_install_log", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def get_log_path():\n return LOG_PATH", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def logs_directory(self):", "def get_action_logfile():\n return \"action\" + get_day() + \".log\"", "def test_get_source_log(self):\n pass", "def getLog(self):\n \n return self.resp[\"log\"]", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = 
self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def get_console_log_filename(self):\n return", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log", "def get_full_log(self):\n return self._get_log('full')", "def getLog(self):\n return self.session.request('diag/log/')", "def _get_job_log(self, extended_slug=None, job_id=None):\n filename = get_filename(extended_slug)\n if filename:\n print('using {0}'.format(filename))\n file = codecs.open(filename, 'r', 'utf-8')\n log = Log.from_file(file)\n if not job_id:\n job = get_job(self._travis, extended_slug)\n print('set job_id={0}'.format(job.id))\n else:\n job = self._travis.job(job_id)\n assert job.log != ''\n save_job_log(job)\n log = job.log\n\n return log", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! Initialise folder first or reset your configuration!\")", "def get_fifteen_logfile():\n return \"fifteenStrategy\" + get_day() + \".log\"", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def getAndSetLogfile(self,device,outputFile = None):\n\n dirResults = \"/var/www/html/sqa/testResults/%s/\"%self.build\n httpPath = \"http://sysmaster/sqa/testResults/%s/\"%self.build\n if self.isCardSerialSet == 0:\n \n self.setCardSerialAndType(device)\n \n # add number to the end, so it wont complain\n if outputFile:\n outputFile = outputFile + \"-0\"\n \n if not outputFile:\n outputFile = \"%s-%s-%s-%s-%s-%s-0\"%(self.host.name,get_device_letter(device),self.cardType,self.cardSerial,self.host.cat_etc_issue(),self.testcaseStr)\n \n outputFile = dirResults + outputFile\n \n #outputFile = outputFile + \".html\"\n \n outputFile = incrementFileNameIfExists(outputFile,ext = \"html\")\n \n \n self.logFile = outputFile\n \n m = re.search(\".*/(\\S+)\",outputFile)\n outputFile = m.group(1)\n self.logFileHttpPath = httpPath + outputFile\n return self.logFile", "def logpath(self):\n return self.outpath", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def log(self):\r\n return self._log", "def _get_logger(self):", "def log (self):\n return self._log", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def task_log(self, task_id, subtask=None, workunit_id=None):\n\n if subtask:\n dir, logfile = task_log_path(task_id, subtask, workunit_id)\n else:\n dir, logfile = task_log_path(task_id)\n\n fp = open(logfile, 'r')\n log = fp.read()\n fp.close()\n return log", "def get_log():\n set_ctime()\n f = open(log_path, 'r')\n o = get_offset()\n f.seek(int(o))\n return f", "def 
logfile():\n\n class Logfile(object):\n def __init__(self, filename, *args, **kwargs):\n super(Logfile, self).__init__(*args, **kwargs)\n self.filename = filename\n self.logs = \"\"\n\n def read(self):\n with open(self.filename) as file:\n for line in file:\n self.logs += line\n return self.logs\n\n yield Logfile(filename=\"gen3tests.logs\")\n\n # cleanup after each use\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")", "def log(self):\n return self._log", "def log(self):\n return self._log", "def logfile(self, logfilename=None):\n if logfilename is None:\n loggingtimestring = time.asctime().replace(\" \", \"_\").replace(':', '-')\n logfilename = os.path.expanduser(\"~\" + os.sep + \"commslog_\" + type(self).__name__ + \"_\" + loggingtimestring + \".txt\")\n \n self._commslogfilename = logfilename\n print 'Logging : %r' % (self._OIFlogging)\n print 'Log File : %s' % (self._commslogfilename)\n return self._commslogfilename", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def get_hedge_logfile(dir):\n logfiles = Path(dir).glob('*')\n max_m_time = -1\n current_log = None\n for lf in logfiles:\n mtime = lf.stat().st_mtime\n if mtime > max_m_time:\n max_m_time = mtime\n current_log = lf\n if current_log:\n logging.debug(f\"Reading hedge logfile {current_log}.\")\n else:\n logging.warning(f\"No hedge log found. Ensure hedge logfile is being written and correct address is entered in settings.\")\n return current_log", "def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def get_logs():\n callback = bottle.request.query.get('callback')\n folder = os.path.dirname(os.path.abspath(__file__))\n test_run_title = bottle.request.query.test_run_id\n results = {'logs': {'monitor': '', 'testrun': ''}, 'host': bottle.request.headers.get('host')}\n try:\n with open(os.path.join(folder, 'monitor.log'), 'r+') as _f:\n results['logs'].update({'monitor': tools.get_last_logs(_f.readlines())})\n with open(os.path.join(folder, '%s-testrun.log' % test_run_title), 'r+') as _f:\n results['logs'].update({'testrun': tools.get_last_logs(_f.readlines())})\n except IOError as err:\n key = 'monitor' if 'monitor' in str(err) else 'testrun'\n results['logs'].update({key: 'Could not find logs: %s' % err})\n return '{0}({1})'.format(callback, [results])", "def read_linelog():", "def get_logger():\n global swan_logger\n return swan_logger", "def read_agent_logfile(self):\n server = self.get_agent()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")", "def updater_log_file(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\t\tresult = None\n\t\tjob = ''\n\t\tif self._current_job and 'job' in self._current_job:\n\t\t\tjob = self._current_job['job']\n\t\telse:\n\t\t\tjob = request.options.get('job','')\n\n\t\tcount = request.options.get('count',0)\n\t\tif count < 0:\n\t\t\tresult = 
0\n\t\telse:\n\t\t\tresult = []\n\t\tif not job in INSTALLERS:\n\t\t\t# job empty: this is the first call I can't avoid\n\t\t\tif job != '':\n\t\t\t\tMODULE.warn(\" ?? Don't know a '%s' job\" % job)\n\t\telse:\n\t\t\tif not 'logfile' in INSTALLERS[job]:\n\t\t\t\tMODULE.warn(\" ?? Job '%s' has no associated log file\" % job)\n\t\t\telse:\n\t\t\t\tfname = INSTALLERS[job]['logfile']\n\t\t\t\tif count < 0:\n\t\t\t\t\tresult = self._logstamp(fname)\n\t\t\t\telse:\n\t\t\t\t\t# don't read complete file if we have an 'ignore' count\n\t\t\t\t\tif ('lines' in self._current_job) and (self._current_job['lines']):\n\t\t\t\t\t\tcount += int(self._current_job['lines'])\n\t\t\t\t\tresult = self._logview(fname, -count)\n\n\t\t# again debug, shortened\n\t\tif isinstance(result,int):\n\t\t\tMODULE.info(\" >> %d\" % result)\n\t\telse:\n\t\t\tMODULE.info(\" >> %d lines\" % len(result))\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id, result)", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def get_result_path(self):\n return logPath", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def get_logging_dir(self):\n return self.logging_dir", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def stdout_path(self):\n return self.log_path\n # return self.path / 'stdout.txt'", "def _get_log_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".log.txt\")", "def _logFile_default(self):\n print \"choosing default log file\"\n return os.path.join(self.rpiADCLogFolder,time.strftime(\"rpiADC-%Y-%m-%d.csv\", self.currentLocalTime))", "def log():\n return logging.getLogger(__name__)", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def trigger_logfile(self) -> str:\n return f'''trigger-id-{self.thread_trigger_id}.log'''", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def GetLogFileForTask(cls, task):\n rel_path = cls.TaskRelativeName(task)\n if not rel_path or not PipelineConfig.Instance().pipeline_log_dir(): return None\n # Flatten the path.\n rel_path = rel_path.replace(os.sep, '.')\n return os.path.join(PipelineConfig.Instance().pipeline_log_dir(), rel_path + '.log')", "def log(self) -> DagsterLogManager:\n return self._step_execution_context.log", "def logging(self):\r\n return None", "def build_log(self):\n if not self._build_log_text:\n self._build_log_text = self._cat('/tmp/log')\n return self._build_log_text", "def get_trial_dir() -> str:\n return logging.root._log_dir # type: ignore", "def retrieveLogs(self, execution, localLogDestination):\n # TODO: Implement this in order to get your logs out. The parent implementation will take care of cpu.log in case\n # profiling was requested. 
Example:\n #\n # execution.host.getFile( '{0}/log.log'.format( self.getExecutionLogDir( execution ) ),\n # os.path.join( localLogDestination, 'log.log' ), reuseConnection = execution.getRunnerConnection() )\n # client.retrieveLogs(self, execution, localLogDestination)\n #\n # The use of the execution.getRunnerConnection() connection prevents errors with multi-threading.\n #\n # This assumes you have no logs of your own:\n client.retrieveLogs(self, execution, localLogDestination)", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def return_user_log_from_frr(dut,log_file_name):\n return st.config(dut,\"docker exec -it bgp cat /var/log/frr/%s\"%log_file_name)", "def log_info(request, piece_id):\n\n asset = AssetMap.objects.get(piece=piece_id)\n url = '/'.join([FTP_URL, asset.folder, asset.name+'.log',])\n context = {\n 'keyform': KeySearchForm(auto_id=False),\n 'piece' : Piece.objects.get(pk=piece_id),\n 'logfile': requests.get(url),\n }\n return render(request, 'mutopia/piece_log.html', context)", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def setup_script_logging():\n #handlers = [logbook.NullHandler()]\n format_str = (\"[{record.time:%Y-%m-%dT%H:%MZ}] \"\n \"{record.level_name}: {record.message}\")\n\n #handler = logbook.StreamHandler(sys.stderr, format_string=format_str,\n # level=\"DEBUG\")\n #handler.push_thread()\n #return handler", "def GetLogFile (physical = False) :\n if sys.hal_log_values [\"__log_file\"] is None :\n if physical :\n path = GetPath ()\n if sys.hal_log_values [\"__log_file_name\"] is None :\n if os.path.exists (path) : sys.hal_log_values [\"__log_file_name\"] = os.path.join (path, sys.hal_log_values [\"__log_const\"])\n else : raise PQHException (\"unable to create a log file in folder \" + path)\n \n if not isinstance (sys.hal_log_values [\"__log_file_name\"], str) :\n sys.hal_log_values [\"__log_file\"] = sys.hal_log_values [\"__log_file_name\"]\n else :\n try : \n sys.hal_log_values [\"__log_file\"] = open (sys.hal_log_values [\"__log_file_name\"], \"w\", encoding=\"utf-8\")\n except Exception as e: \n raise OSError (\"unable to create file \" + sys.hal_log_values [\"__log_file_name\"] + \"\\n\" + str(e))\n else :\n sys.hal_log_values [\"__log_file\"] = LogFakeFileStream()\n \n return sys.hal_log_values [\"__log_file\"]", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def log(self) -> misc_.Logger:\n\t\treturn self._log", "def get_log(*args, **kwargs):\n return get_log_async(*args, **kwargs).get_result()", "def logger():\n global logger\n # create logger with 'syncsolr'\n logger = logging.getLogger('scheduleAddData')\n logger.setLevel(logging.DEBUG)\n\n # specifies the lowest severity that will be dispatched to the appropriate destination\n\n # create file handler which logs even debug messages\n fh = logging.FileHandler('scheduleUpdateData.log')\n # fh.setLevel(logging.WARN)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n # StreamHandler instances send messages to streams\n # ch.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n 
fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(ch)\n logger.addHandler(fh)", "def logs(self, container: Container) -> str:", "def log_artifact(self, filename=''):\n return os.path.join(os.sep, 'opt', 'ml', 'model', filename)", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. \n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. 
\n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def open_logs():\n\treturn log, action_log, error_log", "def logfile(targetfile=\"ros.log\"):\n log = logging.getLogger(__name__)\n log.basicConfig(filename=str(targetfile))", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def read_db_logfile(self):\n server = self.get_dbserver()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")", "def pytest_logger_logsdir(self, config):", "def __str__(self):\n return f\"QChem log file {self.filename}\"", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")" ]
[ "0.75987124", "0.734955", "0.734955", "0.69272965", "0.6812943", "0.67958313", "0.6760511", "0.6727943", "0.67024195", "0.6673078", "0.6650076", "0.6613002", "0.66086835", "0.6594247", "0.6592443", "0.6579317", "0.6579317", "0.65463054", "0.6543485", "0.6533908", "0.6504604", "0.6501198", "0.6493251", "0.6486426", "0.64857507", "0.6479044", "0.6475161", "0.6468744", "0.6393952", "0.63902956", "0.637833", "0.6337188", "0.6323602", "0.6310433", "0.6305771", "0.6300044", "0.629522", "0.628856", "0.6288387", "0.62806237", "0.6266522", "0.6258074", "0.6244098", "0.62379545", "0.6235642", "0.62207013", "0.6203544", "0.6203148", "0.6185179", "0.61634254", "0.61634254", "0.6148936", "0.6145023", "0.6142844", "0.61189866", "0.61155105", "0.61063814", "0.61033773", "0.6101308", "0.6085341", "0.6085159", "0.6065389", "0.60544723", "0.6052843", "0.605249", "0.6019892", "0.60048896", "0.5997486", "0.59864515", "0.598414", "0.59805673", "0.5976397", "0.5967019", "0.596648", "0.59500283", "0.5941347", "0.59412885", "0.59382343", "0.5920863", "0.5911628", "0.5899768", "0.588862", "0.5888451", "0.5885503", "0.58735067", "0.58719987", "0.5867932", "0.58512306", "0.5848661", "0.58395576", "0.5831324", "0.583128", "0.58151656", "0.5814726", "0.580957", "0.5808505", "0.5802008", "0.58014566", "0.57989013", "0.57972145" ]
0.63419765
31
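The get_log_file document above reads the starter log with errors="backslashreplace", which keeps undecodable bytes visible instead of raising UnicodeDecodeError. A small demonstration of that behavior; the file name and temp directory are arbitrary choices for the sketch. The read_db_logfile document in the next record applies the same setting to the dbserver's logfile.

    import tempfile
    from pathlib import Path

    log_file = Path(tempfile.mkdtemp()) / "starter.log"
    log_file.write_bytes(b"ok line\n\xff broken byte\n")
    text = log_file.read_text(encoding="utf-8", errors="backslashreplace")
    assert "\\xff" in text   # the bad byte survives as the literal text \xff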
get the logfile of the dbserver instance
def read_db_logfile(self): server = self.get_dbserver() assert server.logfile.exists(), "don't have logfile?" return server.logfile.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def getLogs():", "def getLogs():", "def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"", "def logger():\n return TestListenerDB()", "def get_log_path():\n return LOG_PATH", "def getLog(self):\n return self.session.request('diag/log/')", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_log()->dict:\n return execute_command(\"SELECT log FROM log\").fetchall()[0][0]", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def set_database_log(config):\n global DATABASE_LOG\n DATABASE_LOG = config", "def log (self):\n return self._log", "def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %s'%local_conf_name)\n except:\n print 'Could not remove %s. It may not exist'%(local_conf_name)\n logger.warning('Could not remove %s. It may not exist'%(local_conf_name))\n # Get remote conf file or return None if it cannot be retrieved\n try:\n get(rsyslog_conf_name,local_conf_name)\n except:\n print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id)\n logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id)\n return None\n # Open conf file or return None if it cannot be opened\n try:\n log_conf_file = open(local_conf_name,'r')\n except:\n print 'Cannot open %s from instance %s'%(local_conf_name,instance_id)\n logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id)\n return None\n return log_conf_file", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log", "def get_main_log(self) -> Any:\n return self.logger", "def get_console_log_filename(self):\n return", "def log(self):\n return self._log", "def log(self):\n return self._log", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! 
\" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def log(self):\r\n return self._log", "def read_agent_logfile(self):\n server = self.get_agent()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")", "def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE", "def log(self):\n if self._log is None:\n self._log = Logger().get_logger(self.__class__.__name__)\n return self._log", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def getLog(self):\n \n return self.resp[\"log\"]", "def logger():\n global logger\n # create logger with 'syncsolr'\n logger = logging.getLogger('scheduleAddData')\n logger.setLevel(logging.DEBUG)\n\n # specifies the lowest severity that will be dispatched to the appropriate destination\n\n # create file handler which logs even debug messages\n fh = logging.FileHandler('scheduleUpdateData.log')\n # fh.setLevel(logging.WARN)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n # StreamHandler instances send messages to streams\n # ch.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(ch)\n logger.addHandler(fh)", "def log():\n return logging.getLogger(__name__)", "def logfile(self, logfilename=None):\n if logfilename is None:\n loggingtimestring = time.asctime().replace(\" \", \"_\").replace(':', '-')\n logfilename = os.path.expanduser(\"~\" + os.sep + \"commslog_\" + type(self).__name__ + \"_\" + loggingtimestring + \".txt\")\n \n self._commslogfilename = logfilename\n print 'Logging : %r' % (self._OIFlogging)\n print 'Log File : %s' % (self._commslogfilename)\n return self._commslogfilename", "def getlogger(self):\n return self.logger", "def get_error_log(self) -> Any:\n return self.err", "def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")", "def log(self) -> misc_.Logger:\n\t\treturn self._log", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def getLog():\n # assign a current working directory + '/logs' to log_dir variable (platform independent)\n log_dir = os.path.join(os.getcwd(), \"logs\")\n # or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"logs\")\n # or --> user directory: log_dir = os.path.join(os.path.expanduser(\"~\"), \"logs\")\n\n try:\n # if logs directory(!) 
doesn't exist, create it\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n # open log file with prefix and timestamp (platform independent) in Append mode\n log = open(os.path.join(log_dir, \"rfaRunner_\" + getCurTime(\"%Y%m%d_%H-%M\") + \".log\"), \"a\")\n return log\n except (OSError, IOError):\n # return -1 in case of exception\n return -1", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def getLog(self):\n pass", "def get_logging_dir(self):\n return self.logging_dir", "def get_server_logs(self, server_id):\n status, data, errors, messages = self._make_get_request(MCAPIRoutes.GET_LOGS, extra_params={'id': server_id})\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"", "def get_full_log(self):\n return self._get_log('full')", "def _get_logger(self):", "def log(self) -> DagsterLogManager:\n return self._step_execution_context.log", "def get_stderr(self):\n return self._get_log('stderr')", "def get_logger(context):\n Log.job_log = logging.getLogger(context)\n return Log.job_log", "def get_logger(self):\n return self.__logger", "def get_logger(self):\n return self.logger", "def get_logger(self):\n return self.logger", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def __logger(self):\n return self._ThreadedSocket__logger", "def logs(self, container: Container) -> str:", "def get_hedge_logfile(dir):\n logfiles = Path(dir).glob('*')\n max_m_time = -1\n current_log = None\n for lf in logfiles:\n mtime = lf.stat().st_mtime\n if mtime > max_m_time:\n max_m_time = mtime\n current_log = lf\n if current_log:\n logging.debug(f\"Reading hedge logfile {current_log}.\")\n else:\n logging.warning(f\"No hedge log found. 
Ensure hedge logfile is being written and correct address is entered in settings.\")\n return current_log", "def getLogger(self):\n \n logging.basicConfig(filename=self.__logfile, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n return logging.getLogger('loggerAdv')", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def get_log(self):\n return Gumtree.gumtree.getLog() + ';'", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def logger(self):\n return self.logging", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def saslog(self):\n return self._log", "def getLogSession(self):\n return self.session.request('diag/logSession/')", "def logpath(self):\n return self.outpath", "def get_logger(self, verbose):\n log_levels = [logging.INFO, logging.DEBUG]\n\n log = logging.getLogger()\n log.setLevel(log_levels[int(verbose)])\n \n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(log_levels[int(verbose)])\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n ch.setFormatter(formatter)\n log.addHandler(ch)\n\n return log", "def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def logger(self):\r\n return self._logger", "def logger(self):\n return self._pinnacle.logger", "def get_logger(verbose, debug):\n if debug:\n level = logging.DEBUG\n elif verbose:\n level = logging.INFO\n else:\n level = logging.WARNING\n\n # hahahahaha\n class UtcFormatter(logging.Formatter):\n converter = time.gmtime\n\n formatter = UtcFormatter(\"%(message)s\" if not verbose else \"%(asctime)s [%(levelname)-5s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S%z\")\n\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n\n logger = logging.Logger('s3cfdeploy')\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def get_log():\n set_ctime()\n f = open(log_path, 'r')\n o = get_offset()\n f.seek(int(o))\n return f", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def logdir(self) -> str:\n return self._logdir", "def logger(self):\n return self._logger", "def logger(self):\n return self._logger", "def getLogger(self):\n return self.logger", "def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')", "def get_connection_logging(self, loadbalancer):\n return loadbalancer.connection_logging", "def logs(self):\n return self.logger.logs()", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n 
log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log", "def get_fifteen_logfile():\n return \"fifteenStrategy\" + get_day() + \".log\"", "def services_log(slave_id):\n handler = None\n for kwargs in (dict(socktype=socket.SOCK_RAW), dict(socktype=socket.SOCK_STREAM), dict()):\n try:\n handler = logging.handlers.SysLogHandler(\n facility=logging.handlers.SysLogHandler.LOG_LOCAL7, address='/dev/log', **kwargs)\n break\n except (IOError, TypeError):\n pass\n logger = logging.getLogger('[{slave_id}] {name}'.format(name=__name__, slave_id=slave_id))\n logger.setLevel(logging.DEBUG)\n if handler:\n logger.propagate = 0\n logger.addHandler(handler)\n return logger", "def get_logs(self):\n return self.network.get_logs()", "def GetLogFile (physical = False) :\n if sys.hal_log_values [\"__log_file\"] is None :\n if physical :\n path = GetPath ()\n if sys.hal_log_values [\"__log_file_name\"] is None :\n if os.path.exists (path) : sys.hal_log_values [\"__log_file_name\"] = os.path.join (path, sys.hal_log_values [\"__log_const\"])\n else : raise PQHException (\"unable to create a log file in folder \" + path)\n \n if not isinstance (sys.hal_log_values [\"__log_file_name\"], str) :\n sys.hal_log_values [\"__log_file\"] = sys.hal_log_values [\"__log_file_name\"]\n else :\n try : \n sys.hal_log_values [\"__log_file\"] = open (sys.hal_log_values [\"__log_file_name\"], \"w\", encoding=\"utf-8\")\n except Exception as e: \n raise OSError (\"unable to create file \" + sys.hal_log_values [\"__log_file_name\"] + \"\\n\" + str(e))\n else :\n sys.hal_log_values [\"__log_file\"] = LogFakeFileStream()\n \n return sys.hal_log_values [\"__log_file\"]", "def get_log(*args, **kwargs):\n return get_log_async(*args, **kwargs).get_result()", "def get_action_logfile():\n return \"action\" + get_day() + \".log\"", "def get_logger():\n return logging.getLogger(__name__)", "def _get_log_filename(self):\n fnd = self._get_session_dir()\n fn = os.path.join(fnd, '%s.log' % self.timestamp.time_string())\n\n if not os.path.exists(fn):\n with open(fn, 'wt') as log_file:\n log_file.write('Log Created %s by ' % str(datetime.now()))\n log_file.write('%s V%s\\n' % (__PROGRAM_NAME__, __VERSION__))\n\n return fn", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def logs_directory(self):", "def getLogger():\n return GlobalLogger.logger", "def get_browser_console_log(self):\n try:\n log = self.__driver.get_log('browser')\n print('log')\n return log\n except Exception as e:\n print(\"Exception when reading Browser Console log\")\n print(str(e))", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs" ]
[ "0.7550862", "0.7224536", "0.6880102", "0.6592017", "0.6583927", "0.6482713", "0.6482713", "0.6469686", "0.6463902", "0.63621795", "0.63040406", "0.6289661", "0.6283953", "0.6207564", "0.6207564", "0.6109618", "0.6103252", "0.6098555", "0.6073537", "0.60647166", "0.6054933", "0.6024553", "0.6009368", "0.59929013", "0.5992791", "0.5992791", "0.5959552", "0.59518474", "0.59449756", "0.5941459", "0.59327024", "0.5927101", "0.5907797", "0.5869269", "0.58406466", "0.5840638", "0.5835412", "0.583466", "0.58295864", "0.58263063", "0.58218336", "0.58138", "0.58092016", "0.58060664", "0.5803791", "0.57764924", "0.57448417", "0.57387", "0.5720041", "0.5700986", "0.5697554", "0.56956667", "0.5678812", "0.5658134", "0.5651725", "0.5651725", "0.56308347", "0.561557", "0.56129086", "0.5592959", "0.5589501", "0.5580845", "0.5577377", "0.55551094", "0.55506563", "0.5529727", "0.55231875", "0.5515042", "0.5514616", "0.5510034", "0.55093056", "0.5500578", "0.54988587", "0.5491923", "0.5490946", "0.5484199", "0.5474955", "0.5468409", "0.54672545", "0.54672545", "0.54663956", "0.54519427", "0.54491895", "0.5441408", "0.54365367", "0.54364854", "0.54315174", "0.54314196", "0.5428373", "0.54203576", "0.5404618", "0.53886425", "0.5381187", "0.5376523", "0.5374713", "0.5373962", "0.5363183", "0.5353621", "0.53502876", "0.53502876" ]
0.7439977
1
get the agent logfile of this instance
def read_agent_logfile(self):
    server = self.get_agent()
    assert server.logfile.exists(), "don't have logfile?"
    return server.logfile.read_text(errors="backslashreplace")
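A standalone illustration of why both documents read with `errors="backslashreplace"`: logfiles may contain bytes that are not valid UTF-8, and this error handler keeps the read from raising. The path and bytes below are assumptions for the demo, and a UTF-8 locale is assumed for the default encoding.

from pathlib import Path

logfile = Path("/tmp/agent.log")  # hypothetical path
logfile.write_bytes(b"starting up\n\xff\xfe garbled line\n")
text = logfile.read_text(errors="backslashreplace")
print(text)  # invalid bytes appear as \xff\xfe instead of raising UnicodeDecodeError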
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def log(self) -> DagsterLogManager:\n return self._step_execution_context.log", "def log (self):\n return self._log", "def getLog(self):\n return self.session.request('diag/log/')", "def log(self):\n return self._log", "def log(self):\n return self._log", "def getLog(self):\n \n return self.resp[\"log\"]", "def get_main_log(self) -> Any:\n return self.logger", "def log(self, namespace, agent, ltype: int = 0):\n if isinstance(agent, Agent):\n agent = agent.uid\n\n with self.lock:\n with self.zip.open(f'{namespace}/logs/{agent}_{ltype}.txt', 'r') as log:\n return log.read().decode('utf-8')", "def _create_agent_log():\n log_file = SETTINGS['agent.log_file']\n if not log_file.endswith('.rollbar'):\n log.error(\"Provided agent log file does not end with .rollbar, which it must. \"\n \"Using default instead.\")\n log_file = DEFAULTS['agent.log_file']\n\n retval = logging.getLogger('rollbar_agent')\n handler = logging.FileHandler(log_file, 'a', 'utf-8')\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n retval.addHandler(handler)\n retval.setLevel(logging.WARNING)\n return retval", "def getlogger(self):\n return self.logger", "def log(self):\n if self._log is None:\n self._log = Logger().get_logger(self.__class__.__name__)\n return self._log", "def getLog(self):\n pass", "def log(self) -> misc_.Logger:\n\t\treturn self._log", "def getLogger(self):\n \n logging.basicConfig(filename=self.__logfile, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n return logging.getLogger('loggerAdv')", "def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log", "def log(self):\r\n return self._log", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def get_logger(self):\n return self.logger", "def get_logger(self):\n return self.logger", "def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")", "def getLogs():", "def getLogs():", "def get_logger(self):\n return self.__logger", "def logger(self):\n return self.logging", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def get_log_path():\n return LOG_PATH", "def getLog():\n # assign a current working directory + '/logs' to log_dir variable (platform independent)\n log_dir = os.path.join(os.getcwd(), \"logs\")\n # or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"logs\")\n # or --> user directory: log_dir = os.path.join(os.path.expanduser(\"~\"), \"logs\")\n\n try:\n # if logs directory(!) 
doesn't exist, create it\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n # open log file with prefix and timestamp (platform independent) in Append mode\n log = open(os.path.join(log_dir, \"rfaRunner_\" + getCurTime(\"%Y%m%d_%H-%M\") + \".log\"), \"a\")\n return log\n except (OSError, IOError):\n # return -1 in case of exception\n return -1", "def logger(self):\r\n return self._logger", "def logger(self):\n return self._logger", "def logger(self):\n return self._logger", "def logpath(self):\n return self.outpath", "def get_logging_dir(self):\n return self.logging_dir", "def get_log(self):\n return Gumtree.gumtree.getLog() + ';'", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def logger(self):\n return self._pinnacle.logger", "def getLogger(self):\n return self.logger", "def trigger_logfile(self) -> str:\n return f'''trigger-id-{self.thread_trigger_id}.log'''", "def logdir(self) -> str:\n return self._logdir", "def logfile(self, logfilename=None):\n if logfilename is None:\n loggingtimestring = time.asctime().replace(\" \", \"_\").replace(':', '-')\n logfilename = os.path.expanduser(\"~\" + os.sep + \"commslog_\" + type(self).__name__ + \"_\" + loggingtimestring + \".txt\")\n \n self._commslogfilename = logfilename\n print 'Logging : %r' % (self._OIFlogging)\n print 'Log File : %s' % (self._commslogfilename)\n return self._commslogfilename", "def read_db_logfile(self):\n server = self.get_dbserver()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! 
\" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def logger(self):\n return logging", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def get_logger(context):\n Log.job_log = logging.getLogger(context)\n return Log.job_log", "def _get_logger(self):", "def logger(self):\n return logging.getLogger(self.logger_name)", "def logs(self):\n return self.logger.logs()", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def logger(self):\n if not self._state[\"logger\"]:\n self.prepare()\n return self._state[\"logger\"]", "def logfile(targetfile=\"ros.log\"):\n log = logging.getLogger(__name__)\n log.basicConfig(filename=str(targetfile))", "def saslog(self):\n return self._log", "def get_hedge_logfile(dir):\n logfiles = Path(dir).glob('*')\n max_m_time = -1\n current_log = None\n for lf in logfiles:\n mtime = lf.stat().st_mtime\n if mtime > max_m_time:\n max_m_time = mtime\n current_log = lf\n if current_log:\n logging.debug(f\"Reading hedge logfile {current_log}.\")\n else:\n logging.warning(f\"No hedge log found. 
Ensure hedge logfile is being written and correct address is entered in settings.\")\n return current_log", "def get_logger(self, model):\n return idaeslog.getInitLogger(model.name, self.get_output_level())", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def log(self) -> Optional[Logger]:\n return self._log", "def get_action_logfile():\n return \"action\" + get_day() + \".log\"", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def get_full_log(self):\n return self._get_log('full')", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def get_error_log(self) -> Any:\n return self.err", "def get_log(self):\n return self._get_property(core.SVN_PROP_REVISION_LOG)", "def getLogSession(self):\n return self.session.request('diag/logSession/')", "def log():\n return logging.getLogger(__name__)", "def log(self):\n return self.cpp_analyzer.user_log;", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def mod_log(self) -> ModLog:\n return self.bot.get_cog(\"ModLog\")", "def get_console_log_filename(self):\n return", "def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"", "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def log(self):\n if not self._logger:\n self._logger = logging.getLogger('vaping.plugins.' 
+ self.plugin_type)\n return self._logger", "def get_logs(self):\n return self.network.get_logs()", "def logs(self) -> Sequence['outputs.GetElasticsearchLogResult']:\n return pulumi.get(self, \"logs\")", "def _get_job_log(self, extended_slug=None, job_id=None):\n filename = get_filename(extended_slug)\n if filename:\n print('using {0}'.format(filename))\n file = codecs.open(filename, 'r', 'utf-8')\n log = Log.from_file(file)\n if not job_id:\n job = get_job(self._travis, extended_slug)\n print('set job_id={0}'.format(job.id))\n else:\n job = self._travis.job(job_id)\n assert job.log != ''\n save_job_log(job)\n log = job.log\n\n return log", "def info_log(self):\n return self._info_log", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def getLogger():\n return GlobalLogger.logger", "def __logger(self):\n return self._ThreadedSocket__logger", "def get_stderr(self):\n return self._get_log('stderr')", "def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')", "def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)", "def logger(self):\n pass", "def get_log(*args, **kwargs):\n return get_log_async(*args, **kwargs).get_result()", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def logging(self):\n conf = self.get(\"logging\")\n level = conf[\"level\"]\n if os.environ.get(\"DEBUG_FG21SIM\"):\n print(\"DEBUG: Force 'DEBUG' logging level\", file=sys.stderr)\n level = \"DEBUG\"\n # logging handlers\n handlers = []\n stream = conf[\"stream\"]\n if stream:\n handlers.append(StreamHandler(getattr(sys, stream)))\n logfile = conf[\"filename\"]\n filemode = conf[\"filemode\"]\n if logfile:\n handlers.append(FileHandler(logfile, mode=filemode))\n #\n logconf = {\n \"level\": getattr(logging, level),\n \"format\": conf[\"format\"],\n \"datefmt\": conf[\"datefmt\"],\n \"filemode\": filemode,\n \"handlers\": handlers,\n }\n return logconf", "def get_terraform_install_log_file(self):\n return self.terraform_install_log" ]
[ "0.79751503", "0.73155695", "0.72758114", "0.72477007", "0.72477007", "0.70513636", "0.6913755", "0.68768865", "0.68585616", "0.6792453", "0.6792453", "0.6751899", "0.6743848", "0.6700573", "0.66865516", "0.66694623", "0.66520137", "0.66206974", "0.66192824", "0.66146636", "0.6584084", "0.65412825", "0.65354013", "0.6520273", "0.6520273", "0.65137196", "0.65035045", "0.65035045", "0.64954853", "0.6485503", "0.6471909", "0.64441806", "0.64418006", "0.6434292", "0.6405168", "0.6405168", "0.6394812", "0.63886803", "0.63711524", "0.63413066", "0.6316121", "0.6207158", "0.6192957", "0.615575", "0.61500585", "0.61496615", "0.6147825", "0.6146546", "0.61063606", "0.6105824", "0.6090401", "0.6087695", "0.60415936", "0.6035407", "0.60296285", "0.6024509", "0.6018089", "0.60024506", "0.6001411", "0.5971794", "0.5953523", "0.5949536", "0.59453726", "0.5939423", "0.59254175", "0.59240913", "0.5920164", "0.59199184", "0.59199184", "0.59137195", "0.5909056", "0.5902161", "0.590204", "0.59014374", "0.59007114", "0.58916074", "0.58691144", "0.58604956", "0.58603483", "0.5858303", "0.58555126", "0.5844357", "0.58389986", "0.58331686", "0.5826293", "0.5804833", "0.5784683", "0.57749665", "0.5752103", "0.57506466", "0.5749342", "0.5747457", "0.57151073", "0.57075083", "0.56953615", "0.56750596", "0.5643062", "0.5636801", "0.56340873", "0.56327516" ]
0.7742073
1
see which arangods were spawned and inspect their logfiles
def detect_instances(self):
    lh.subsection("Instance Detection for {0.name}".format(self))
    jwt = self.get_jwt_header()
    self.all_instances = []
    logging.debug("waiting for frontend")
    logfiles = set()  # logfiles that can be used for debugging
    # the more instances we expect to spawn the more patient:
    tries = 10 * self.expect_instance_count
    # Wait for the frontend to become alive.
    all_instances_up = False
    while not all_instances_up and tries:
        self.all_instances = []
        detected_instances = []
        sys.stdout.write(".")
        sys.stdout.flush()
        for root, dirs, files in os.walk(self.basedir):
            for onefile in files:
                # logging.debug("f: " + root + os.path.sep + onefile)
                if onefile.endswith("log"):
                    logfiles.add(str(Path(root) / onefile))
            for name in dirs:
                # logging.debug("d: " + root + os.path.sep + name)
                match = None
                instance_class = None
                if name.startswith("sync"):
                    match = re.match(r"(syncmaster|syncworker)(\d*)", name)
                    instance_class = SyncInstance
                else:
                    match = re.match(
                        r"(agent|coordinator|dbserver|resilientsingle|single)(\d*)",
                        name,
                    )
                    instance_class = ArangodInstance
                # directory = self.basedir / name
                if match and len(match.group(2)) > 0:
                    # we may see a `local-slave-*` directory in between,
                    # hence we need to choose the current directory, not
                    # the starter toplevel dir, for this:
                    instance = instance_class(
                        match.group(1),
                        match.group(2),
                        self.cfg.localhost,
                        self.cfg.publicip,
                        Path(root) / name,
                        self.passvoid,
                        self.cfg.ssl,
                        self.cfg.version,
                        self.enterprise,
                        jwt=jwt,
                    )
                    instance.wait_for_logfile(tries)
                    instance.detect_pid(
                        ppid=self.instance.pid,
                        full_binary_path=self.cfg.real_sbin_dir,
                        offset=0,
                    )
                    detected_instances.append(instance.instance_type)
                    self.all_instances.append(instance)
        print(self.expect_instances)
        detected_instances.sort()
        print(detected_instances)
        attach(str(self.expect_instances), "Expected instances")
        attach(str(detected_instances), "Detected instances")
        if (self.expect_instances != detected_instances) or (not self.get_frontends()):
            tries -= 1
            time.sleep(5)
        else:
            all_instances_up = True
    if not self.get_frontends():
        print()
        logging.error("STARTER FAILED TO SPAWN ARANGOD")
        self.show_all_instances()
        logging.error("cannot continue without frontend instance")
        logging.error("please check logs in " + str(self.basedir))
        for logf in logfiles:
            logging.debug(logf)
        message = "if that does not help try to delete: " + str(self.basedir)
        logging.error(message)
        raise Exception(message)
    self.show_all_instances()
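The core of the document above is the directory scan that classifies spawned arangod instances by name. A self-contained sketch of that matching logic, with all names and the example layout as assumptions (a starter basedir holding one subdirectory per instance, e.g. "agent3546", "dbserver8629", "syncmaster8542"):

import os
import re
from pathlib import Path

def classify_instance_dirs(basedir):
    # Walk the starter directory tree and collect (role, number, path)
    # for every subdirectory whose name matches an instance pattern.
    detected = []
    for root, dirs, _files in os.walk(basedir):
        for name in dirs:
            match = re.match(
                r"(agent|coordinator|dbserver|resilientsingle|single"
                r"|syncmaster|syncworker)(\d+)",
                name,
            )
            if match:
                detected.append(
                    (match.group(1), int(match.group(2)), Path(root) / name)
                )
    return sorted(detected)

# e.g. classify_instance_dirs("/tmp/starter") ->
# [("agent", 3546, PosixPath("/tmp/starter/agent3546")), ...]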
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLogs():", "def getLogs():", "def getArchLogs(self):\n\n # Implement checkFiles() for archs?\n\n # Pull log file\n if self.nbDetails['proc']['archLog'] is not None:\n result = self.c.get(self.nbDetails['proc']['archLog'])\n print(f\"Pulled archive creation log {result.remote} to {result.local}\")\n else:\n print(f\"Archives not yet written.\")", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def open_logs():\n\treturn log, action_log, error_log", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))", "def logs_directory(self):", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def log_diagnostics(self, paths):\n\t\tpass", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return 
logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def parse_orthologs(self):\n counted_orthologs = {}\n for file in glob(f'{self.ortholog_folder}/*.fas'):\n for record in SeqIO.parse(file, 'fasta'):\n if record.description not in counted_orthologs:\n counted_orthologs[record.description] = 1\n else:\n counted_orthologs[record.description] += 1\n return counted_orthologs", "def test_log_file_exists(shutdown_only):\n ray.init(num_cpus=1)\n session_dir = ray._private.worker.global_worker.node.address_info[\"session_dir\"]\n session_path = Path(session_dir)\n log_dir_path = session_path / \"logs\"\n\n log_rotating_component = [\n (ray_constants.PROCESS_TYPE_DASHBOARD, [\".log\", \".err\"]),\n (ray_constants.PROCESS_TYPE_DASHBOARD_AGENT, [\".log\"]),\n (ray_constants.PROCESS_TYPE_RUNTIME_ENV_AGENT, [\".log\", \".out\", \".err\"]),\n (ray_constants.PROCESS_TYPE_LOG_MONITOR, [\".log\", \".err\"]),\n (ray_constants.PROCESS_TYPE_MONITOR, [\".log\", \".out\", \".err\"]),\n (ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER, [\".log\"]),\n (ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER, [\".log\"]),\n # Below components are not log rotating now.\n (ray_constants.PROCESS_TYPE_RAYLET, [\".out\", \".err\"]),\n (ray_constants.PROCESS_TYPE_GCS_SERVER, [\".out\", \".err\"]),\n (ray_constants.PROCESS_TYPE_WORKER, [\".out\", \".err\"]),\n ]\n\n # Run the basic workload.\n @ray.remote\n def f():\n for i in range(10):\n print(f\"test {i}\")\n\n # Create a runtime env to make sure dashboard agent is alive.\n ray.get(f.options(runtime_env={\"env_vars\": {\"A\": \"a\", \"B\": \"b\"}}).remote())\n\n paths = list(log_dir_path.iterdir())\n\n def component_and_suffix_exists(component, paths):\n component, suffixes = component\n for path in paths:\n filename = path.stem\n suffix = path.suffix\n if component in filename:\n # core-worker log also contains \"worker keyword\". 
We ignore this case.\n if (\n component == ray_constants.PROCESS_TYPE_WORKER\n and ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER in filename\n ):\n continue\n if suffix in suffixes:\n return True\n else:\n # unexpected suffix.\n return False\n\n return False\n\n for component in log_rotating_component:\n assert component_and_suffix_exists(component, paths), (component, paths)", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)", "def StartLookingForEvents(self):\n for process_logger in self.process_loggers:\n process_logger.looking = True", "def main():\n args = parse_args()\n logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)\n\n max_age = datetime.timedelta(minutes=args.critical_age_minutes)\n now = args.fake_time or datetime.datetime.now(datetime.timezone.utc)\n oldest_acceptable_time = now - max_age\n logging.info('Current time is %s%s.',\n 'overridden for testing to ' if args.fake_time else '',\n now.isoformat())\n logging.info('Looking for log filenames dated %s or newer.',\n oldest_acceptable_time.isoformat())\n\n bucket = get_s3_bucket(args.env)\n s3_objs = objs_with_prefix(bucket, args.log_type, oldest_acceptable_time)\n if now.day != oldest_acceptable_time.day:\n s3_objs += objs_with_prefix(bucket, args.log_type, now)\n\n status = status_for_icinga(s3_objs, oldest_acceptable_time)\n print(status.name)\n sys.exit(status)", "def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))", "def log_paths(self): # pylint:disable=function-redefined\n return self._log_paths", "def showAllLogs():\n\t#Add sections to log screen\n\tallLogs=findFiles(getWorkingDirectory(),\".log\")\n\tcounter=-1\n\tfor l in allLogs:\n\t\tcounter+=1\n\t\tbase=getRootName(l)\n\t\tif base in logDict:\n\t\t\tbase=logDict[base]\n\t\t#Add to selection bar\n\t\tlogSelectionBar.addTab(base,command=lambda n=l: 
displayLog(n))\n\t\t#Store\n\t\tloadedLogs[counter]=l", "def main(args):\n data_directory = args.data_directory\n\n # create a list of dataframes for each of the 3 driving logs by reading their .csv files\n logs = ['Center', 'Left', 'Right']\n dfs = [pd.read_csv(os.path.join(data_directory, '{}_driving_log.csv'.format(log))) for log in logs]\n\n # create a dictionary containing the number of rows in each driving log file\n row_counts = {}\n for i, df in enumerate(dfs):\n row_counts[logs[i]] = df.shape[0]\n\n # print the number of rows in each driving log file\n print('Log files', logs)\n print('Row counts', row_counts)\n\n # get the minimum number of rows from the driving logs\n min_rows = row_counts[min(row_counts, key=lambda x: row_counts.get(x))]\n print('Minimum rows', min_rows)\n\n # create the dataframe to hold the entire driving log data\n combined_df = pd.DataFrame(columns=['Center', 'Left', 'Right', 'Angle'])\n\n # use the minimum rows as the iteration index ensuring each driving log has the particular row\n for i in range(min_rows):\n\n # grab the angles from each of the driving logs at the particular row\n angles = [df.iloc[i]['Angle'] for df in dfs]\n\n # calculate the average angle between the driving logs for that row\n average_angle = round(sum(angles) / 3, 3)\n\n # add a row to the concatenated driving log\n # each row contains the path to the centre, left and right images\n # as well as the averaged steering angle\n combined_df.loc[i] = [\n dfs[0].iloc[i]['Center'],\n dfs[1].iloc[i]['Left'],\n dfs[2].iloc[i]['Right'],\n average_angle\n ]\n\n # write the final driving log to a .csv file\n combined_df.to_csv(os.path.join(data_directory, 'driving_log.csv'), index=False)", "def getLog(self):\n pass", "def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename", "def FindStuffToDo(self):\n\n while self.topdir.endswith('/'):\n self.topdir = self.topdir[:-1]\n if hasattr(self, 'LogProcess'):\n self.LogProcess()\n\n# Look for data to process.\n self.WalkPath(self.topdir)\n if os.path.islink('%s/anatomicals' % self.topdir):\n# os.walk won't follow links, so do this one manually.\n if not os.path.exists('%s/dicoms' % self.topdir):\n# Don't do a duplicate search.\n pathname = os.readlink('%s/anatomicals' % self.topdir)\n self.WalkPath(pathname)\n\n# Pair-up fieldmaps with EPI's\n self._SetFmapInfo()\n\n# Pair fieldmaps with strucural images.\n self._SetAnatTgts()\n\n# Assocate a ref.dat file with each EPI.\n self._GetRefdat()\n\n self._MakeEpiScratchDir()\n\n# Order the EPIs so the names are correct.\n self._GetEpiOrder()\n\n# Associate each EPI with an anatomical, determine if it was\n# acquired before or after the epi\n self._SetBaseEpi()\n\n self.motcor_summary = self.SummarizeMotionTargets()\n f = open('%s/motion_corr.txt' % self.logdir, 'w')\n f.write(self.motcor_summary)\n f.close()\n if self.verbose:\n print self.motcor_summary", "def log_manager(self, source):\n if self.fail_count[source]:\n if not (self.dname.split('.')[-1] in self.ofr_list):\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.append(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + self.error_code\n self.sys_chans['fail'].setValue(1)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n\n if self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_err', self.ps_error, 
self.ofr_list, self.fail_count)\n elif self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_still_out', self.ps_error, self.ofr_list, self.fail_count)\n s = 0\n for k, v in self.fail_count.items():\n s += v\n if not s:\n if self.dname.split('.')[-1] in self.ofr_list:\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.delete(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + 'PS IS RUNNING'\n self.sys_chans['fail'].setValue(0)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n else:\n log = ''\n for k, v in self.fail_count.items():\n if v:\n log = log + k + '|'\n log = log[:-1]\n # self.sys_chans['errcode'].setValue(json.dumps(log))", "def get_logs(self, name):\n logs = self.get_status()\n\n for pod in self.list_pods(namespace=self.project):\n if name in pod.name: # get just logs from pods related to app\n pod_logs = pod.get_logs()\n if pod_logs:\n logs += pod_logs\n\n return logs", "def print_experiments(self):\n for env, agents in self.experiment_structure.items():\n for agent, _ in agents.items():\n self.logger.info('Environment: {}\\tAgent: {}'.format(env, agent))", "def test_call_logs_run(self):\r\n fd, log_path = mkstemp(\r\n prefix='RtaxTaxonAssignerTests_', suffix='.fasta')\r\n close(fd)\r\n self._paths_to_clean_up.append(log_path)\r\n\r\n p = RtaxTaxonAssigner({\r\n 'reference_sequences_fp': self.reference_seqs_fp,\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_fp,\r\n 'read_1_seqs_fp': self.read_1_seqs_fp})\r\n\r\n self._paths_to_clean_up += self.cleanAll(self.read_1_seqs_fp)\r\n\r\n actual = p(self.input_seqs_fp, log_path=log_path)\r\n\r\n log_file = open(log_path)\r\n log_file_str = log_file.read()\r\n log_file.close()\r\n # stderr.write(log_file_str)\r\n log_file_exp = [\r\n \"RtaxTaxonAssigner parameters:\",\r\n \"Application:RTAX classifier\",\r\n \"Citation:Soergel D.A.W., Dey N., Knight R., and Brenner S.E. 2012. Selection of primers for optimal taxonomic classification of environmental 16S rRNA gene sequences. 
ISME J (6), 1440-1444\",\r\n \"amplicon_id_regex:(\\S+)\\s+(\\S+?)\\/\",\r\n \"header_id_regex:\\S+\\s+(\\S+?)\\/\",\r\n \"id_to_taxonomy_fp:%s\" % self.id_to_taxonomy_fp,\r\n \"read_1_seqs_fp:%s\" % self.read_1_seqs_fp,\r\n \"read_2_seqs_fp:None\",\r\n \"read_id_regex:\\S+\\s+(\\S+)\",\r\n \"reference_sequences_fp:%s\" % self.reference_seqs_fp,\r\n \"single_ok:False\",\r\n \"no_single_ok_generic:False\"\r\n ]\r\n # compare data in log file to fake expected log file\r\n # NOTE: Since p.params is a dict, the order of lines is not\r\n # guaranteed, so testing is performed to make sure that\r\n # the equal unordered lists of lines is present in actual and expected\r\n self.assertItemsEqual(log_file_str.split('\\n')[0:12], log_file_exp)", "def print_logs(self, shell=False):\n for l, v in self.logs(shell).items():\n print('\\n### Container ', l, ', id ', v.get('id', 'None'), ' ###\\n')\n for part in ['stdout', 'stderr']:\n print('##', part, '##')\n print(v[part])", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages", "def _consolidate_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n with open(fn) as f:\n logger.info(\"Log from thread {0}:\\n{1}\".format(i, f.read()))\n open(fn, \"w\").write(\"\")", "def _log_file_processing_stats(self, known_file_paths):\n # File Path: Path to the file containing the DAG definition\n # PID: PID associated with the process that's processing the file. May\n # be empty.\n # Runtime: If the process is currently running, how long it's been\n # running for in seconds.\n # Last Runtime: If the process ran before, how long did it take to\n # finish in seconds\n # Last Run: When the file finished processing in the previous run.\n headers = [\"File Path\", \"PID\", \"Runtime\", \"# DAGs\", \"# Errors\", \"Last Runtime\", \"Last Run\"]\n\n rows = []\n now = timezone.utcnow()\n for file_path in known_file_paths:\n last_runtime = self.get_last_runtime(file_path)\n num_dags = self.get_last_dag_count(file_path)\n num_errors = self.get_last_error_count(file_path)\n file_name = os.path.basename(file_path)\n file_name = os.path.splitext(file_name)[0].replace(os.sep, \".\")\n\n processor_pid = self.get_pid(file_path)\n processor_start_time = self.get_start_time(file_path)\n runtime = (now - processor_start_time) if processor_start_time else None\n last_run = self.get_last_finish_time(file_path)\n if last_run:\n seconds_ago = (now - last_run).total_seconds()\n Stats.gauge(f\"dag_processing.last_run.seconds_ago.{file_name}\", seconds_ago)\n\n rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))\n\n # Sort by longest last runtime. 
(Can't sort None values in python3)\n rows.sort(key=lambda x: x[3] or 0.0)\n\n formatted_rows = []\n for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:\n formatted_rows.append(\n (\n file_path,\n pid,\n f\"{runtime.total_seconds():.2f}s\" if runtime else None,\n num_dags,\n num_errors,\n f\"{last_runtime:.2f}s\" if last_runtime else None,\n last_run.strftime(\"%Y-%m-%dT%H:%M:%S\") if last_run else None,\n )\n )\n log_str = (\n \"\\n\"\n + \"=\" * 80\n + \"\\n\"\n + \"DAG File Processing Stats\\n\\n\"\n + tabulate(formatted_rows, headers=headers)\n + \"\\n\"\n + \"=\" * 80\n )\n\n self.log.info(log_str)", "def collect_core_dump(self, log_dir):\n pass", "def logs(self, shell=False):\n if self.app_id:\n return self.yarn_api.logs(self.app_id, shell=shell)\n else:\n raise KnitException('Cannot get logs, app not started')", "def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def get_new_logs(log_paths,log_conf):\n if log_conf is None or log_conf.get_host() is None:\n return log_paths\n conf_logs = log_conf.get_host().get_logs()\n new_logs = [log_path for log_path in log_paths if log_path not in conf_logs]\n print 'New logs detected on %s: %s'(log_conf.get_host().get_name(), new_logs)\n logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs)\n return new_logs", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. 
\n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. \n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def log_monitors(self, as_info=False):\n log_str = ''\n count = 0\n for fd in self.callbacks:\n name = self.callbacks[fd][0]\n log_str = log_str + ', %s fd %d' % (name, fd)\n count += 1\n\n log_func = LOGGER.info if as_info else LOGGER.debug\n log_func('Monitoring %d fds %s', count, log_str[2:])\n\n return count", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def get_access_logs(file_dir=log_dir):\n \n file_list = []\n for myfile in glob.glob1(file_dir, 'access_log*'):\n file_list.append('%s/%s' % (file_dir, myfile))\n# print file_list\n return file_list", "def getInfo(self):\n mr_job_cursor = self._storage.find()\n self._logger.info(\"Current jobs registered in MapReduce manager:\")\n for mr_job in mr_job_cursor:\n self._logger.info(\"\\t%s: Processed from %s to (%s --> %s)\" %\n (mr_job[\"_dataBlobID\"], mr_job['initial'], mr_job['start'], mr_job['end']))", "def gen_paths():\n global log_dir, events_file, log_file, datastream_dir\n\n #The root log directory\n log_root_dir = os.path.join(root_dir, \"logs\")\n\n #Figure out what log file index we should use\n #The log file index is a 4-digit number corresponding to an unused log folder\n index = 0\n #If our base log_root_dir exists:\n if os.path.exists(log_root_dir):\n\n #Get existing folders, convert to string list, and sort\n folders = 
os.listdir(log_root_dir)\n ids = [int(f) for f in folders]\n ids.sort()\n\n #This algorithm determines the next sequential value for our log index, it scans through the existing numbers\n #until either it finds a missing number in sequence, or runs out of numbers to scan.\n\n #Set this to a high number to start with, as it will get set every loop iteration\n last_id = 10000\n for present_index in ids:\n #If we have a break in the number sequence, abort and use what we have\n if present_index > last_id + 1:\n break\n #If we have found a bigger number to use for index\n if present_index > index:\n index = present_index\n\n last_id = present_index\n\n #Convert from largest existing index to the index we should use!\n index += 1\n\n #Set the log_dir, which is the directory for storing all logs during this run session\n log_dir = os.path.join(log_root_dir, str(index).zfill(4))\n\n #Set the log_file, which is a dump of all console output\n log_file = os.path.join(log_dir, \"main.log\")\n\n #Set the events_file, which is where all events are recorded\n events_file = os.path.join(log_dir, \"events.rec\")\n\n #Set the datastream_dir, within which all datastreams are recorded\n datastream_dir = os.path.join(log_dir, \"datastreams\")", "def analyse_screening_setup(self):\n\n control = self.control\n logger: LoggerProperties\n\n # Perform some input checks\n # Check project path exists\n if control.project_path == \"\":\n msg = \"Cannot process: Project location not set.\"\n raise LoggerWarning(msg)\n\n # Check at least one logger exists\n if not control.loggers:\n msg = \"Cannot process: No loggers exist in setup.\"\n raise LoggerWarning(msg)\n\n # Check all ids are unique\n control.check_logger_ids()\n\n # Check logging durations and sample lengths are positive\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n if logger.duration <= 0:\n msg = f\"Cannot process: Logging duration for logger {logger.logger_id} is {logger.duration}.\\n\"\n f\"Logging duration must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # TODO: Move to logger properties as a setup function\n if control.global_process_stats is True and logger.process_stats is True:\n if logger.stats_interval <= 0:\n msg = f\"Cannot process: Statistics sample length for logger \"\n f\"{logger.logger_id} is {logger.stats_interval}.\\n\"\n f\"Statistics sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n if control.global_process_spect is True and logger.process_spect is True:\n if logger.spect_interval <= 0:\n msg = f\"Cannot process: Spectral sample length for logger \"\n f\"{logger.logger_id} is {logger.spect_interval}.\\n\"\n f\"Spectral sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # Paths to output folders\n control.set_output_paths()\n\n # Get raw filenames, check timestamps and select files in processing datetime range\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n # Store logger filenames and check file timestamps\n self.statusbar.showMessage(\n f\"Checking setup: Checking file names for {logger.logger_id}. 
Please wait...\"\n )\n self.repaint()\n logger.get_filenames()\n\n # Select files to process and, if applicable, check file timestamps are valid\n logger.set_files_to_process()\n\n # Store expected file length\n logger.expected_data_points = logger.freq * logger.duration\n\n # Get all channel names and units if not already stored in logger object\n if len(logger.all_channel_names) == 0 and len(logger.all_channel_units) == 0:\n logger.get_all_columns()\n\n # Update column list in config dashboard if this logger is the one selected\n if logger.logger_id == self.inputDataModule.loggerList.currentItem().text():\n self.inputDataModule.set_logger_columns_list(logger)\n\n # Check requested channels exist\n # Connect warning signal to warning message box in DataLab class\n try:\n # Disconnect any existing connection to prevent repeated triggerings\n logger.logger_warning_signal.disconnect()\n except TypeError:\n pass\n logger.logger_warning_signal.connect(self.warning)\n\n # Set processed channel names and units as user values, if supplied, or file header values\n logger.set_selected_column_and_units_names()\n\n # Check for any columns without any units set and see if the units is embedded in the channel name;\n # if so extract units from channel name and add to units list\n logger.check_if_units_in_channel_name()\n\n # Check number of headers match number of columns to process\n # TODO: This should already have been enforced earlier so perhaps no longer required?\n logger.check_headers()", "def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]", "def main() -> None:\n jobStatus = list()\n adt = AuditManager(\n config.get(\"Audit\").get(\"database\"),\n config.get(\"Audit\").get(\"user\"),\n config.get(\"Audit\").get(\"password\"),\n config.get(\"Audit\").get(\"host\"),\n config.get(\"Audit\").get(\"port\"),\n )\n jobMeta = adt.getStepLogData()\n adt.closeConnection()\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=config.get(\"spark\").get(\"parallelJobs\", 2)\n ) as executor:\n spark_jobs = {\n executor.submit(processFile, fileMeta): fileMeta for fileMeta in jobMeta\n }\n for status in concurrent.futures.as_completed(spark_jobs):\n fileStatus = status.result()\n jobStatus.append(fileStatus)\n logger.info(jobStatus)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def getLog():\n # assign a current working directory + '/logs' to log_dir variable (platform independent)\n log_dir = os.path.join(os.getcwd(), \"logs\")\n # or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"logs\")\n # or --> user directory: log_dir = os.path.join(os.path.expanduser(\"~\"), 
\"logs\")\n\n try:\n # if logs directory(!) doesn't exist, create it\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n # open log file with prefix and timestamp (platform independent) in Append mode\n log = open(os.path.join(log_dir, \"rfaRunner_\" + getCurTime(\"%Y%m%d_%H-%M\") + \".log\"), \"a\")\n return log\n except (OSError, IOError):\n # return -1 in case of exception\n return -1", "def log_dir_stacks_contents(dir_stacks):\r\n for directory in dir_stacks:\r\n logging.info('-'*80)\r\n logging.info('Predicted directory contents of:\\n{0}'\r\n .format(directory.path))\r\n files = directory.file_names\r\n files = sorted(files)\r\n logging.info('Number of files: {0}'.format(len(files)))\r\n logging.info('Files:')\r\n logging.info('\\t'.join(files))", "def collect_pidin_ar(self, log_dir):\n log_type = \"pidin ar\"\n log_name = \"pidin_ar.txt\"\n cmd = \"pidin ar > /tmp/{}\".format(log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd)", "def logpath(self):\n return self.outpath", "def __init__(self):\n\n #initiate logging\n file_name = os.path.splitext(sys.argv[0])\n tc_name = file_name[0].split('/')[-1]\n log_name = os.path.join(config.LOG_DIR, ''.join([tc_name, '.log']))\n log.init(log_name)\n self.logging = logging.getLogger('objects')", "def get_log_paths(root_dir: str) -> List[str]:\n paths = []\n if not tf.io.gfile.isdir(root_dir):\n raise ValueError(f'{root_dir} is not a directory.')\n for path, _, files in tf.io.gfile.walk(root_dir):\n if 'metadata.riegeli' in files:\n paths.append(path)\n return paths", "def init_error_files(self): \n \n dir_path = self.init_logs_directory()\n log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)\n \n return log_errors", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def magic_runlog(self, parameter_s =''):\n\n for f in parameter_s.split():\n self.safe_execfile(f,self.user_ns,self.user_ns,islog=1)", "def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log", "def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())", "def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname", "def instantiate_logs(self):\n\n # Log file\n timestamp = 
datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n self.log_dir = os.path.join(\"experiment_logs\", timestamp)\n\n # Create Log directory if it does not exist\n try:\n os.makedirs(self.log_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n self.info_file = os.path.join(self.log_dir, \"run_info.txt\")\n self.log_file = os.path.join(self.log_dir, \"data.csv\")\n\n with open(self.info_file, \"w+\") as f:\n f.write(\"Period = {}\\nMaxVel = {}\".format(self.period, self.max_vel))\n\n self.log_file_desc = open(self.log_file, \"w+\")\n self.log_file_desc.write(\"t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw\")", "def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()", "def build(self):\n my_cmd = self.process_cmd % (self.frun_path)\n Msg.user(\"Process Command: %s\" % (str(my_cmd)), \"LAUNCHER\")\n my_log = PathUtils.include_trailing_path_delimiter(self.frun_dir) + self.process_log\n return my_cmd, my_log", "def logs(self):\n return self.logger.logs()", "def check_paralogs():\n paralogs = set()\n paralog_fold = os.path.dirname(args.metadata)\n for file in glob.glob(f'{paralog_fold}/paralogs/*.fas'):\n for record in SeqIO.parse(file, 'fasta'):\n paralogs.add(record.name.split('.')[0])\n return paralogs", "def call_orthologs(self):\n ortholog_call = oma.OrthologFinder(self.sequence)\n try:\n self.orthologs = ortholog_call.get_HOGs()\n except exceptions.RequestException:\n self.orthologs = ortholog_call.get_orthologs()\n\n with open(\"%s.orth\" %(self.name), 'w') as o_file:\n o_file.write(self.orthologs)\n\n return os.getcwd() + os.sep + '%s.orth'%(self.name)", "def status_all(self):\n output = (\n '[ + ] acpid',\n '[ - ] alsa-utils',\n '[ + ] anacron',\n '[ + ] apparmor',\n '[ + ] apport',\n '[ + ] avahi-daemon',\n '[ + ] bluetooth',\n '[ - ] bootmisc.sh',\n '[ - ] brltty',\n '[ - ] checkfs.sh',\n '[ - ] checkroot-bootclean.sh',\n '[ - ] checkroot.sh',\n '[ + ] console-setup',\n '[ + ] cron',\n '[ + ] cups',\n '[ + ] cups-browsed',\n '[ + ] dbus',\n '[ - ] dns-clean',\n '[ + ] grub-common',\n '[ - ] hostname.sh',\n '[ - ] hwclock.sh',\n '[ + ] irqbalance',\n '[ - ] kerneloops',\n '[ - ] killprocs',\n '[ + ] kmod',\n '[ + ] lightdm',\n '[ - ] mountall-bootclean.sh',\n '[ - ] mountall.sh',\n '[ - ] mountdevsubfs.sh',\n '[ - ] mountkernfs.sh',\n '[ - ] mountnfs-bootclean.sh',\n '[ - ] mountnfs.sh',\n '[ + ] network-manager',\n '[ + ] networking',\n '[ + ] ondemand',\n '[ + ] open-vm-tools',\n '[ - ] plymouth',\n '[ - ] plymouth-log',\n '[ - ] pppd-dns',\n '[ + ] procps',\n '[ - ] rc.local',\n '[ + ] resolvconf',\n '[ - ] rsync',\n '[ + ] rsyslog',\n '[ - ] saned',\n '[ - ] sendsigs',\n '[ + ] speech-dispatcher',\n '[ + ] thermald',\n '[ + ] udev',\n '[ + ] ufw',\n '[ - ] umountfs',\n '[ - ] umountnfs.sh',\n '[ - ] umountroot',\n '[ - ] unattended-upgrades',\n '[ + ] urandom',\n '[ - ] uuidd',\n '[ + ] whoopsie',\n '[ - ] x11-common')\n for l in output:\n self.write(l + '\\n')", "def print_experiments(self):\n first = True\n for env, agents in self._experiment_structure.items():\n if not first:\n self.logger.weak_line()\n first = False\n self.logger.info(f'Environment: {env}')\n for agent, _ in agents.items():\n self.logger.info('- ' + agent)", "def sac_logs_file_location(log_dir, is_tri, running_reward_file, running_loss_file, actions_file):\n if is_tri:\n sac_module = sac_tri\n else:\n sac_module = sac\n if 
running_reward_file is None:\n running_reward_file = os.path.join(log_dir, sac_module.SacTrain.RUNNING_REWARD_FILE_NAME)\n if running_loss_file is None:\n running_loss_file = os.path.join(log_dir, sac_module.SacTrain.RUNNING_LOSS_FILE_NAME)\n if actions_file is None:\n actions_file = os.path.join(log_dir, sac_module.SacTrain.ACTIONS_FILE_NAME)\n return (running_reward_file, running_loss_file, actions_file)", "def config_logging(self):\n logging.basicConfig(filename='move_dupes.log',\n filemode='a',\n format='%(asctime)s,%(msecs)d ' +\\\n '%(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n logging.info(\"Running audio dupe mover\")\n \n return logging.getLogger('move_dupes')", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! Initialise folder first or reset your configuration!\")", "def GetAllLogFilePaths(ssh):\n ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]\n log_files = []\n try:\n files_output = utils.CheckOutput(\" \".join(ssh_cmd), shell=True)\n log_files = FilterLogfiles(files_output.splitlines())\n except subprocess.CalledProcessError:\n logger.debug(\"The folder(%s) that running launch_cvd doesn't exist.\",\n constants.REMOTE_LOG_FOLDER)\n return log_files", "def create_job_loggers(self, jobs):\n self.add_filehandler(\"apscheduler.executors.default\")\n self.add_filehandler(\"apscheduler.scheduler\")\n self.add_filehandler(\"flask_apscheduler\")\n for x in jobs:\n # Creating a logger for each job and adding a seperate filehandler for each logger. Job ids have to have the\n # same logger name of the functions that the jobs invoke. \n self.add_filehandler(x[\"id\"])", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def _get_all_run_infos(self):\r\n info_dir = self._settings.info_dir\r\n if not os.path.isdir(info_dir):\r\n return []\r\n paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]\r\n\r\n # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.\r\n # We filter only those that have a timestamp, to avoid a race condition with writing\r\n # that field.\r\n return filter(lambda d: 'timestamp' in d, [RunInfo(os.path.join(p, 'info')).get_as_dict()\r\n for p in paths if os.path.isdir(p) and not os.path.islink(p)])", "def retrieveLogs(self, execution, localLogDestination):\n # TODO: Implement this in order to get your logs out. The parent implementation will take care of cpu.log in case\n # profiling was requested. 
Example:\n #\n # execution.host.getFile( '{0}/log.log'.format( self.getExecutionLogDir( execution ) ),\n # os.path.join( localLogDestination, 'log.log' ), reuseConnection = execution.getRunnerConnection() )\n # client.retrieveLogs(self, execution, localLogDestination)\n #\n # The use of the execution.getRunnerConnection() connection prevents errors with multi-threading.\n #\n # This assumes you have no logs of your own:\n client.retrieveLogs(self, execution, localLogDestination)", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def init_logging(input_file_parameters, dir_stacks):\r\n fl_name = '{0}_log_{1}_{2}.txt'.format(NAME,\r\n START_TIME,\r\n input_file_parameters.job_name)\r\n #NOTICE! Current_log_path.path is changed here!\r\n CurrentLogPath.path = os.path.join(input_file_parameters.output_dir,\r\n fl_name)\r\n logging.basicConfig(filename=CurrentLogPath.path, filemode='w',\r\n format='%(asctime)s %(levelname)s:%(message)s',\r\n level=logging.INFO)\r\n logging.info('{0} v. {1} started'.format(NAME, VERSION))\r\n logging.info('Job name: {0}'.format(input_file_parameters.job_name))\r\n logging.info('Starting point directory:\\n{0}'.format(dir_stacks[0]\r\n .path))\r\n logging.info('Output directory:\\n{0}'.format(input_file_parameters.output_dir))\r\n logging.info('-'*80)\r\n logging.info('staplefile contents:\\n{0}'.format('\\n'.join(input_file_parameters.staplefile)))\r\n logging.info('-'*80)\r\n logging.info('config.txt contents:\\n{0}'\r\n .format(utils.get_config_file()))\r\n logging.info('-'*80)", "def openLogfileConnection(self,):\n \n #\n # Imports\n #\n import sys\n import time\n import os\n \n #\n # for logmessages\n # \n tmpLogMessages = []\n \n #\n # check if logfile present open connection or create\n #\n SEAseqPipeLine.logfile = self.analysisPath + '/logfile.txt'\n if os.path.isfile(SEAseqPipeLine.logfile):\n if self.command == 'initiateAnalysis':\n print 'ERROR: the logfile already exists please use another path to initiate the analysis.\\n'\n sys.exit(1)\n else:\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'a',1)\n SEAseqPipeLine.logfile.write('----------------\\nConnection to logfile '+SEAseqPipeLine.logfile.name+' opened.\\n')\n return 0\n else:\n tmpLogMessage = 'Creating the logfile \"'+SEAseqPipeLine.logfile+'\".\\n'\n tmpLogMessages.append(tmpLogMessage)\n print tmpLogMessage\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'w',1)\n \n return tmpLogMessages", "def write_version_logs(paths):\n try:\n versions_log = get_path(paths, 'versions_log')\n python_executable = sys.executable\n # Check if currently in a conda env\n if 'conda' in python_executable:\n if \"active environment : None\" not in str(subprocess.run(['conda', 'info'], capture_output=True)): \n conda_list= subprocess.run(['conda', 'list'], capture_output=True, text=True).stdout\n versions_log = norm_path(versions_log)\n\n with io.open(versions_log, 'w', encoding = 'utf8', errors = 'ignore') as CONDALOG:\n time_start = str(datetime.datetime.now().replace(microsecond = 0))\n print(messages.note_dash_line, file = CONDALOG)\n print(\"Versions log started: \" + time_start, file = CONDALOG)\n print(messages.note_dash_line, file = CONDALOG)\n print(conda_list, file = CONDALOG)\n\n message = 'Version logs successfully written!'\n write_to_makelog(paths, message) \n 
print(colored(message, metadata.color_success)) \n else:\n message = 'Conda environment is not activated. Please activate the environment'\n raise ColoredError(message)\n\n except:\n error_message = 'Error with `write_version_logs`. Traceback can be found below.' \n error_message = format_message(error_message) \n write_to_makelog(paths, error_message + '\\n\\n' + traceback.format_exc())\n raise ColoredError(error_message, traceback.format_exc())", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def _get_logger(self):", "def log_diagnostics(self, paths, prefix=''):\n pass", "def commandLog(self,):\n \n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n\n #\n # Add run to runs table and open connection to logfile\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, False, MASTER)\n self.openLogfileConnection()\n SEAseqPipeLine.logfile.write(self.createLogHeader())\n \n # default all types of commands run\n runTypes = self.availableCommands.keys()\n \n SEAseqPipeLine.logfile.write('Writing commandLog to standard out.\\n')\n print 'Getting runs performed with the following commands '+', '.join(runTypes[:-1])+' or '+runTypes[-1]+'.'\n print '# StartTime: \\tFinished:\\tCommand:'\n for startTime, command, commandLine, finishedSuccessfully, masterPid in self.database.getRuns(runTypes):\n print str(startTime)+' \\t'+str(bool(finishedSuccessfully))+' \\t'+str(commandLine)\n \n #\n # update runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n SEAseqPipeLine.logfile.write('Finished exiting.\\n')", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def logs(self, container: Container) -> str:", "def GetAllInstancesInfo(self, hvparams=None):\n data = []\n for file_name in os.listdir(self._ROOT_DIR):\n path = utils.PathJoin(self._ROOT_DIR, file_name)\n if self._IsDirLive(path):\n data.append((file_name, 0, 0, 0, 0, 0))\n return data", "def CHANGE_appendAll(self):\r\n # Separate new files to be loaded\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n FoI.sort()\r\n for file in FoI:\r\n print(\"Loading {}\".format(file))\r\n filePath = os.path.join(self.listenDir, file)\r\n\r\n try:\r\n (newProj, newAngle) = self.read_projection_image(filePath)\r\n\r\n self.logTiltAngles = np.append(self.logTiltAngles, newAngle)\r\n\r\n # Invert Contrast for BF-TEM\r\n if self.invert:\r\n newProj *= -1\r\n\r\n newProj = self.background_subtract(newProj)\r\n\r\n # Apply Center of Mass (if selected)\r\n if self.alignMethod == 'CoM':\r\n newProj = self.center_of_mass_align(newProj)\r\n\r\n # Account for Python's disdain for AxAx1 arrays\r\n # (compresses to 2D)\r\n if (len(self.logTiltSeries0) == 0):\r\n dataDim = np.shape(newProj)\r\n self.logTiltSeries0 = np.zeros([dataDim[0], dataDim[1], 1])\r\n self.logTiltSeries0[:, :, 0] = newProj\r\n self.wbp = wbp.WBP(dataDim[0], dataDim[1], 1)\r\n else:\r\n self.logTiltSeries0 = np.dstack((self.logTiltSeries0,\r\n newProj))\r\n\r\n self.logFiles = np.append(self.logFiles, file)\r\n\r\n except Exception:\r\n print('Could not read : {}, will proceed with reconstruction\\\r\n and re-download on next pass'.format(file))\r\n break\r\n\r\n # Apply Cross-Correlation after reading images (if selected)\r\n if self.alignMethod == 'xcor':\r\n self.logTiltSeries = self.xcorr_align(self.logTiltSeries0)\r\n # update tilt angles and sinogram\r\n self.wbp.set_tilt_series(self.logTiltSeries, self.logTiltAngles)\r\n # 
re-center tilt axis\r\n self.logTiltSeries = self.shift_tilt_axis(self.logTiltSeries,\r\n self.logTiltAngles)\r\n else:\r\n self.logTiltSeries = self.logTiltSeries0", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def start_task_manager(params):\n logname = \"%s.task\" % params['api_id']\n frmt = u'[%(asctime)s: %(levelname)s/%(processName)s] ' \\\n u'%(name)s:%(funcName)s:%(lineno)d - %(message)s'\n \n frmt = u'[%(asctime)s: %(levelname)s/%(task_name)s:%(task_id)s] '\\\n u'%(name)s:%(funcName)s:%(lineno)d - %(message)s' \n \n log_path = u'/var/log/%s/%s' % (params[u'api_package'], \n params[u'api_env'])\n run_path = u'/var/run/%s/%s' % (params[u'api_package'], \n params[u'api_env']) \n \n #loggers = [logging.getLogger('beehive.common.event')]\n #LoggerHelper.rotatingfile_handler(loggers, logger_level, \n # '%s/%s.event.log' % (log_path, logname),\n # frmt=frmt) \n \n # base logging\n loggers = [\n logging.getLogger(u'beehive'),\n logging.getLogger(u'beehive.db'),\n logging.getLogger(u'beecell'),\n logging.getLogger(u'beedrones'),\n logging.getLogger(u'celery'),\n logging.getLogger(u'proxmoxer'),\n logging.getLogger(u'requests')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n u'%s/%s.log' % (log_path, logname),\n frmt=frmt, formatter=ExtTaskFormatter)\n\n # transaction and db logging\n loggers = [\n logging.getLogger('beehive.util.data'),\n logging.getLogger('sqlalchemy.engine'),\n logging.getLogger('sqlalchemy.pool')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n '%s/%s.db.log' % (log_path, logname))\n \n # performance logging\n loggers = [\n logging.getLogger('beecell.perf')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n '%s/%s.watch' % (log_path, params[u'api_id']), \n frmt='%(asctime)s - %(message)s')\n\n api_manager = ApiManager(params, hostname=gethostname())\n api_manager.configure()\n api_manager.register_modules()\n #worker = ProcessEventConsumerRedis(api_manager)\n #from beehive.module.tasks import task_manager\n task_manager.api_manager = api_manager\n\n logger_file = '%s/%s.log' % (log_path, logname)\n\n configure_task_manager(params['broker_url'], params['result_backend'],\n tasks=params['task_module'], expire=params['expire'],\n logger_file=logger_file)\n \n argv = [u'',\n u'--loglevel=%s' % logging.getLevelName(logger_level),\n #u'--pool=prefork',\n u'--pool=gevent',\n u'--purge',\n #'--time-limit=600',\n #'--soft-time-limit=300',\n u'--concurrency=100',\n u'--maxtasksperchild=100',\n #u'--autoscale=100,10',\n u'--logfile=%s' % logger_file,\n u'--pidfile=%s/%s.task.pid' % (run_path, logname)]\n \n def terminate(*args):\n #run_command(['celery', 'multi', 'stopwait', 'worker1', \n # '--pidfile=\"run/celery-%n.pid\"'])\n task_manager.stop()\n \n #for sig in (SIGHUP, SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT):\n # signal(sig, terminate)\n \n task_manager.worker_main(argv)", "def execute_experiment_callgrind(self):\n protocol_name = self.protocol_config['protocol']\n number_of_repetitions = self.protocol_config['numOfRepetitions']\n configurations = self.protocol_config['configurations']\n working_directory = self.protocol_config['workingDirectory']\n executables = self.protocol_config['executableName']\n for i in range(number_of_repetitions):\n for idx2 in range(len(configurations)):\n for idx in range(len(executables)):\n os.system(f'fab -f Execution/fabfile.py run_protocol_profiler:{self.protocol_config_path},'\n f'{configurations[idx2]},{executables[idx]},{working_directory[idx]} --parallel | 
'\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def test_root_logger(self):\n # root logs are Stream handled\n # log_path = self.log_paths['']\n # log = logging.getLogger('df')\n # ctrl = self.md5(log_path)\n # log.debug(\"test\")\n # assert self.md5(log_path) != ctrl", "def _load_logs(self):\n logger.info(\"Loading call logs\")\n data = tichy.Persistance('calls/logs').load()\n if not data:\n return\n # TODO: add some checks for data consistency\n logs = []\n for kargs in data:\n #print kargs\n call = Call(**kargs)\n logs.append(call)\n self.logs[:] = logs" ]
[ "0.64035773", "0.64035773", "0.61854243", "0.6003146", "0.57708687", "0.5767324", "0.57531023", "0.57415503", "0.56718665", "0.5575059", "0.5452616", "0.54122627", "0.53984296", "0.53840137", "0.5376799", "0.5361503", "0.53584874", "0.53567886", "0.53554016", "0.53292704", "0.53264225", "0.5322508", "0.5277949", "0.52664465", "0.5253839", "0.52335984", "0.5212407", "0.5207186", "0.52037275", "0.51977396", "0.5193514", "0.5191106", "0.51871854", "0.5185312", "0.5181307", "0.51705796", "0.5169307", "0.51677984", "0.5167717", "0.51605755", "0.51575565", "0.5155084", "0.51321185", "0.51211625", "0.5117392", "0.511374", "0.5111361", "0.5111344", "0.51032186", "0.51021236", "0.50948584", "0.50944644", "0.5091023", "0.50861675", "0.5076414", "0.50681126", "0.50654733", "0.5065445", "0.50530326", "0.50228053", "0.50200236", "0.5018995", "0.50175244", "0.50108397", "0.50099707", "0.500928", "0.5008754", "0.50078195", "0.5006731", "0.5002409", "0.500202", "0.5001849", "0.5001716", "0.4990519", "0.49813423", "0.49780232", "0.49775115", "0.49697623", "0.49671882", "0.4960739", "0.49574903", "0.4953974", "0.49528444", "0.49516863", "0.49464044", "0.4945454", "0.49450544", "0.49450544", "0.49322987", "0.49321344", "0.49258265", "0.48942024", "0.48938957", "0.48900926", "0.4886563", "0.48837623", "0.48827717", "0.4875566", "0.48737168", "0.48715708" ]
0.5462737
10
detect the arangod instance PIDs
def detect_instance_pids(self):
        for instance in self.all_instances:
            instance.detect_pid(
                ppid=self.instance.pid,
                full_binary_path=self.cfg.real_sbin_dir,
                offset=0,
            )
        self.show_all_instances()
        self.detect_arangosh_instances(self.cfg, self.cfg.version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more instances we expect to spawn the more patient:\n tries = 10 * self.expect_instance_count\n\n # Wait for forntend to become alive.\n all_instances_up = False\n while not all_instances_up and tries:\n self.all_instances = []\n detected_instances = []\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n for root, dirs, files in os.walk(self.basedir):\n for onefile in files:\n # logging.debug(\"f: \" + root + os.path.sep + onefile)\n if onefile.endswith(\"log\"):\n logfiles.add(str(Path(root) / onefile))\n\n for name in dirs:\n # logging.debug(\"d: \" + root + os.path.sep + name)\n match = None\n instance_class = None\n if name.startswith(\"sync\"):\n match = re.match(r\"(syncmaster|syncworker)(\\d*)\", name)\n instance_class = SyncInstance\n else:\n match = re.match(\n r\"(agent|coordinator|dbserver|resilientsingle|single)(\\d*)\",\n name,\n )\n instance_class = ArangodInstance\n # directory = self.basedir / name\n if match and len(match.group(2)) > 0:\n # we may see a `local-slave-*` directory inbetween,\n # hence we need to choose the current directory not\n # the starter toplevel dir for this:\n instance = instance_class(\n match.group(1),\n match.group(2),\n self.cfg.localhost,\n self.cfg.publicip,\n Path(root) / name,\n self.passvoid,\n self.cfg.ssl,\n self.cfg.version,\n self.enterprise,\n jwt=jwt,\n )\n instance.wait_for_logfile(tries)\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n detected_instances.append(instance.instance_type)\n self.all_instances.append(instance)\n\n print(self.expect_instances)\n detected_instances.sort()\n print(detected_instances)\n attach(str(self.expect_instances), \"Expected instances\")\n attach(str(detected_instances), \"Detected instances\")\n if (self.expect_instances != detected_instances) or (not self.get_frontends()):\n tries -= 1\n time.sleep(5)\n else:\n all_instances_up = True\n\n if not self.get_frontends():\n print()\n logging.error(\"STARTER FAILED TO SPAWN ARANGOD\")\n self.show_all_instances()\n logging.error(\"can not continue without frontend instance\")\n logging.error(\"please check logs in\" + str(self.basedir))\n for logf in logfiles:\n logging.debug(logf)\n message = \"if that does not help try to delete: \" + str(self.basedir)\n logging.error(message)\n raise Exception(message)\n self.show_all_instances()", "def getIDs():", "def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. 
The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def inspire_pidstore():", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def ppid(self):", "def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))", "def pid(self):", "def get_instance_ids(temporary_user, config, state, now, tz):\n try:\n data = temporary_user.describe_instances(Filters=[{'Name':'instance-state-name', 'Values': [state]}])\n logger.info(\"The date is : {} , {}\".format(now.strftime(\"%A, %d %B %Y %H:%M:%S\"), tz))\n\n action_required, no_action_required = categorise_instances(data, config, temporary_user)\n return action_required, no_action_required\n except Exception as error:\n logger.info(\"Describing the instances failed with the following error : {}\".format(error))", "def _instantiated_ids(self):\n return self._identity_map.keys()", "def dynamic_pid(self):\n pass", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def wait_for_exec_to_start():\n node_instances = self.client.node_instances.list()\n for ni in 
node_instances:\n # this will keyerror out (and be retried) if the operation\n # didn't run yet\n pids[ni.node_id] = ni.runtime_properties['pid']", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_ids(self, instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def _get_ids_from_ip(self, ip):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip)\r\n except socket.error:\r\n return []\r\n\r\n # Find the server via ip address. First try public ip, then private\r\n results = self.list_hardware(public_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_hardware(private_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def get_ceph_pids():\n pids = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n with open(cfg['pid_file'], 'r') as file_fd:\n pids.append((srv, int(file_fd.read())))\n return pids", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def list_instance_uuids(self):\n return self.list_instances()", "def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def get_id_of_instance(self, label):\n query = read_query('id/id_of_instance') % label\n response = self._submit_query(query)\n return [elem['id']['value'] for elem in response] if response else []", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_asg_instance_ids(self, asg_name):\n instance_ids = []\n # Grab the first item in the list because we're only asking for 1 ASG\n try:\n asg_data = self.asg.describe_auto_scaling_groups(\n 
AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]\n except Exception as e: \n logger.info(e)\n return []\n\n for instance_data in asg_data['Instances']:\n instance_ids.append(instance_data['InstanceId'])\n\n return instance_ids", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def assign_ids(self, instances):\n if len(instances) == 0:\n return #no new detections to check\n\n # Compute iou with either boxes or masks:\n is_crowd = np.zeros((len(instances),), dtype=np.bool)\n if instances[0].bbox is None:\n assert instances[0].mask_rle is not None\n # use mask iou only when box iou is None\n # because box seems good enough\n rles_old = [x.mask_rle for x in self._old_instances]\n rles_new = [x.mask_rle for x in instances]\n ious = mask_util.iou(rles_old, rles_new, is_crowd)\n threshold = 0.5\n else:\n boxes_old = [x.bbox for x in self._old_instances]\n boxes_new = [x.bbox for x in instances]\n ious = mask_util.iou(boxes_old, boxes_new, is_crowd)\n threshold = 0.6\n if len(ious) == 0:\n ious = np.zeros((len(self._old_instances), len(instances)), dtype=\"float32\")\n\n # Only allow matching instances of the same label:\n for old_idx, old in enumerate(self._old_instances):\n for new_idx, new in enumerate(instances):\n if old.label != new.label:\n ious[old_idx, new_idx] = 0\n\n matched_new_per_old = np.asarray(ious).argmax(axis=1)\n max_iou_per_old = np.asarray(ious).max(axis=1)\n\n # Try to find match for each old instance:\n extra_instances = []\n for idx, inst in enumerate(self._old_instances):\n if max_iou_per_old[idx] > threshold:\n newidx = matched_new_per_old[idx]\n if instances[newidx].color is None:\n instances[newidx].color = inst.color\n if instances[newidx].objID is None:\n instances[newidx].objID = inst.objID\n continue\n # If an old instance does not match any new instances,\n # keep it for the next frame in case it is just missed by the detector\n inst.ttl -= 1\n if inst.ttl > 0:\n extra_instances.append(inst)\n\n # Assign random color to newly-detected instances:\n for inst in instances:\n if inst.color is None:\n inst.color = random_color(rgb=True, maximum=1)\n if inst.objID is None:\n #Assign random 32-bit hex key\n inst.objID = ''.join(random.choices(string.ascii_letters + string.digits, k=32))\n self._old_instances = instances[:] + extra_instances\n return [d.objID for d in instances]", "def identify_exec_running(self, record):\n return [\"running\"]", "def get_instances(dbpath):\n odb = openOdb(path=dbpath)\n _instances = []\n for _name,_inst in odb.rootAssembly.instances.items():\n _nodes = len(_inst.nodes)\n _elements = len(_inst.elements)\n _instances.append((_name,_nodes,_elements))\n return _instances", "def __init__(self, instance_ids: np.ndarray, instance_id: int):\n if (instance_id == -1):\n return\n self.instance_id = int(instance_id)\n self.gt_mask = (instance_ids == instance_id)\n self.instance_count = int((instance_ids == instance_id).sum())", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_hardware(hostname=hostname, mask=\"id\")\r\n return 
[result['id'] for result in results]", "def all_env_ids(self) -> np.ndarray:", "def get_elb_instance_ids(elbclient, elbname):\r\n try:\r\n resp = elbclient.describe_load_balancers(LoadBalancerNames=[elbname])\r\n except:\r\n print(ex.message)\r\n return None\r\n return list(map(\r\n lambda x:x['InstanceId'],\r\n resp['LoadBalancerDescriptions'][0]['Instances']\r\n ))", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def test_add_multiple_pis_simultaneously_to_vpg_check_reallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id = VPG_CLASS._process_ae_id\n\n class MockVpg(VPG_CLASS):\n org_process_ae_id = VPG_CLASS._process_ae_id\n HOLD_API = True\n @classmethod\n def mock_process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):\n while cls.HOLD_API:\n print('sleeping for HOLD_API to clear for '\n 'args = %s' % obj_dict)\n gevent.sleep(0.5)\n return cls.org_process_ae_id(db_obj_dict, vpg_name, obj_dict)\n\n def process_ae_ids(x):\n return [int(i) for i in sorted(x) if i is not None]\n\n def get_zk_ae_ids(prs=None):\n prefix = os.path.join(\n self.__class__.__name__,\n 'id', 'aggregated-ethernet')\n zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client\n if not prs:\n prs = [os.path.join(prefix, pr.name) for pr in pr_objs]\n else:\n if not isinstance(prs, list):\n prs = [prs]\n prs = [os.path.join(prefix, pr) for pr in prs]\n ae_ids = {}\n for pr in prs:\n pr_org = os.path.split(pr)[-1]\n ae_ids[pr_org] = zk_client.get_children(pr)\n return ae_ids\n\n pi_per_pr = 6\n pi_objs = {}\n pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for\n i in range(1, pi_per_pr + 1)]\n pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)\n pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)\n pi_objs.update(pr1_pi_objs)\n pi_objs.update(pr2_pi_objs)\n\n # create a VPG\n vpg_count = 3\n vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(\n 1, vpg_count + 1)]\n vpg_objs = self._create_vpgs(fabric_obj, vpg_names)\n\n # record AE-IDs in ZK before creating any VPG\n ae_ids = [x for x in get_zk_ae_ids().values() if x]\n self.assertEqual(len(ae_ids), 0)\n\n def _attach_pi_simultaneously(vpg_obj, pi_uuids):\n # Attach PIs from PR1 to VPG-1\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n try:\n # mock _process_ae_id at VPG resource\n VPG_CLASS._process_ae_id = MockVpg.mock_process_ae_id\n MockVpg.HOLD_API = True\n for pi_uuid in 
pi_uuids:\n gevent.spawn(\n self.api.ref_update,\n \"virtual-port-group\",\n vpg_obj.uuid,\n \"physical-interface\",\n pi_uuid,\n None,\n \"ADD\",\n None)\n gevent.sleep(2)\n MockVpg.HOLD_API = False\n gevent.sleep(3)\n except gevent.timeout.Timeout:\n self.assertFalse(\n False,\n '%s failed unexpectedly' % VPG_CLASS._process_ae_id)\n finally:\n # reset mock to original\n VPG_CLASS._process_ae_id = org_process_ae_id\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n return vpg_obj, pi_refs\n\n # Case 1\n # Attach 2 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[0:2]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 2\n # Attach 2 PIs from PR1 to VPG-2\n vpg_name = vpg_names[1]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[2:4]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [1, 1])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 3\n # Deattach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.del_physical_interface(pi_obj)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 1)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n 
self.assertIsNone(list(vpg_ae_ids.values())[0])\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 4\n # Attach 2 PIs from PR1 to VPG-3\n vpg_name = vpg_names[2]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_uuids = [pi.uuid for pi in list(pr1_pi_objs.values())[4:6]]\n vpg_obj, pi_refs = _attach_pi_simultaneously(vpg_obj, pi_uuids)\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [0, 0])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])\n\n # Case 5\n # Attach 1 PIs from PR1 to VPG-1\n vpg_name = vpg_names[0]\n vpg_obj = vpg_objs[vpg_name]\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_obj = list(pr1_pi_objs.values())[0]\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_obj.uuid)\n vpg_obj.add_physical_interface(pi_obj)\n self._vnc_lib.virtual_port_group_update(vpg_obj)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)\n pi_refs = vpg_obj.get_physical_interface_refs()\n # verify PI-refs are correct\n self.assertEqual(len(pi_refs), 2)\n vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num\n for ref in pi_refs}\n # verify all AE-IDs allocated per prouter are unique\n self.assertEqual(len(set(vpg_ae_ids.keys())), len(pi_refs))\n self.assertEqual(len(list(vpg_ae_ids.values())), 2)\n self.assertEqual(len(set(vpg_ae_ids.values())), 1)\n ae_id_sorted = process_ae_ids(vpg_ae_ids.values())\n self.assertEqual(ae_id_sorted, [2, 2])\n # verification at ZK for AE-IDs in Physical Routers\n pr_ae_ids = get_zk_ae_ids()\n self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1, 2])\n self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)\n self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])", "def detect_arangosh_instances(self, config, old_version):\n if self.arangosh is None:\n config.port = self.get_frontend_port()\n config.passvoid = self.passvoid\n self.arangosh = ArangoshExecutor(config, self.get_frontend(), old_version)\n self.arango_importer = ArangoImportExecutor(config, self.get_frontend())\n self.arango_restore = ArangoRestoreExecutor(config, self.get_frontend())\n if config.hot_backup_supported:\n self.hb_instance = HotBackupManager(\n config,\n self.raw_basedir,\n config.base_test_dir / self.raw_basedir,\n self.get_frontend(),\n )\n self.hb_config = HotBackupConfig(\n config,\n self.raw_basedir,\n config.base_test_dir / self.raw_basedir,\n )", "def 
singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def _get_instance_id(self):\n return self.__instance_id", "def vios_uuids(self):\n raise NotImplementedError()", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def running_instances(hostnames=None):\n\n global api\n\n all_inst = []\n try:\n all_inst = api.get_all_instances()\n except Exception, e:\n logging.error(\"Can't get list of instances (maybe wrong credentials?)\")\n return None\n\n # Resolve IPs\n if hostnames is not None:\n ips = []\n for h in hostnames:\n try:\n ipv4 = gethostbyname(h)\n ips.append(ipv4)\n except Exception:\n # Don't add host if IP address could not be found\n logging.warning(\"Ignoring hostname %s: can't reslove IPv4 address\" % h)\n ips=list(set(ips))\n\n if hostnames is not None:\n logging.debug(\"Input hostnames: %s\" % (','.join(hostnames)))\n logging.debug(\"Input IPs: %s\" % (','.join(ips)))\n else:\n logging.debug(\"No input hostnames given\")\n\n # Add only running instances\n inst = []\n for i in all_inst:\n if i.status(token_id=api.keystone.token_id) == 'running':\n if hostnames is None:\n # Append all\n inst.append(i)\n else:\n found = False\n for ipv4 in ips:\n if i.network_ip(network_name=cf[\"api\"][\"network_name\"]) == ipv4:\n inst.append(i)\n logging.debug(\"Found IP %s corresponding to instance\" % ipv4)\n found = True\n break\n if not found:\n logging.warning(\"Cannot find instance %s in the list of known IPs\" % i.network_ip(network_name=cf[\"api\"][\"network_name\"]))\n\n return inst", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def existing_pipe_ids():\n ids_list = []\n if not os.path.exists(os.path.dirname(__file__) + LAST_RUN_FILE): # Check if record file exist\n pipe_id_file = open(os.path.dirname(__file__) + LAST_RUN_FILE, \"a+\") # if not then create\n else:\n pipe_id_file = open(os.path.dirname(__file__) + LAST_RUN_FILE, \"r+\") # else, start checking the list\n pipelines = []\n for existing_pipeline in pipe_id_file:\n pipelines = existing_pipeline.split(\",\")\n\n ids_list = [int(pipeline) for pipeline in pipelines]\n\n pipe_id_file.close()\n return ids_list", "def _get_id(self):\n self.unix_time = self.__get_unix_now()\n self.mac_addr = self.__get_random_12_str() # self.__get_mac_address()\n self.pid_hex = self.__get_pid()\n self.sequence_num = self.__get_sequence_number()\n\n # uuid format: 8 + 4 + 4 + 4 + 12\n _tmp_list = [self.unix_time[:8],\n self.unix_time[8:],\n self.mac_addr[:4],\n self.mac_addr[4:8],\n self.mac_addr[8:] + self.pid_hex + self.sequence_num]\n return _tmp_list", "def instance_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_list\")", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def check_openvpn_pid():\n return call_command('ps aux')[0].split('\\n')", "def vulnerability_ids():\n\n if S3VulnerabilityModel.indicator_pids is None:\n db = current.db\n table = 
db.vulnerability_indicator\n rows = db(table.deleted == False).select(table.parameter_id)\n S3VulnerabilityModel.indicator_pids = [i.parameter_id for i in rows]\n\n return S3VulnerabilityModel.indicator_pids", "def show_instances():\n return get_instances()", "def __set_ids__(self, line):\r\n try:\r\n self.ids = [x for x in ' '.join(line.split()).split(\" \")]\r\n except ValueError:\r\n logging.error(\"Error while reading process id's: INVALID NUMBER.\")\r\n return False\r\n except Exception as cause:\r\n logging.error(\"Error while reading process id's: %s\", cause)\r\n return False\r\n if len(self.ids) != self.no_process:\r\n logging.error(\"Given number of ids and process count doesn't match\")\r\n return False\r\n logging.info([\"Process ids: \"] + self.ids)\r\n return True", "def _psa(cmd, allmatching=True, paths=None):\n import psutil\n pids = list()\n cmdlines = list()\n procs = list()\n cmdline = ''\n bins = _whicha(cmd, paths)\n if not allmatching:\n bins = bins[:1]\n for pid in psutil.pids():\n try:\n proc = psutil.Process(pid)\n cmdline = proc.cmdline()\n if any([bin in cmdline for bin in bins]):\n cmdlines.append(cmdline)\n pids.append(pid)\n procs.append(proc)\n except psutil.ZombieProcess:\n pass\n except psutil.AccessDenied:\n pass\n return (pids, cmdlines, procs)", "def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)", "def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def test_instances(self):\n kb = logic.PropKB()\n kb.tell(logic.expr('ISA(mammal, animal)'))\n kb.tell(logic.expr('ISA(cat, mammal)'))\n kb.tell(logic.expr('INSTANCEOF(petunia, cat)'))\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISA(petunia, ?x)')),\n [{'?x': 'petunia'}, {'?x': 'cat'}, {'?x': 'mammal'}, {'?x': 'animal'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('INSTANCEOF(petunia, ?x)')),\n [{'?x': 'cat'}])\n self.assertAllBindingsEqual(\n 
kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n self.assertAllBindingsEqual(kb.ask_all(logic.expr('INSTANCEOF(?x, ?y)')),\n [{'?x': 'petunia', '?y': 'cat'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def idn(self):\n\n if self.driver in [drivers.pyvisa, drivers.lgpib]:\n return self.ask('*idn?')", "def validate_openvpn_pid(result):\n for ps in result:\n if 'openvpn --daemon' in ps:\n print 'OpenVPN Process - OK'\n return True\n print 'OpenVPN Process - DOWN'\n return False", "def test_processid(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='processid',\n cmd_key='sh_ospf_ints')) == 1, 'OSPF Interface: process ID not found')", "def get_vid_pid_list(self):\n\n return self.vid_pid_s", "def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])", "def __validate_instance_id(self, instance_ids):\n try:\n if instance_ids:\n for id in instance_ids:\n self.euca.validate_instance_id(id)\n except InstanceValidationError:\n sys.exit(1)", "def pids(self):\n return self._pidToProcess.iterkeys()", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def identify(self):\n print(self.query(\"*IDN?\\n\"))", "def get_matching_appids(\n service: str, instance: str, client: MarathonClient, embed_tasks: bool = False\n) -> List[str]:\n marathon_apps = get_all_marathon_apps(\n client, service_name=service, instance_name=instance, embed_tasks=embed_tasks\n )\n return [\n app.id for app in marathon_apps if does_app_id_match(service, instance, app.id)\n ]", "def source_instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def __init__(self):\n\t\tself.instances = 
{}\n\t\twith open(os.path.join(os.path.dirname(__file__), 'conf', 'parliaments.json'), 'r') as f:\n\t\t\tparliaments = json.load(f)\n\t\tfor c, cp in parliaments.items():\n\t\t\tfor p in cp:\n\t\t\t\tpfx = c + '/' + p['code']\n\t\t\t\tself.instances[pfx] = create_app(c, p)", "def get_instances(instance_ids: np.ndarray, class_ids: np.ndarray,\n class_labels: List[str], id2label: Dict) -> Dict:\n assert len(class_labels) == len(class_ids)\n instances = {}\n for label in class_labels:\n instances[label] = []\n # traverse all instances\n inst_ids = np.unique(instance_ids)\n for id in inst_ids:\n # skip 0 and negative instance id (background points)\n if id <= 0:\n continue\n # get instance\n inst = VertInstance(instance_ids, id)\n # record in correspond class dict\n if inst.label_id in class_ids:\n instances[id2label[inst.label_id]].append(inst.dict)\n return instances", "def is_id(self):\n found = False\n for p in self.ant:\n for prop in self.con:\n if p == prop:\n found = True\n return found", "def update_pid_running_on_dpdk_cpu(self):\n #cpu_list = self.get_cpu_list_by_mask", "def running_rhel_containers_id(broker):\n containers_info = []\n for container in broker[running_rhel_containers]:\n containers_info.append((container[1], container[2]))\n return containers_info", "def __get_multi_images_ids(self, num_images=0): \n availability_images = imageInstance()\n images = availability_images.get_images()\n images_ids = []\n for image in images:\n if image.type == 'machine':\n images_ids.append( image.id.encode(\"latin-1\") )\n if num_images>1:\n random.shuffle(images_ids)\n return images_ids[:num_images]\n return images_ids", "def get_pid_of_all_slaves(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and \"slave\" in i.name:\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_pid_list(disallowed_prefixes, allowed_prefixes):\n # exceptions\n but = disallowed_prefixes if disallowed_prefixes is not None else []\n if allowed_prefixes is None:\n # if nothing setted - all ps will be returned except setted\n result = [pid\n for pid in os.listdir('/proc')\n if pid.isdigit() and pid not in but]\n else:\n result = []\n for pid in os.listdir('/proc'):\n if pid.isdigit() and pid not in but:\n name = get_pid_name(pid)\n if pid in allowed_prefixes or \\\n any(name.startswith(val) for val in allowed_prefixes):\n print name\n # this is allowed pid?\n result.append(pid)\n return result", "def get_instances(cls):\n raise NotImplementedError", "def GetXIDs():\n return dict([(pwent[0], pwent[2]) for pwent in pwd.getpwall() if pwent[6] == slivermanager.sliver_password_shell])", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def test_integer_identifier(self):\n self._compare_avp(\n avp.AVP(1, 'Hello'),\n avp.UTF8StringAVP(\n 1, value='Hello', vendor=avp.VendorId.DEFAULT,\n flags=avp.FLAG_MANDATORY,\n name='User-Name',\n ),\n )\n\n # Unknown AVPs default to unknown AVP\n self._compare_avp(\n avp.AVP(0xdeadb33f, b'wut'),\n avp.UnknownAVP(\n 0xdeadb33f, value=b'wut',\n vendor=avp.VendorId.DEFAULT,\n flags=0, name='Unknown-AVP',\n ),\n )", "def source_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def source_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"source_instance_ids\")", "def 
get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def create_instances(self):\n disk_d = \"//\"+self.host+\"/d$\"\n mask = r\"^IBM$|^WebSphere.*\"\n root_flag = 0\n # print(os.listdir(disk_d)) #checkpoint\n for item in os.listdir(disk_d):\n searchObj = re.search(mask, item, re.M|re.I)\n if searchObj:\n root_flag = 1\n rootdir=disk_d+\"/\"+searchObj.group()\n # print(rootdir) #checkpoint\n\n if os.path.isdir(rootdir):\n candidates=os.listdir(rootdir)\n # print(candidates) #checkpoint\n for candidate in candidates:\n if os.path.isdir(rootdir+'/'+candidate+'/profiles'):\n user_install_root=rootdir+'/'+candidate\n candidate_instance=Instance(user_install_root)\n candidate_instance.get_profiles()\n if candidate_instance.profiles:\n self.instances.append(candidate_instance)\n # print(candidate_instance.uir+\": \"+str(candidate_instance.profiles)) #checkpoint\n\n if root_flag == 0: print(self.host+\" does not have IBM or WebSphere directory on disk D\")", "def instances(args, config):\n print('Does something? More to come.')", "def _choose_among_stopped_instances(self):\n\n instances = self.compute.get_not_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def _get_ids_from_name_public(self, name):\r\n results = self.list_public_images(name=name)\r\n return [result['id'] for result in results]", "def find_id(self):\n\t\tx , y = self.id.split(':')\n\t\treturn int(x), int(y)", "def test_vpn_id():\n for v in vpns:\n assert len(v.id) > 0", "def pid():\n return 0x0204", "def pid():\n return 0x0204", "def get_machine_uuids_for_application(application):\n\n uuids = []\n for machine in get_machines_for_application(application):\n uuids.append(get_machine_status(machine, key='instance-id'))\n return uuids" ]
[ "0.6319383", "0.61950517", "0.6192594", "0.61528647", "0.6111595", "0.6052284", "0.605194", "0.59694993", "0.59247625", "0.5862299", "0.5763337", "0.5762358", "0.5724921", "0.5709404", "0.5638399", "0.56117487", "0.5553753", "0.55345756", "0.54887575", "0.54887056", "0.5459837", "0.5450734", "0.5407005", "0.5404297", "0.53547686", "0.534805", "0.53450406", "0.53387016", "0.5330876", "0.53133714", "0.52949494", "0.5274032", "0.5270608", "0.5254726", "0.52231914", "0.52096415", "0.5207332", "0.51860595", "0.5185979", "0.517679", "0.51725805", "0.5165778", "0.51613027", "0.5158561", "0.51544976", "0.5148519", "0.5145168", "0.5124383", "0.5117711", "0.5107728", "0.5105263", "0.5096645", "0.5088482", "0.5079278", "0.50700533", "0.5068809", "0.5055819", "0.505534", "0.505417", "0.50493926", "0.5044116", "0.5043994", "0.50380653", "0.5018822", "0.5018292", "0.50101244", "0.5009643", "0.5009497", "0.50053287", "0.5004664", "0.4999773", "0.49864212", "0.49861228", "0.49806556", "0.49765342", "0.49664485", "0.49636757", "0.49539393", "0.4952481", "0.4950928", "0.4947877", "0.49426398", "0.49313572", "0.49258468", "0.4925661", "0.49236825", "0.49111617", "0.49105155", "0.4910474", "0.4910474", "0.49062407", "0.49013945", "0.49000952", "0.48943403", "0.48837152", "0.48826912", "0.488055", "0.48639816", "0.48639816", "0.48634386" ]
0.77291846
0
scan all instances for `FATAL` statements
def detect_fatal_errors(self):
    for instance in self.all_instances:
        instance.detect_fatal_errors()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors_fatal(self) -> List[Error]:", "def fatal_error_processor(self):\n while True:\n _ = (yield)\n self.failed = True\n self.converged = False\n self.solve_completed = False", "def getFatalErrors(self):\n global hadFatalErrors\n if hadFatalErrors:\n text = '\\n'.join(hadFatalErrors)\n hadFatalErrors = []\n return text", "def fatal(self, *args, **kwargs):", "def get_fatal_alerts(self, path):", "def test_clean_log():\n\n log_file_path = \"mobile_testkit_tests/test_data/mock_clean_log.txt\"\n\n scan_logs.scan_for_errors(['panic'], log_file_path)", "def has_errors_fatal(self) -> bool:", "def search_for_warnings(self):\n log = str()\n print(self.default_starter_args + self.arguments)\n if not self.log_file.exists():\n print(str(self.log_file) + \" not there. Skipping search\")\n return\n print(str(self.log_file))\n with self.log_file.open(errors=\"backslashreplace\") as log_f:\n for line in log_f.readline():\n if \"WARN\" in line or \"ERROR\" in line:\n print(line.rstrip())\n log += line.rstrip()\n attach(log, \"WARN or ERROR lines from starter log\")", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def scan_error(self, line: int, message: str):\n self.report(line, \"\", message)", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def errors_fatal(self) -> List[Error]:\n return self._errors_fatal_files + self._errors_fatal", "def scanForSimpleError(script):\n\tlangage = identifyLangage(script)\n\tline_number = 0\n\tlogFile_name = \"scan.log\"\n\n\t# Scanning File\n\tlogFile = open(logFile_name, 'w')\n\tscriptFile = open(script, 'r')\n\tfor line in scriptFile:\n\t\tline_number +=1\n\t\tlineWithoutBackN = line.replace(\"\\n\", \"\")\n\t\tlineInArray = lineWithoutBackN.split(\" \")\n\t\tlastWord = lineInArray[-1]\n\t\tlastWordInArray = list(lastWord)\n\t\tlineInCharacterArray = list(lineWithoutBackN)\n\n\t\t#########################\n\t\t# looking for a shebang #\n\t\t# => for perl\t\t#\n\t\t# => for bash\t\t#\n\t\t#########################\n\t\tif(langage == \"perl\" and line_number == 1 and lineInArray[0] != \"#!/usr/bin/perl\"):\n\t\t\tlogFile.write(\"[WARNING]: SET line \"+str(line_number)+\" TO #!/usr/bin/perl\\n\")\n\t\tif(langage == \"bash\" and line_number == 1 and line != \"#!/bin/bash\"):\n\t\t\tlogFile.write(\"[WARNING]: SET line \"+str(line_number)+\" TO #!/bin/bash\\n\")\n\n\t\t#########################\n\t\t# Check for semi-column\t#\n\t\t# => for perl\t\t#\n\t\t#########################\n\t\tif(len(lastWordInArray) > 0):\n\t\t\tif(langage == \"perl\" and line_number != 1 and lastWordInArray[-1] != \";\"):\n\t\t\t\tif(lastWordInArray != \"}\"):\n\t\t\t\t\tfirstNonEmptyCharacter = getFirstNonEmptyCharInArray(lineInCharacterArray)\n\t\t\t\t\tif(firstNonEmptyCharacter != \"#\"):\n\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\";\\\" to line \"+str(line_number)+\"\\n\")\n\n\t\t#################################\n\t\t# Check variable declaration\t#\n\t\t# => for perl\t\t\t#\n\t\t#################################\n\t\tif(getFirstNonEmptyCharInArray(lineInCharacterArray) != \"#\" ):\n\t\t\tword_number = 0\n\t\t\tfor word in lineInArray:\n\t\t\t\tif(word 
== \"my\"):\n\t\t\t\t\tvariable = lineInArray[word_number+1]\n\t\t\t\t\tvariableInArray = list(variable)\n\t\t\t\t\tif(variableInArray[0] != \"$\" and variableInArray[0] != \"@\"):\n\t\t\t\t\t\tif \"list\" in variable:\n\t\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\"@\\\" to \"+variable+\", line \"+str(line_number)+\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\"$\\\" to \"+variable+\", line \"+str(line_number)+\"\\n\")\n\t\t\t\t\n\n\t\t\t\n\t\t\t\t\t\n\n\tscriptFile.close()\n\tlogFile.close()", "def delete_error():\r\n item = core.get_all_items()\r\n for i in item:\r\n if \"Error\" in i or \"Warning\" in i:\r\n if core.does_item_exist(i):\r\n reset_error(i)", "def check_for_errors(self):\n\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"*** Psi4 exiting successfully.\" in line:\n return {\"success\": True}\n\n elif \"*** Psi4 encountered an error.\" in line:\n return {\"success\": False, \"error\": \"Not known\"}\n\n return {\"success\": False, \"error\": \"Segfault\"}", "def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def errors(conf, daemon):\n # persisted dict interface for long term memory\n errors = Shove('file://{0}'.format(conf.app.errors), protocol=2, flag='r')\n if any(errors):\n print(\"errors found\")\n for path, error in six.iteritems(errors):\n pp(error)\n errors.close()\n exit(1)\n # ⏏ exit the program with an error\n else:\n print(\"no errors found - OK\")\n print()\n errors.close()", "def quieter():\n try:\n ttsEng.quieter()\n except Exception, e:\n logging.error(e)", "def fatal(self, msg, exitst):\n return libruss.russ_sconn_fatal(self._ptr, strtobytes(msg), exitst)", "def fatal(self, *args):\n self.mylog.critical(*args)\n sys.exit(1)", "def _log_crash_report():\n # For each crash report we find, dump its contents.\n # In theory we clean up after a crash so there should be only one.\n cwd = os.getcwd()\n for entry in os.listdir('.git'):\n if entry.startswith('fast_import_crash_'):\n with open(os.path.join(cwd, '.git', entry)) as f:\n report = f.read()\n # Keep the message free of repetition.\n LOG.error(\"git {}:\\n{}\".format(entry, report))", "def fatal ( self , message , *args , **kwargs ) :\n return self.logger.fatal ( message , *args , **kwargs )", "def test_explain_non_existent_code(self):\n command_line = [\"pool\", \"explain\", \"bogus\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def report_errors(errors):\n if len(errors) > 0:\n for error in errors:\n logger.debug(error)\n sys.exit(0)", "def listBadRefScripts(self):\n if not self.log: return\n ids = []\n for record in self.records:\n if record.name != 'SCPT': continue\n rnam = record.rnam\n if rnam and rnam.data == chr(255)*4:\n ids.append(record.getId())\n if ids:\n self.log.setHeader(_('Detached Global Scripts'))\n for id in sorted(ids,key=string.lower):\n self.log(id)", "def ignore_errors(self):\n field = \"ignore_errors\"\n value = \"y\"\n ignore_emptys = [step for step in self.all_steps if self.is_value(step, field, value)]\n self.add_all_issues(ignore_emptys, self.WARNINGS, self.issue_messages.ignore_insert_errors)", "def clear_errors(heroku_app=HEROKU_APP):\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'SELECT COUNT(*) FROM error_report;',\n )\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'DELETE FROM 
error_report;',\n )\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'SELECT COUNT(*) FROM error_report;',\n )", "def check_no_silent_crash(self, override=False):\n if self.results:\n score = self.results.linter.stats.get('global_note', False)\n if score is False:\n messages = self.results.linter.stats.get('by_msg', {})\n if messages.get('syntax-error', False) and not override:\n self.logging.warning('\\n------------------------------------------------------------------')\n self.logging.warning('PYLINT FAILED BECAUSE SYNTAX ERROR.')\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning('\\n')\n self.failed_files.append(self.fname)\n return False\n self.logging.info('\\n------------------------------------------------------------------')\n self.logging.info('FILE WAS IGNORED.')\n self.logging.info('------------------------------------------------------------------')\n return True\n return False", "def clean_errors(self):\n self._vim.eval('clearmatches()')\n self._errors = []\n self._matches = []\n # Reset Syntastic notes - TODO: bufdo?\n self._vim.current.buffer.vars['ensime_notes'] = []", "def print_unknown(msg):\n print('UNKNOWN - %s' % (msg))\n sys.exit(3)", "def warnings_active(self) -> List[Error]:", "def logged_batch_throws_uae_test(self):\n cursor = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n cursor.consistency_level = 'ONE'\n assert_unavailable(cursor.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def fatal(msg, ex=None):\n if ex == None:\n exMsg = \"\"\n else:\n exMsg = \" \\n \" + repr(ex)\n s = format_log(\"\\n\\n FATAL ERROR! 
%s%s\\n\\n\" % (msg,exMsg))\n print(s)\n log['fatal'].append({'msg':s, 'ex':ex})\n f_errors.write(s)\n exit(1)", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def check_errors(stderr):\n for ee in err_regex:\n if ee['re'].search(stderr) is not None:\n raise RuntimeError(ee['message'])", "def get_unbroken_instances(self):\n return self._get_cond_instance(cond=0)", "def logged_batch_throws_uae_test(self):\n session = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n session.consistency_level = 'ONE'\n assert_unavailable(session.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def warnings(self) -> List[Error]:", "def report_invalid_sources(self):\n if not self.invalid_sources:\n return\n total = sum(self.invalid_sources[s] for s in self.invalid_sources)\n self.logger.info(\n \"Dropping %d messages with invalid sources: %s\",\n total,\n \", \".join(\"%s: %s\" % (s, self.invalid_sources[s])\n for s in self.invalid_sources)\n )\n self.invalid_sources = defaultdict(int)", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def get_errors(cursor):\n while True:\n message = cursor.lpop(\"errors\")\n if message is None:\n print(\"There are no errors more\")\n return None\n print(message)", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def test_get_counturingErr(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, COUNTURING_ERR_IDX, COUNTURING_ERR_SUB)\n param_obj = self.__dict__[servo_type]._get_counturingErr()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in counturingErr...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def emergency_recover_states_from_failure():\n _emergency_state_check()\n _emergency_iobuf_extract()", "def replicate_no_data_error():\r\n for i in range(10):\r\n content = ust.get_web_xml(2002)\r\n print(content[:100])\r\n if not content.startswith(\"<?xml\"):\r\n ust.save_local_xml(\"error\", content)", "def upkeep():\n sys.exc_clear() # Just in case, let's clear the exception info.\n if os.name == 'nt':\n try:\n import msvcrt\n msvcrt.heapmin()\n except ImportError:\n pass\n except IOError: # Win98 sux0rs!\n pass\n if conf.daemonized:\n # If we're daemonized, sys.stdout has been replaced with a StringIO\n # object, so let's see if anything's been printed, and if so, let's\n # log.warning it (things shouldn't be printed, and we're more likely\n # to get bug reports if we make it a warning).\n assert not type(sys.stdout) == file, 'Not a StringIO object!'\n if not hasattr(sys.stdout, 'getvalue'):\n # Stupid twisted sometimes replaces our stdout with theirs, because\n # \"The Twisted Way Is The Right Way\" (ha!). 
So we're stuck simply\n # returning.\n log.warning('Expected cStringIO as stdout, got %r.', sys.stdout)\n return\n s = sys.stdout.getvalue()\n if s:\n log.warning('Printed to stdout after daemonization: %s', s)\n sys.stdout.reset() # Seeks to 0.\n sys.stdout.truncate() # Truncates to current offset.\n assert not type(sys.stderr) == file, 'Not a StringIO object!'\n s = sys.stderr.getvalue()\n if s:\n log.error('Printed to stderr after daemonization: %s', s)\n sys.stderr.reset() # Seeks to 0.\n sys.stderr.truncate() # Truncates to current offset.\n doFlush = conf.supybot.flush() and not starting\n if doFlush:\n flush()\n # This is so registry._cache gets filled.\n # This seems dumb, so we'll try not doing it anymore.\n #if registryFilename is not None:\n # registry.open(registryFilename)\n if not dying:\n log.debug('Regexp cache size: %s', len(sre._cache))\n log.debug('Pattern cache size: %s', len(ircutils._patternCache))\n log.debug('HostmaskPatternEqual cache size: %s',\n len(ircutils._hostmaskPatternEqualCache))\n #timestamp = log.timestamp()\n if doFlush:\n log.info('Flushers flushed and garbage collected.')\n else:\n log.info('Garbage collected.')\n collected = gc.collect()\n if gc.garbage:\n log.warning('Noncollectable garbage (file this as a bug on SF.net): %s',\n gc.garbage)\n return collected", "def _fatal(self, message: str) -> NoReturn:\n self._trace(message, Level.ERROR)\n sys.exit(1)", "def fatal(self, msg, stderr=True):\n self.log(msg, level=self.FATAL, stderr=stderr)", "def logged_batch_doesnt_throw_uae_test(self):\n cursor = self.prepare(nodes=3)\n self.cluster.nodelist()[-1].stop(wait_other_notice=True)\n cursor.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", consistency_level=\"ANY\")\n assert True", "def graceful(self):\n if self.debug:\n print(\"%s graceful\" % self.name)\n self.quit()", "def _fatal(msg):\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)", "def pl_stderr_ignore(line):\n for s in PL_STDERR_IGNORE_LIST:\n if s.search(line):\n return True\n return False", "def handle_errors(self, output):\n\n if \"Error: Could not find specified account(s).\" in output:\n return []\n\n logger.error(\"LastPass Error: %s\", output)\n raise LastPassError(output)", "def isFatal(self):\n return _libsbml.XMLError_isFatal(self)", "def check_puppet_logs_ubuntu(self):\n\n remote = self.env.get_admin_remote()\n\n err_start = 'The following packages have unmet dependencies:'\n err_end = ('Unable to correct problems,'\n ' you have held broken packages.')\n cmd = ('fgrep -h -e \" Depends: \" -e \"{0}\" -e \"{1}\" '\n '/var/log/docker-logs/remote/node-*/'\n 'puppet*.log'.format(err_start, err_end))\n result = remote.execute(cmd)['stdout']\n\n err_deps = {}\n err_deps_key = ''\n err_deps_flag = False\n\n # Forming a dictionary of package names\n # with sets of required packages.\n for res_str in result:\n if err_deps_flag:\n if err_end in res_str:\n err_deps_flag = False\n elif \": Depends:\" in res_str:\n str0, str1, str2 = res_str.partition(': Depends:')\n err_deps_key = ''.join(str0.split()[-1:])\n if err_deps_key not in err_deps:\n err_deps[err_deps_key] = set()\n if 'but it is not' in str2 or 'is to be installed' in str2:\n err_deps[err_deps_key].add('Depends:{0}'\n .format(str2))\n elif 'Depends:' in res_str and err_deps_key:\n str0, str1, str2 = res_str.partition('Depends:')\n if 'but it is not' in str2 or 'is to be installed' in 
str2:\n err_deps[err_deps_key].add(str1 + str2)\n else:\n err_deps_key = ''\n elif err_start in res_str:\n err_deps_flag = True\n\n return err_deps", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def test_cap_panic_in_log():\n\n log_file_path = \"mobile_testkit_tests/test_data/mock_panic_log.txt\"\n\n with pytest.raises(AssertionError) as e:\n scan_logs.scan_for_errors(['Panic'], log_file_path)\n\n error_message = str(e.value)\n assert error_message.startswith(\"Panic found!!\")", "def dump_diags():\n try:\n stat_log.info(\"=== DIAGNOSTICS ===\")\n for name, diags_function in _registered_diags:\n stat_log.info(\"--- %s ---\", name)\n diags_function(stat_log)\n stat_log.info(\"=== END OF DIAGNOSTICS ===\")\n except Exception:\n # We don't want to take down the process we're trying to diagnose...\n try:\n stat_log.exception(\"Failed to dump diagnostics\")\n except Exception:\n pass", "def fix(self):\n exceptionError = ''\n for each in self.errorNodes:\n try:\n pm.delete(each)\n except exceptionError:\n print exceptionError", "def has_errors_fatal(self) -> bool:\n return len(self.errors_fatal) > 0", "def fatal(*message, **kwargs):\n err(*message, **kwargs)\n sys.exit(1)", "def errProc(self, szErrMsg):\n self.logger.critical(szErrMsg)\n self.logger.debug(szErrMsg)\n for i in self.lstSimulators:\n i.quit()\n return False", "def quiet_hook(kind, message, traceback):\n if QuietException in kind.__bases__:\n # Only print Error Type and Message\n print('{0}: {1}'.format(kind.__name__, message))\n else:\n # Print Error Type, Message and Traceback\n sys.__excepthook__(kind, message, traceback)", "def suppressMessages():\n dislin.unit(0)", "def check_processed(args):\n\n unknown_fns = glob.glob(\"*M.txt\") + glob.glob(\"unknowns/*M.txt\")\n unknown_fns = [os.path.basename(fn) for fn in unknown_fns]\n\n with sqlite3.connect(args.search_db) as conn:\n conn.row_factory = sqlite3.Row\n sql_ranges = conn.execute('SELECT * FROM range ORDER BY p, d, m_start').fetchall()\n\n # At some later point maybe don't load all (group by thousands or something)\n conn.row_factory = None\n results = conn.execute('SELECT P, D, m FROM result').fetchall()\n print(f\"\\tLoaded {len(results):,} results\")\n\n # ---- Add file only ranges\n ranges, lookup = sql_and_file_ranges(sql_ranges, unknown_fns)\n\n # ---- Find results not belonging to any range\n build_and_count_pd_results(results, ranges, lookup)\n\n print_results(conn, ranges, lookup)", "def filter_errlog(columns):\r\n return bool( min(0, int(columns[POS_RETCODE])) )", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs", "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. 
The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")", "def test_panic_in_log():\n\n log_file_path = \"mobile_testkit_tests/test_data/mock_panic_log.txt\"\n\n with pytest.raises(AssertionError) as e:\n scan_logs.scan_for_errors(['panic'], log_file_path)\n\n error_message = str(e.value)\n assert error_message.startswith(\"panic found!!\")", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def get_member_errors(rows):\n\n member_errors = []\n\n for row in rows:\n if row['svname'] not in ['FRONTEND', 'BACKEND'] and \\\n row['status'] not in ['UP', 'no check']:\n member_errors.append(row['pxname'] + ':' + row['svname'])\n\n return member_errors", "def select(self):\n for elem in self.errorLog:\n print elem", "def select(self):\n for elem in self.errorLog:\n print elem", "def rejected(eachtweet):\n import general_functions\n try:\n # write to logfile\n # remove tweet from ReadyForAck\n general_functions.csv_writelist('logfile', 'logfile', eachtweet, 1)\n general_functions.csv_writelist('ReadyForAck', 'ReadyForAck', eachtweet, 3)\n except:\n \"ietsgaatfout\"", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def print_loop_warning(file_name):\n print('pwgrep: warning: {}: recursive directory loop'.format(file_name),\n file=sys.stderr)", "def check_errors(self) -> None:", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def clear_errors(self) -> None:", "def clear_errors(self) -> None:", "def faulty(self, *args):\n for each in args:\n if not self.is_faulty(each):\n self._faults.add(each)", "def go_quiet():\n my_logger = logging.getLogger(__name__)\n my_logger.setLevel(LEVELS['WARNING'])", "def verify_no_cable_errors(self):\n i = 0\n for dpid in self.dpids:\n i += 1\n labels = {'dp_id': '0x%x' % int(dpid), 'dp_name': 'faucet-%u' % i}\n self.assertEqual(\n 0, self.scrape_prometheus_var(\n var='stack_cabling_errors_total', labels=labels, default=None))\n self.assertGreater(\n self.scrape_prometheus_var(\n var='stack_probes_received_total', labels=labels), 0)", "def __look__missing_termcount_info(self):\n logging.debug('Starting method that looks for missing Term Count data.')\n counter = 0\n max_vids_to_process = self.num_vids_to_use\n logging.info('Examining ' + str(max_vids_to_process) + ' records.')\n list_vids_no_tc_data = []\n percent_tracker = PercentTracker(max_vids_to_process, int_output_every_x_percent=10)\n for vid_id in self.transcripts_ds:\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n if (not execution_should_continue) or (counter >= max_vids_to_process):\n break\n transcript = Transcript(vid_id)\n transcript.set_transcript_directory(self.str_path_to_transcripts_files)\n transcript.load_transcript_object_from_dictionary(self.transcripts_ds.fetch_data(vid_id))\n has_tc_data = transcript.is_termcount_filename_populated()\n if not 
has_tc_data:\n # we are here if the video has a transcript (it exists in the transcripts SimpleDS),\n # but the field for the filename of the Term Count file has never been populated.\n list_vids_no_tc_data.append(vid_id)\n counter += 1\n percent_tracker.update_progress(counter,\n str_description_to_include_in_logging='Finding missing term-count files.')\n return list_vids_no_tc_data", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def logged_batch_doesnt_throw_uae_test(self):\n session = self.prepare(nodes=3)\n self.cluster.nodelist()[-1].stop(wait_other_notice=True)\n query = SimpleStatement(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", consistency_level=ConsistencyLevel.ANY)\n session.execute(query)\n assert True", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def find_all_unprocessed():\n ns, errors = note.find_all_unprocessed()\n try:\n return [db_to_model(n) for n in ns], errors\n except:\n return [], [sys.exc_info] + errors", "def _warn_exit_early(self):\n ready_outputs = self.n_completed_tasks - self._nb_consumed\n is_completed = self._is_completed()\n msg = \"\"\n if ready_outputs:\n msg += (\n f\"{ready_outputs} tasks have been successfully executed \"\n \" but not used.\"\n )\n if not is_completed:\n msg += \" Additionally, \"\n\n if not is_completed:\n msg += (\n f\"{self.n_dispatched_tasks - self.n_completed_tasks} tasks \"\n \"which were still being processed by the workers have been \"\n \"cancelled.\"\n )\n\n if msg:\n msg += (\n \" You could benefit from adjusting the input task \"\n \"iterator to limit unnecessary computation time.\"\n )\n\n warnings.warn(msg)", "def warn():\n pass", "def test_stratis_no_subcommand(self):\n for command_line in [[], [\"daemon\"]]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)" ]
[ "0.61938083", "0.6015998", "0.59813035", "0.5981033", "0.5679613", "0.55990446", "0.550483", "0.54907763", "0.5387474", "0.5358392", "0.5331162", "0.53206944", "0.5311066", "0.5309457", "0.5303004", "0.5290556", "0.5203423", "0.5195811", "0.51785743", "0.517628", "0.51631856", "0.51450163", "0.5143785", "0.51363826", "0.5134749", "0.51189876", "0.51103044", "0.5080967", "0.506726", "0.50608265", "0.5033425", "0.5014879", "0.49913225", "0.49674612", "0.49567387", "0.4945774", "0.49362805", "0.49357828", "0.49350226", "0.49189064", "0.48906893", "0.48906893", "0.48906893", "0.48906893", "0.48906893", "0.48906893", "0.48906893", "0.48906893", "0.48869187", "0.48743838", "0.48724315", "0.48680234", "0.48486432", "0.4828028", "0.4814638", "0.48016292", "0.47885004", "0.47873124", "0.47650728", "0.4763636", "0.47622085", "0.47488636", "0.47455493", "0.47436807", "0.47431558", "0.47420394", "0.474115", "0.47360662", "0.47325164", "0.4727423", "0.4717856", "0.4716676", "0.47137165", "0.4711229", "0.47032696", "0.47014505", "0.4700642", "0.4700611", "0.4700611", "0.46949464", "0.46924013", "0.46924013", "0.4678952", "0.4677883", "0.46768495", "0.46723834", "0.4671567", "0.4665642", "0.4665642", "0.4663837", "0.46633974", "0.46620128", "0.4661188", "0.46595535", "0.46570903", "0.4652479", "0.46500593", "0.46441624", "0.46414024", "0.46348894" ]
0.69902325
0
gets the arangosh instance to speak to the frontend of this starter
def detect_arangosh_instances(self, config, old_version):
    if self.arangosh is None:
        config.port = self.get_frontend_port()
        config.passvoid = self.passvoid
        self.arangosh = ArangoshExecutor(config, self.get_frontend(), old_version)
        self.arango_importer = ArangoImportExecutor(config, self.get_frontend())
        self.arango_restore = ArangoRestoreExecutor(config, self.get_frontend())
        if config.hot_backup_supported:
            self.hb_instance = HotBackupManager(
                config,
                self.raw_basedir,
                config.base_test_dir / self.raw_basedir,
                self.get_frontend(),
            )
            self.hb_config = HotBackupConfig(
                config,
                self.raw_basedir,
                config.base_test_dir / self.raw_basedir,
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(approot, instance):\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()", "async def __aenter__(self) -> \"HomeAssistantClient\":\n await self.connect()\n return self", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def client():", "def shell(self):\n import code\n client = self.connect(VAULT_TOKEN)\n code.interact(local=locals())", "def app(self):\n return self.__key.app()", "def __call__( self, agi ):\n\t\tself.agi = agi \n\t\treturn self.start()", "def start(linker, logger, discord_token, discord_channel, asana_token, asana_workspace):\n bot = AsanaBot(linker, logger, discord_token, discord_channel, asana_token, asana_workspace)\n bot.run()", "def app(self):\r\n return self._app", "def main():\n\ttoken = os.getenv(\"BOT_TOKEN\")\n\tapplication = Application.builder().token(token).read_timeout(30).write_timeout(30).build()\n\tload_interactions(application)\n\tprint(\"Simple Media Converter instance started!\")\n\tapplication.run_polling()", "def app(self):\n return self.__app", "def start(self):\n return self._args[0]", "def get_spice_console(self, instance):\n raise NotImplementedError()", "def main():\n\n # Play start sound\n play_wave_file(\"start.wav\")\n\n # Connect to Lego Boost\n hub = connect()\n\n # If hub works, starts the main app flow\n if hub:\n speech(\n \"Olá. Eu sou a Faustina, uma robô assistente do ueivespeisse. 
Em que posso ajudar?\", hub, {})\n while True:\n try:\n act({\"legoAction\": \"colorGreen\"}, hub)\n\n recorded_file = audio.record()\n\n act({\"legoAction\": \"colorRed\"}, hub)\n\n wit_response = wit_client.get_response(recorded_file)\n\n if wit_response[\"_text\"]:\n print(wit_response)\n answer = get_answer(wit_response)\n\n text = add_information_to_text(\n answer) if answer else \"Desculpa, nao entendi o que voce quis dizer\"\n\n speech(text, hub, answer)\n if answer:\n act(answer, hub)\n else:\n act({\"legoAction\": \"colorYellow\"}, hub)\n print(\"No sound detected\")\n time.sleep(2)\n except Exception as exception:\n print(exception)\n\n time.sleep(2)\n hub.motor_external.stop()", "def setup(bot: Bot) -> None:\n bot.add_cog(VoiceGate(bot))", "def GetInstance():\n pass", "def __init__(self, bot):\r\n Component.__init__(self, bot)\r\n \r\n self.bot = bot \r\n self.logger = logging.getLogger('components.topic') \r\n self.persistence = self.bot.get_subsystem('local-persistence')", "def get_app(self):\n app.init_options()\n return app.NBViewer().tornado_application", "def app():\n return aplicattion", "def _get_instance(self):", "def _get_instance(self):", "async def get_app():\n import aiohttp_debugtoolbar\n conf = load_config(PROJ_ROOT / 'config' / 'config.yml')\n app = await init(conf)\n aiohttp_debugtoolbar.setup(app)\n return app", "def speak(self):\n print(\"hello\")", "def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)", "def init(self, sevabot):\n self.sevabot = sevabot\n self.standard_xml = \"sevabot/alice/std-startup.xml\"\n\n self.commands = {\n \"!alice start\": self.start,\n \"!alice stop\" : self.stop\n }", "def startapp():", "def client(self):\n raise NotImplementedError()", "def __init__(self, *, specified_loop=None):\n intents = discord.Intents(\n members=True,\n presences=True,\n guilds=True,\n emojis=True,\n invites=True,\n messages=True,\n reactions=True,\n voice_states=True,\n )\n loop = asyncio.get_event_loop()\n session = aiohttp.ClientSession(loop=loop)\n\n # Load all the environment variables\n load_dotenv(\"config/Bot/token.env\")\n load_dotenv(\"config/Apis/tokens.env\")\n load_dotenv(\"config/Database/db.env\")\n\n # Read the emoji file\n self.emoji_config = CustomEmojis.from_json(read_file(\"config/General/emojis.json\"))\n # Read the config file\n self.config = Config.from_json(read_file(\"config/General/config.json\"))\n\n # Set the HTTPException error codes dict to a custom property for easy access\n self.httpexception_codes = load_json(\"assets/data/httpexception_codes.json\", make_keys_int=True)\n\n # We save the bot start time to a variable\n self.started_at = datetime.datetime.utcnow()\n\n # APIs\n self.cleverbot = async_cleverbot.Cleverbot(\n os.environ[\"cleverbot\"],\n session=session,\n context=async_cleverbot.DictContext(),\n )\n self.dagpi = asyncdagpi.Client(os.environ[\"dagpi\"])\n self.google_api = async_cse.Search(os.environ[\"google_search\"], session=session)\n self.translate_api = aiogoogletrans.Translator()\n self.aki = Akinator()\n self.apis = [\"OMDB\", \"tenor\", \"owlbot\", \"gender_api\", \"nasa\"]\n self.api_keys = {api: os.environ[api.lower()] for api in self.apis}\n\n # For the snipe command\n self.snipes = {}\n\n # For tracking commands\n self.command_uses = {}\n\n # For api requests\n self.session = session\n\n super().__init__(\n 
command_prefix=get_prefix,\n case_insensitive=True,\n intents=intents,\n session=session,\n loop=specified_loop or loop,\n strip_after_prefix=True,\n owner_ids=self.config.owner_ids,\n )\n\n # For before_invoke\n self._before_invoke = self.before_invoke\n # For blacklisted check\n self._checks.append(self.bot_check)", "def run(self):\n self.ae.start()", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def setup(bot):\n bot.add_cog(Session(bot))", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def get_object_to_run(self):", "def app(self):\n return self._app", "def launch(self):", "async def __aenter__(self) -> \"SwitcherBridge\":\n await self.start()\n return self", "def start(self) :\n\t\tp = OceanOpticsSpectrometer(self.child_connection)\n\t\tp.start()\n\t\treturn p", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def main():\n return execute_api(Freta(), [Endpoint], __version__)", "def start(self):\n self.kb_client.subscribe(self.kb_ID, {\"_data\": {\"tag\": TAG_ANSWER, \"text\": \"$input\", \"timestamp\": \"$time\", \"language\": \"$lang\"}}, self.add_emotion) # from the 'gnlp' module", "def launch_request_handler(handler_input):\n speech_text = \"Hello! Are you looking to connect and play with others?\"\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello! Are you looking to connect and play with others?\", speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def client():\n\n client = Client()\n return client", "def app(self) -> traits.RESTAware:", "def app(self) -> traits.RESTAware:", "def client_asgi() -> TestClient:\n return TestClient(base_app)", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def setup(bot):\n bot.add_cog(MyAnimeList())", "def main():\n my_painting_mqtt_client = MyPaintingMQTTClient()\n my_painting_mqtt_client.run_app()", "def cli(self, env):\n raise NotImplementedError", "def main():\n frontend_query(PATH, USER)", "def __init__(self):\n\n\t\tself.account_sid = os.environ['TWILIO_ACCOUNT_SID']\n\t\tself.auth_token = os.environ['TWILIO_AUTH_TOKEN']\n\t\tself.twilio_phone_number = os.environ['TWILIO_PHONE_NUMBER']\n\t\tself.client = Client(self.account_sid, self.auth_token)\n\n\t\tself.call_domain = 'http://twimlets.com/echo?Twiml='", "def test_koza(koza: KozaApp):\n global koza_app\n koza_app = koza", "def shell():\n from flask.globals import _app_ctx_stack\n banner = 'Welcome to Opsy!'\n app = _app_ctx_stack.top.app\n shell_ctx = {'create_app': create_app,\n 'db': db,\n 'User': User,\n 'Role': Role,\n 'Permission': Permission,\n 'Zone': Zone,\n 'Host': Host,\n 'Group': Group,\n 'HostGroupMapping': HostGroupMapping}\n shell_ctx.update(app.make_shell_context())\n try:\n from IPython import embed\n embed(user_ns=shell_ctx, banner1=banner)\n return\n except ImportError:\n import code\n code.interact(banner, local=shell_ctx)", "def start_transcribing():\n transcribe.main()", "def __call__(self):\n self.logger.debug(\"Calling the hortator's run.\")\n self.builder.hortator()\n return", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def get_app(self):\n return Application()", "def get_client():\n return Client(__address, 
authkey='strumamor')", "def client():\n from csuibot import app\n app.config['TESTING'] = True\n return app.test_client()", "def topic(cls, **kwargs) -> str:\n return \"hermes/asr/startListening\"", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, bot):\n self.bot = bot", "def speak(self):\n print(\"meow!\")", "def main():\n door = TalkingDoor()\n\n application = ApplicationBuilder().token(TOKEN).build()\n\n application.add_handlers(\n [\n CommandHandler([\"start\", \"help\"], door.help),\n CommandHandler(\"status\", door.status),\n CommandHandler(\"alarm\", door.alarm),\n CommandHandler(\"last_vid\", door.last_vid),\n CommandHandler(\"last_vids\", door.last_vids),\n CommandHandler(\"stop\", door.stop),\n CommandHandler(\"last\", door.last),\n CommandHandler(\"lines\", door.last_lines),\n ]\n )\n application.add_handler(CallbackQueryHandler(door.button))\n\n application.post_init = send_keyboard\n\n application.run_polling()", "def __init__(self, app):\n pass", "def __init__(self):\n\n\t\tself.Helpers = Helpers(\"TassAI\", False)\n\n\t\tself.qs = 16\n\t\tself.context = InferenceContext([self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"]], \"\", \"\", \"\")\n\n\t\tself.Helpers.logger.info(\"TassAI Helper Class initialization complete.\")", "def setup(bot: Bot) -> None:\n bot.add_cog(Armory(bot))", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def service(self):\n pass", "def __init__(self):\n self._eng = pyttsx.init()\n self._eng.connect(\"started-utterance\", self._onStart)\n self._eng.connect(\"started-word\", self._onWord)\n self._eng.connect(\"finished-utterance\", self._onEnd)", "def start( self ):\n\t\treturn self.agi.answer().addCallbacks( self.onAnswered, self.answerFailure )" ]
[ "0.5918849", "0.5901219", "0.57980865", "0.5683462", "0.55928767", "0.55773854", "0.55701894", "0.5559227", "0.54972243", "0.53930515", "0.5382782", "0.5344606", "0.5341296", "0.53286755", "0.5324452", "0.5287806", "0.52836734", "0.52671164", "0.52298224", "0.52198905", "0.52198905", "0.5217069", "0.52147317", "0.52071303", "0.52043283", "0.5201664", "0.51913995", "0.5183476", "0.5182812", "0.5181024", "0.5177634", "0.5174731", "0.5165623", "0.5165623", "0.51638114", "0.51604253", "0.51517296", "0.51507276", "0.51455", "0.5139062", "0.5129067", "0.51211715", "0.51142764", "0.50959224", "0.5094123", "0.5094123", "0.509397", "0.5089602", "0.5079794", "0.5074061", "0.50697786", "0.50655305", "0.50497055", "0.50447017", "0.5039954", "0.50304073", "0.5025452", "0.50201976", "0.5015821", "0.5012864", "0.50100905", "0.5006079", "0.50047576", "0.50047576", "0.50025976", "0.5001516", "0.49991763", "0.49926418", "0.4990662", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49896127", "0.49880517", "0.49865288", "0.49851358" ]
0.53324765
13
launch an arangobench instance to the frontend of this starter
def launch_arangobench(self, testacse_no, moreopts=None):
    arangobench = ArangoBenchManager(self.cfg, self.get_frontend())
    arangobench.launch(testacse_no, moreopts)
    return arangobench
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n tng.api.runner()", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def launch_test():\n import sys\n from kothrak.envs.KothrakEnv import KothrakEnv\n from kothrak.envs.game.MyApp import style\n from PyQt5.QtWidgets import QApplication, QWidget\n\n qapp = QApplication(sys.argv)\n qapp.setStyleSheet(style)\n window = QWidget()\n window.setWindowTitle('Kothrak training')\n\n env = KothrakEnv(qapp, window)\n window.show()\n\n trainer = Trainer(env)\n # trainer.load('saves/031421-1523.zip')\n trainer.run()\n\n qapp.exec_()", "def launch_analysis_v2():\n\n # add explicit instructions for user\n\n os.system(\"pip install -r requirements.txt\")\n os.chdir(f'{os.getcwd()}/gui')\n\n # explicit version checking\n if os.system(\"node -v\") != 0:\n print(\"Please install node before proceeding.\")\n exit(-1)\n\n if os.system(\"npm install\") != 0:\n print(\"Could not install npm packages. \")\n\n os.system(\"npm run start-backend &\")\n os.system(\"npm start\")", "def launch(self):", "def main():\n driver = Driver()\n driver.start()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def run_experiment():\n pass", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", "def main():\n indicator = AyatanaIndicator()\n indicator.run()", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = 
cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def startTest(asset):", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def main(self, argv):\n\n np.random.seed(42)\n self.setup_logging()\n self.command_line(argv)\n start_time = time.time()\n\n logging.info(\"Starting Kaggle-CTMI Experiment\\n\")\n\n logging.info(\"Finding data and groundtruth...\")\n cohort = Cohort(self.shaip)\n train_cohort, test_cohort = cohort.split_cohort_train_test(0.3)\n logging.info(\"Found %d datasets\", cohort.size)\n\n if self.args.train:\n logging.info(\"Training on %d datasets...\", train_cohort.size)\n model = self.algorithm.train(train_cohort)\n Algorithm.save_model(model, self.shaip.models_dir + 'model')\n else:\n logging.info(\"Skipping training, model saved from earlier run\")\n model = self.algorithm.load_model(self.shaip.models_dir + 'model')\n\n if self.args.predict:\n logging.info(\"Prediction on %d datasets...\", test_cohort.size)\n test_predictions = self.algorithm.predict(model, test_cohort)\n else:\n logging.info(\"Skipping prediction, using predictions from earlier run\")\n # TODO: need to sort out caching of predictions\n test_predictions = None\n\n if self.args.evaluate:\n logging.info(\"Generating results to ShaipWorkspace/outputs/results/index.html...\")\n self.results.show_results(train_cohort, test_cohort,\n self.algorithm.history, test_predictions)\n\n logging.info(\"Kaggle-CTMI Experiment done in %4.1f seconds.\\n\", (time.time() - start_time))", "def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()", "def main(self):\n\n def _run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', 
'--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def launch(config):\n \n launch_with_configs([config])", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def startapp():", "def __main() :\n launchTests()", "def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--input', '-i', type=str, required=True, help='Path to video')\n parser.add_argument('--snapshot_path', '-s', type=str, required=False, default='', help='Path to snapshot')\n parser.add_argument('--out_scale', type=float, default=1.0, help='Output frame scale')\n parser.add_argument('--deploy', '-d', action='store_true', help='Execute in deploy mode')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.input)\n assert exists(args.snapshot_path + '.index')\n assert args.out_scale 
> 0.0\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n task_monitor.demo(args.input, args.out_scale, args.deploy)", "def run_experiment(arguments):\n\n logging.info('Arguments: %s', arguments)\n\n # Get estimator\n estimator = get_estimator(arguments)\n # my_module.\n\n # Run training and evaluation\n _train_and_evaluate(estimator, arguments.job_dir)", "def create_and_run():\n\n app = App()\n app.run()", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def main(self, verbose=1, timeout=10):\n runtime = Runtime()\n runtime.execute(self, verbose, timeout)", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n CLI_APP.run()", "def run():\n main()", "def run(self):\n self.speed_test.start()", "def run(self):\n self.arbiter.start()", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def start():\n trio.run(_main)", "def _run_express_job(self, class_name, options=\"\"):\n cmd = \"source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}\"\n cmd = cmd.format(\n bento_home=self.bento_home,\n jar=os.path.join(self.movie_advisor_home, self.express_jar),\n myclass=class_name,\n kiji_uri=self.kiji_uri,\n ) + \" \" + options\n print(run(cmd))", "def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)", "def run(self):\n self.ae.start()", "def main():\n run_nutanix_vm_creation_module()", "def test_script(self) -> None:\n main()", "def main():\n obj = PowerMaxJob()\n obj.perform_module_operation()", "def faster(self):\n self.run_command('faster')", "def main():\n # Parameters:\n parser = define_parser()\n args = parser.parse_args()\n # General:\n save_policy = args.save_policy\n verbose = args.verbose\n wb = args.wandb\n benchmark = args.benchmark\n # Training:\n total_timesteps = args.total_timesteps\n # DQN:\n batch_size = args.batch_size\n epsilon_0 = args.epsilon_0\n train_freq = args.train_freq\n discount_factor = args.gamma\n learning_rate = args.learning_rate\n epsilon_min = args.eps_min\n exploration_fraction = args.exploration_fraction\n buffer_size = args.buffer_size\n tau = args.tau\n update_interval = args.update_interval\n gradient_steps = args.gradient_steps\n min_exp = args.min_exp\n\n timestamp = 
datetime.now().strftime(\"%Y%m%d.%H%M%S\")\n random_tag = \"\".join(random.choices(string.ascii_lowercase + string.digits, k=8))\n run_id = f\"{timestamp}-{random_tag}\"\n\n # Define path for logs:\n log_dir = Path(args.log_dir).resolve().joinpath(run_id)\n # Create directory if not already existing:\n log_dir.mkdir(parents=True, exist_ok=True)\n\n config = {\n \"total_timesteps\": total_timesteps,\n \"batch_size\": batch_size,\n \"buffer_size\": buffer_size,\n \"min_exp\": min_exp,\n \"target_update_interval\": update_interval,\n \"exploration_fraction\": exploration_fraction,\n \"epsilon_0\": epsilon_0,\n \"epsilon_min\": epsilon_min,\n \"train_freq\": (train_freq, \"episode\"),\n \"discount_factor\": discount_factor,\n \"learning_rate\": learning_rate,\n \"tau\": tau,\n \"gradient_steps\": gradient_steps,\n }\n\n # Weights & Biases (https://wandb.ai):\n if wb:\n import wandb\n from wandb.integration.sb3 import WandbCallback\n\n os.environ[\"WANDB_DISABLE_GIT\"] = \"True\"\n run = wandb.init(\n project=\"simulink_gym\",\n group=\"simulink_cartpole_env\" if not benchmark else \"gym_cartpole_env\",\n job_type=\"examples\",\n tags=[\"DQN\"],\n sync_tensorboard=True,\n config=config,\n dir=log_dir,\n save_code=False,\n id=run_id,\n )\n callback = WandbCallback()\n else:\n callback = None\n\n # Create training environment:\n if not benchmark:\n env = CartPoleSimulink()\n else:\n import gym\n\n env = gym.make(\"CartPole-v1\")\n\n # Create learning agent:\n agent = DQN(\n \"MlpPolicy\",\n env,\n buffer_size=config[\"buffer_size\"],\n batch_size=config[\"batch_size\"],\n gamma=config[\"discount_factor\"],\n learning_rate=config[\"learning_rate\"],\n learning_starts=config[\"min_exp\"],\n target_update_interval=config[\"target_update_interval\"],\n exploration_fraction=config[\"exploration_fraction\"],\n exploration_initial_eps=config[\"epsilon_0\"],\n exploration_final_eps=config[\"epsilon_min\"],\n train_freq=config[\"train_freq\"],\n tau=config[\"tau\"],\n gradient_steps=config[\"gradient_steps\"],\n verbose=verbose,\n tensorboard_log=str(log_dir),\n )\n\n # Train agent:\n agent.learn(\n total_timesteps=config[\"total_timesteps\"],\n log_interval=4,\n callback=callback,\n progress_bar=True,\n )\n\n # Save policy:\n if save_policy:\n policy = agent.policy\n policy.save(f\"{log_dir}/learned_policy\")\n\n env.close()\n\n if wb:\n run.finish()", "def main(period_in_minutes, path):\n #Initiate the peridic test object\n speed_test = periodictest.SpeedTest(path, period_in_minutes)\n\n start_tray_app(speed_test)", "def run(tag, devmode, img_passwd_file, install_server_hostname,\n custom_cli_subnet, custom_db_subnet, clitests, builder):\n manager = Manager(\n 'run', tag, devmode=devmode, img_passwd_file=img_passwd_file,\n install_server_hostname=install_server_hostname,\n custom_cli_subnet=custom_cli_subnet, custom_db_subnet=custom_db_subnet,\n clitests=clitests, builder_hostname=builder)\n manager.run()", "def test_run_experiment_locally(self) -> None:\n\n experiment = Experiment(\n name=\"torchx_booth_sequential_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n )\n ),\n options=SchedulerOptions(),\n )\n\n try:\n for _ in range(3):\n 
scheduler.run_n_trials(max_trials=2)\n\n # TorchXMetric always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.", "def startTestRun(self):", "def run_starter(self, expect_to_fail=False):\n logging.info(\"running starter \" + self.name)\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n\n lh.log_cmd(args)\n self.instance = psutil.Popen(args)\n logging.info(\"my starter has PID:\" + str(self.instance.pid))\n if not expect_to_fail:\n self.wait_for_logfile()\n self.wait_for_port_bind()", "def start(config: Config):\n return Optimizer(config).start()", "def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def runner(app):\n\n return app.test_cli_runner()", "def dev():\n trio.run(_dev_main)", "def main():\n executor(option().host)", "def main(args):\n\n # Compose the model list\n modellist = []\n if args['model']:\n modellist.append(bmark.ModelInfo(args['model'], os.getcwd(), args['classname']))\n\n # Load the benchmark settings\n benchmark = None\n benchmark = bmark.load_benchmark(args['benchmark'])\n corresponding_data = False\n if 'corresponding_data' in benchmark:\n corresponding_data = benchmark['corresponding_data']\n\n # Only extend if not cached\n cache_df = None\n if not args['cache']:\n modellist.extend(benchmark['models'])\n else:\n cache_df = pd.read_csv(args['cache'])\n\n # Extract comparator settings from benchmark description\n eval_comparator = comparator.EqualityComparator()\n if 'comparator' in benchmark:\n if benchmark['comparator'] == 'nvc':\n eval_comparator = comparator.NVCComparator()\n\n # Run the model evaluation\n is_silent = (args['output'] in ['html', 'server'])\n eva = None\n if benchmark['type'] == 'adaption':\n eva = evaluator.AdaptionEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n 
train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n elif benchmark['type'] == 'coverage':\n # Check for benchmark validity\n if benchmark['data.train'] or benchmark['data.train_person']:\n print('WARNING: Ignoring specified training and train_person data ' \\\n + 'for coverage evaluation...')\n\n eva = evaluator.CoverageEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n else:\n raise ValueError('Unknown benchmark type: {}'.format(benchmark['type']))\n\n with silence_stdout(is_silent):\n res_df = eva.evaluate()\n\n if 'save' in args:\n res_df.to_csv(args['save'], index=False)\n\n # Run the metric visualizer\n htmlcrtr = html_creator.HTMLCreator([\n viz_plot.AccuracyVisualizer(),\n viz_plot.BoxplotVisualizer(),\n viz_plot.TableVisualizer()\n ])\n\n # Prepare the benchmark output information and visualize the evaluation results\n benchmark_info = {\n 'name': os.path.basename(args['benchmark']),\n 'data.train': os.path.basename(\n benchmark['data.train']) if benchmark['data.train'] else '',\n 'data.train_person': os.path.basename(\n benchmark['data.train_person']) if benchmark['data.train_person'] else '',\n 'data.test': os.path.basename(benchmark['data.test']),\n 'type': benchmark['type'],\n 'corresponding_data': benchmark['corresponding_data'],\n 'domains': list(res_df['domain'].unique()),\n 'response_types': list(res_df['response_type'].unique()),\n }\n\n if args['output'] == 'browser':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n server.load_in_default_browser(html.encode('utf8'))\n elif args['output'] == 'server':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=True)\n sys.stdout.buffer.write(html.encode('utf-8'))\n elif args['output'] == 'html':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n print(html)", "def run(self, workbench, engine):\n pass", "def __init__(self, idx=None, arch=None, direction='encrypt', cipher_alg=None,\n hash_alg=None, aead_alg=None, sizes=None, offset=None,\n cold_cache=False, shani_off=False, force_job_api=False,\n unhalted_cycles=False, quick_test=False, smoke_test=False,\n imix=None, aad_size=None, job_iter=None, no_time_box=False, buffer_offset=None):\n global PERF_APP\n\n self.idx = idx\n self.arch = arch\n self.direction = direction\n self.cipher_alg = cipher_alg\n self.hash_alg = hash_alg\n self.aead_alg = aead_alg\n self.sizes = sizes\n self.offset = offset\n self.cmd = '{} --no-progress-bar '.format(PERF_APP)\n self.cmd_output = ''\n self.out = []\n self.core = None\n self.cold_cache = cold_cache\n self.shani_off = shani_off\n self.force_job_api = force_job_api\n self.unhalted_cycles = unhalted_cycles\n self.quick_test = quick_test\n self.smoke_test = smoke_test\n self.imix = imix\n self.aad_size = aad_size\n self.job_iter = job_iter\n self.no_time_box = no_time_box\n self.buffer_offset = buffer_offset\n\n if self.arch is not None:\n self.cmd += ' --arch {}'.format(self.arch)\n\n if self.offset is not None:\n self.cmd += ' -o {}'.format(self.offset)\n\n if self.aead_alg is not None:\n if self.cipher_alg is not None or \\\n self.hash_alg is not None:\n print(\"Invalid combination: aead + 
cipher / hash\", \\\n file=sys.stderr)\n sys.exit(1)\n self.cmd += ' --aead-algo {}'.format(self.aead_alg)\n\n if self.cipher_alg is not None:\n if self.aead_alg is not None:\n print(\"Invalid combination: aead + cipher\", file=sys.stderr)\n sys.exit(1)\n self.cmd += ' --cipher-algo {}'.format(self.cipher_alg)\n\n if self.hash_alg is not None:\n if self.aead_alg is not None:\n print(\"Invalid combination: aead + hash\", file=sys.stderr)\n sys.exit(1)\n self.cmd += ' --hash-algo {}'.format(self.hash_alg)\n\n if self.cipher_alg is not None or \\\n self.aead_alg is not None:\n self.cmd += ' --cipher-dir {}'.format(self.direction)\n\n if self.sizes is not None:\n self.cmd += ' --job-size {}'.format(self.sizes)\n\n if self.cold_cache is True:\n self.cmd += ' -c'\n\n if self.shani_off is True:\n self.cmd += ' --shani-off'\n\n if self.force_job_api is True:\n self.cmd += ' --force-job-api'\n\n if self.unhalted_cycles is True:\n self.cmd += ' --unhalted-cycles'\n\n if self.quick_test is True:\n self.cmd += ' --quick'\n\n if self.smoke_test is True:\n self.cmd += ' --smoke'\n\n if self.no_time_box is True:\n self.cmd += ' --no-time-box'\n \n if self.imix is not None:\n self.cmd += ' --imix {}'.format(self.imix)\n\n if self.aad_size is not None:\n self.cmd += ' --aad-size {}'.format(self.aad_size)\n\n if self.job_iter is not None:\n self.cmd += ' --job-iter {}'.format(self.job_iter)\n\n if self.buffer_offset is not None:\n self.cmd += ' --buffer-offset {}'.format(self.buffer_offset)", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def main():\r\n args = getargs()\r\n testng_file = args.testng_file\r\n url = args.url\r\n fetch_testng(testng_file, url)", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def quick_run(self, *args):\n self.inputs(*args)\n self.run()", "def main():\n\n run_manual_session()\n # run_automated_session()", "def main():\n parser = argparse.ArgumentParser(\n description='A testbench for the Google Cloud C++ Client Library')\n parser.add_argument('--host', default='localhost',\n help='The listening port')\n parser.add_argument('--port', help='The listening port')\n # By default we do not turn on the debugging. 
This typically runs inside a\n # Docker image, with a uid that has not entry in /etc/passwd, and the\n # werkzeug debugger crashes in that environment (as it should probably).\n parser.add_argument('--debug', help='Use the WSGI debugger',\n default=False, action='store_true')\n arguments = parser.parse_args()\n\n # Compose the different WSGI applications.\n application = wsgi.DispatcherMiddleware(root, {\n '/httpbin': httpbin.app,\n GCS_HANDLER_PATH: gcs,\n UPLOAD_HANDLER_PATH: upload,\n })\n serving.run_simple(arguments.host, int(arguments.port), application,\n use_reloader=True, use_debugger=arguments.debug,\n use_evalex=True)", "def Run(benchmark_spec):\n vms = benchmark_spec.vms\n master_vm = vms[0]\n run_command = 'cd %s && %s ./%s' % (hpcg.HPCG_DIR,\n _GetEnvironmentVars(benchmark_spec),\n RUN_SCRIPT)\n output, _ = master_vm.RobustRemoteCommand(run_command)\n return _MakeSamplesFromOutput(benchmark_spec, output)", "def bench_oneshot():\n sh(\"%s -Wa scripts\\\\internal\\\\bench_oneshot.py\" % PYTHON)", "def run(expt_config, result_q):\n # Set up sacred experiment and initialize a mongo observer.\n ex = Experiment()\n ex.add_config(expt_config)\n ex.observers.append(MongoObserver.create())\n\n @ex.capture\n def agent_setup(env_name, seed, gamma, model_layer_sizes,\n model_learning_rate, v_function_coeff, entropy_coeff,\n n_workers, n_train_episodes, activation_fn, n_steps):\n\n env = GymEnv(env_name)\n\n np.random.seed(seed)\n tf.set_random_seed(seed)\n env.env.seed(seed)\n\n network_config = dict(layer_sizes=model_layer_sizes,\n activation=activation_fn)\n output_sizes = [env.n_actions, 1]\n combined_model = ValuePolicyNetworkDense(network_config,\n output_sizes, gamma,\n n_steps=n_steps)\n\n agent = A2CAgent(env, combined_model, model_learning_rate,\n v_function_coeff=v_function_coeff,\n gamma=gamma,\n entropy_coeff=entropy_coeff,\n n_workers=n_workers,\n experiment=ex)\n\n return agent, env\n\n @ex.main\n def agent_train_and_eval(n_train_episodes):\n agent, env = agent_setup()\n _ = agent.interact(n_train_episodes, show_progress=False)\n returns = agent.logger.get_values(\"episode.returns\")\n result_q.put(returns)\n\n return ex.run()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def run(*args):\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()", "def main():\n run_program()", "def main():\n run_test_all()", "def main():\n # Get credentials.\n logging.info('Obtaining Mega login credentials.')\n credentials = {}\n if os.path.exists(CREDENTIALS_FILE):\n credentials = json.load(open(CREDENTIALS_FILE))\n else:\n credentials['user'] = raw_input('User: ')\n credentials['password'] = getpass.getpass()\n \n # Create the required Mega API objects.\n executor = AsyncExecutor()\n api = MegaApi(APP_KEY, None, None, 'Python CRUD example')\n listener = 
AppListener(executor.continue_event)\n api.addListener(listener)\n\n # Run the operations.\n start_time = time.time()\n worker(api, listener, executor, credentials)\n logging.info('Total time taken: {} s'.format(time.time() - start_time))", "def run_datalab_fbs():\n\n appctxt = ApplicationContext()\n win = DataLab()\n # debug_setup(win)\n win.show()\n exit_code = appctxt.app.exec_()\n sys.exit(exit_code)", "def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)", "def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def __init__(self,l,options):\n #### Setup options\n self.options = options\n # For execution\n self.shots = 1000 if options.get('shots') == None\\\n else options.get('shots')\n self.seed = options.get('seed')\n if self.seed != None:\n from qiskit.aqua import aqua_globals\n aqua_globals.random_seed = self.seed\n self.prnt = options.get('print')\n self.ancilla_measure = options.get('ancilla') if options.get('ancilla') != None else False\n\n self.ibmq = False\n if options.get('ibmq') == True:\n print('Running on real quantum computer')\n self.ibmq = True\n self.backend = options['backend']\n from qiskit.tools.monitor import job_monitor\n self.monitor = job_monitor\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n None,\n self.shots)\n \n else:\n # For Backend\n if options.get('backend') == None:\n self.options['backend'] = 'qasm_simulator' \n self.backend = qk.Aer.get_backend(options['backend'])\n # For noise model, coupling map and basis gates\n self.noise_model, self.coupling_map, self.basis_gates = None,None,None\n self.meas_fitter = None\n if options.get('device') != None:\n device = QuantumComputer(options.get('device'))\n if options.get('noise_model') != None:\n self.noise_model = device.noise_model\n # Create error mitigation fitter\n if options.get('meas_fit') in [None,True]:\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n device,\n self.shots)\n if options.get('coupling_map') != None:\n self.coupling_map = device.coupling_map\n if options.get('basis_gates') != None:\n self.basis_gates = device.basis_gates\n # Qubit layout, virtual to physical\n self.layout = options.get('layout')\n # Optimization level\n self.optimization_level= 1 if options.get('optimization_level')==None else options['optimization_level']\n\n # GPU accelerated\n if options.get('gpu'):\n from qiskit_qcgpu_provider import QCGPUProvider\n Provider = QCGPUProvider()\n self.backend = Provider.get_backend(options['backend'])", "def launch_instance(cls, argv=None, **kwargs):\n try:\n return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)\n except NoStart:\n return", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def main():\n rclpy.init()\n\n worker_id = int(sys.argv[1])\n policy_type = sys.argv[2]\n node = WorkerSync(worker_id, 'worker_node', policy_type)\n\n try:\n executor = MultiThreadedExecutor()\n steps = 0\n\n while 
rclpy.ok():\n if node.flag.pull:\n node.pull(executor)\n\n elif node.flag.collect:\n steps = node.collect()\n\n elif node.flag.compute:\n node.compute(steps)\n\n elif node.flag.push:\n experiment_complete = node.push(executor)\n node.upkeep()\n\n # End experiment if passed number of max episodes.\n if experiment_complete:\n node.test(100)\n break\n\n except KeyboardInterrupt:\n pass\n\n # Destroy the node explicitly\n node.destroy_node()\n rclpy.shutdown()", "def run(self):\n \n pass", "def run_starter(self, expect_to_fail=False):", "def start(\n context: click.Context,\n case_id: str,\n gender: str,\n genome_version: str,\n panel_bed: str,\n pon_cnn: str,\n slurm_quality_of_service: str,\n run_analysis: bool,\n dry_run: bool,\n):\n LOG.info(f\"Starting analysis for {case_id}\")\n try:\n context.invoke(resolve_compression, case_id=case_id, dry_run=dry_run)\n context.invoke(link, case_id=case_id, dry_run=dry_run)\n context.invoke(\n config_case,\n case_id=case_id,\n gender=gender,\n genome_version=genome_version,\n panel_bed=panel_bed,\n pon_cnn=pon_cnn,\n dry_run=dry_run,\n )\n context.invoke(\n run,\n case_id=case_id,\n slurm_quality_of_service=slurm_quality_of_service,\n run_analysis=run_analysis,\n dry_run=dry_run,\n )\n except DecompressionNeededError as error:\n LOG.error(error)", "def prepare_new_app(config):\n app_dir = 'tmp/test_benchmarks/'\n config['app_dir'] = app_dir\n #: Create an app to to test\n if exists(app_dir):\n #: If using an emulator enable forwarding\n if \"emulator-\" in sh.adb('devices'):\n sh.adb(\"forward\", \"tcp:8888\", \"tcp:8888\")\n\n return # App already made\n #if config['app_built']:\n # return # App already made\n #else:\n # #: Cleanup the old app\n # cleanup_app(config)\n\n enamlnative = sh.Command('./enaml-native')\n print(enamlnative('init', 'Benchmarks', 'com.codelv.enamlnative.benchmarks',\n 'tmp/test_benchmarks/'))\n\n config['app_built'] = True\n\n with cd(join(app_dir,'Benchmarks')):\n with source_activated('venv', 'enaml-native') as enamlnative:\n #: Now build python\n print(enamlnative('build-python'))\n\n #: Build and do a gradle sync, this will NOT include jni and native libs!\n print(enamlnative('build-android'))\n\n #: Now build python (again) to put them in the correct spot\n print(enamlnative('build-python'))\n\n #: Now try to run it and see if it crashes\n #: Requires emulator or device\n assert len(sh.adb('devices').strip().split(\"\\n\")) > 0, \"No device is connected, \" \\\n \"can't test the build!\"\n #: Flush logcat\n sh.adb('logcat', '--clear')\n\n #: Do a build and run\n print(enamlnative('run-android'))\n #: Wait a few seconds\n\n #: If using an emulator enable forwarding\n if \"emulator-\" in sh.adb('devices'):\n sh.adb(\"forward\", \"tcp:8888\", \"tcp:8888\")", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def main():\n conf = load_configuration()\n config.load_configuration(conf)\n args = config.get_conf()\n if args.debug:\n logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\n else:\n logging.basicConfig(level=logging.INFO, 
format=LOG_FORMAT)\n\n logging.getLogger('kazoo').setLevel(logging.WARNING)\n logging.getLogger('requests').setLevel(logging.WARNING)\n logging.getLogger('urllib3').setLevel(logging.WARNING)\n logging.getLogger('docker').setLevel(logging.INFO)\n logging.getLogger(\"tornado\").setLevel(logging.DEBUG)\n\n state = FakeSQLManager()\n\n zapp_description = json.load(args.jsonfile)\n\n print('Validating zapp description...')\n zoe_lib.applications.app_validate(zapp_description)\n\n exec_id = state.execution_new('test', 'fake_user', zapp_description)\n e = state.execution_list(only_one=True, id=exec_id)\n _digest_application_description(state, e)\n\n print('Zapp digested, starting containers...')\n execution_to_containers(e)\n\n print('Giving the containers a few seconds to start...')\n time.sleep(5)\n\n swarm = SwarmClient(args)\n for service in e.services:\n print(\"Service {}, docker ID: {}\".format(service.name, service.docker_id))\n logs = swarm.logs(service.docker_id, False)\n logs = logs.decode('utf-8').split('\\n')\n for log_line in logs[-10:]:\n print(log_line)\n\n print(\"Execution as been started, press CTRL-C to terminate it\")\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n pass\n\n print('Terminating...')\n terminate_execution(e)", "def test_runner(self):\n run(HelloWorldApi, [\"/bin/test_api.py app_root/bin/libs\", \"app_root/bin/libs\"], sys.stdin.buffer,\n sys.__stdout__.buffer)", "def main():\n print(\"def main\")\n return APP.run()", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id$\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option( \"-p\", \"--proc\", dest=\"processors\", type=\"int\",\n help = \"use # processors [%default]\" )\n\n parser.set_defaults(\n processors = 1 )\n\n\n options, args = E.Start( parser, argv = argv )\n\n t1 = Test( RunnerGat, \n small_test_segmented_workspaces(), \n [ ValidatorNumSamples,\n ValidatorSegmentDistribution ] )\n\n t1.run( options.stdout, \n processors = options.processors )\n\n E.Stop()", "def runtest(self):" ]
[ "0.6429091", "0.6309221", "0.62545246", "0.61604047", "0.6152724", "0.60954607", "0.6065459", "0.6057383", "0.60311", "0.59965384", "0.59894437", "0.5950306", "0.58948225", "0.5882779", "0.5840209", "0.5838446", "0.5817836", "0.58095634", "0.5783761", "0.5774965", "0.57688946", "0.576136", "0.5720185", "0.5715629", "0.57140607", "0.5701221", "0.5699784", "0.5675939", "0.5670083", "0.5669999", "0.5655669", "0.5651759", "0.5644453", "0.5635426", "0.5619572", "0.56145185", "0.56119764", "0.5605504", "0.5603435", "0.5599896", "0.55785805", "0.55748177", "0.55696565", "0.5558786", "0.5557691", "0.5537888", "0.5534075", "0.55311394", "0.5530368", "0.5530092", "0.55277884", "0.5527727", "0.5526887", "0.5522915", "0.55187863", "0.55168724", "0.5510815", "0.5510387", "0.5507324", "0.5503287", "0.54961723", "0.5493514", "0.5492179", "0.5490238", "0.548531", "0.5484979", "0.5484979", "0.5484979", "0.54833436", "0.5482518", "0.5480856", "0.54747725", "0.54690516", "0.5466279", "0.5453024", "0.5450925", "0.5450925", "0.5450925", "0.54448336", "0.54327524", "0.5431008", "0.5430707", "0.54294837", "0.5427931", "0.54266447", "0.5426117", "0.5424452", "0.54243433", "0.5419784", "0.54069376", "0.54051644", "0.5404532", "0.54041284", "0.54030794", "0.5394127", "0.53890455", "0.5387017", "0.53855973", "0.53808683", "0.53797877" ]
0.78506184
0
detecting whether the processes the starter spawned are still there
def detect_instance_pids_still_alive(self):\n    missing_instances = []\n    running_pids = psutil.pids()\n    for instance in self.all_instances:\n        if instance.pid not in running_pids:\n            missing_instances.append(instance)\n    if len(missing_instances) > 0:\n        logging.error(\n            "Not all instances are alive. The following are not running: %s",\n            str(missing_instances),\n        )\n        logging.error(get_process_tree())\n        raise Exception("instances missing: " + str(missing_instances))\n    instances_table = get_instances_table(self.get_instance_essentials())\n    logging.info("All arangod instances still running: \n%s", str(instances_table))\n    attach_table(instances_table, "Instances table")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_started(self):\n return bool(self._processes)", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()", "def check_subprocesses(self) : \n for sp_ident in self.active_subprocesses :\n if not os.path.exists(\"%s/%s.rc\" % (self.spool_dir, sp_ident ) ) : continue\n self.finished_subprocesses[sp_ident] = self.get_subprocess_result(sp_ident)\n self.active_subprocesses.pop(sp_ident)", "def check_process_full(self) -> None:\n if len(self.process_queue) >= self.max_processes:\n task_name, sp = self.process_queue.pop()\n sp.wait()", "def check_running(self):\n remove = []\n\n # iterate over all \"running\" processes\n for proc in self.processes:\n # if the process has stopped\n if proc['proc'].poll() is not None:\n if proc['type'] == 'rtmpdump':\n self.logger.info(\n proc['model'] + \" is no longer being captured\")\n if os.path.isfile(proc['filename']):\n proc_stats = self.get_proc_stats(proc)\n if proc_stats['file_size'] == 0:\n self.logger.warning(\"Capture size is 0kb, deleting.\")\n os.remove(proc['filename'])\n else:\n self.move_to_complete(proc)\n message = (\"Finished:\" +\n proc['model'] + \" - \" +\n \"Started at \" +\n proc_stats['started_at'] + \" | \" +\n \"Size:\" +\n proc_stats['formatted_file_size'] + \" | \" +\n \"Duration:\" +\n proc_stats['recording_time'])\n self.logger.info(message)\n if self.push_bullet is not None:\n self.push_bullet.push_note(\"Chaturbate\", message)\n elif proc['type'] == 'ffmpeg':\n if proc['proc'].poll() == 0:\n os.remove(proc['source'])\n else:\n self.logger.warning(\"Something went wrong with ffmpeg, not deleting\")\n\n remove.append(proc['id'])\n\n # remove all items in remove from self.processes\n procs = self.processes\n for item in remove:\n procs = [f for f in procs if f['id'] != item]\n self.processes = procs", "def race_condition():\n if len(allocated_pids) != len(set(allocated_pids)):\n return True\n else:\n return False", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if 
is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def isstarted():", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return process.is_alive()", "def _keep_running():\n return True", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def should_fan_out(number_of_subprocesses):\n return number_of_subprocesses > 1", "def should_keep_running(self):\n return len(self.party.active_users())", "def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def check_launcher():\n\n # Storage in memory which holds info about currently running checks\n storage = {}\n\n # Storage in memory which holds process info: process id and project objects\n processes = {}\n\n # Close previously opened connections (if the exist)\n django.db.connections.close_all()\n\n while True:\n # Making Copy in order to compare updates in data base\n new_storage = copy.deepcopy(storage)\n\n # Fetch data from database\n check_sync(new_storage)\n\n # Get storage keys in order to compare storages for changes\n old_keys = set(storage.keys())\n new_keys = set(new_storage.keys())\n\n # Get keys of elements in init storage and updated storage\n added_checks = new_keys.difference(old_keys)\n deleted_checks = old_keys.difference(new_keys)\n common_checks = new_keys.intersection(old_keys)\n\n # Launch new processes\n for check_id in added_checks:\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n # Stop (kill) deleted check's prorcesses\n for check_id in deleted_checks:\n stop_process(check_id, storage, processes)\n\n for check_id in common_checks:\n if storage[check_id] != new_storage[check_id]:\n stop_process(check_id, storage, processes)\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n storage = copy.deepcopy(new_storage)\n time.sleep(30)", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "def procs_running():\n \n return __proc_stat('procs_running')", "def running(self) -> bool:", "def test_workers_die_when_main_process_dies(self):\n manager = Manager()\n return_list = manager.list()\n\n def run_process_pool(return_list):\n pool = ProcessPool(1)\n pool.start(WorkerIdGeneratingWorker)\n return_list.append(pool._workers[0].pid)\n # We dont call pool.stop() and hence leave workers alive\n\n process = Process(target=run_process_pool, args=(return_list,))\n process.start()\n process.join()\n # The worker has now started\n\n worker_pid = return_list[0]\n\n for _ in range(20):\n worker_is_alive = pid_exists(worker_pid)\n if not worker_is_alive:\n break\n time.sleep(0.1)\n self.assertFalse(worker_is_alive)", "def is_alive(self):\n result = execute('ps -Ao pgid', check_pg_alive=False, stdout=PIPE)\n pgids = result['stdout'].decode('utf8').split()\n return str(self.process.pid) in pgids", "def is_running(program):\n return program in get_running()", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return 
False", "def check_processes(process_list):\n running = 1 # 0 when the subprocesses are all done\n while running:\n for proc in process_list:\n proc.poll()\n if proc.returncode == 1:\n raise RuntimeError(\"Program \" +\n \"number \" +\n \"{}\".format(process_list.index(proc)) +\n \" failed.\")\n running = bool(sum([int(proc.returncode) for proc in process_list]))\n return True", "def checkProcess(self):\n process = subprocess.Popen(\"ps -A | grep g13d\", stdout=subprocess.PIPE, shell=True)\n out, err = process.communicate()\n if out != '':\n self.ui.but_activate.setEnabled(False)\n self.ui.lab_active.setText(\"Running ok\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : green; }\");\n else:\n self.ui.but_activate.setEnabled(True)\n self.ui.lab_active.setText(\"Not Started\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : red; }\");", "def already_running(pid_file):\n\n\tassert ltrace(TRACE_PROCESS, u'| already_running({0}) ↣ {1}',\n\t\t(ST_PATH, pid_file), (ST_ATTR, os.path.exists(pid_file) and\n\t\t\tos.path.exists('/proc/' + open(pid_file, 'r').read().strip())))\n\n\treturn os.path.exists(pid_file) and \\\n\t\tos.path.exists('/proc/' + open(pid_file, 'r').read().strip())", "def num_processes():\n return 1", "def check_lock(self):\n if self._lockfilename is None:\n print \"No lockfile specified in the configuration for the application.\"\n sys.exit(1)\n lockers = self.config.options('locks')\n for locker in lockers:\n lockfilename = self.config.get('locks', locker)\n if os.path.isfile(lockfilename):\n if not AppHandler.is_running(lockfilename):\n AppHandler.remove_lock(lockfilename)\n else:\n print \"Process is currently running. Please wait for it to finish.\"\n sys.exit(1)", "def num_processes(self):\n return 1", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def at_least_one_alive(self, containers):\n for container in self.get_standard_containers(containers):\n # Update container variables so that status is accurate.\n container.container.reload()\n if container.container.status != 'exited':\n return True\n return False", "def wait_process_running(process):\n assert process.is_running()", "def check_all_critical_processes_running(duthost):\n processes_status = duthost.all_critical_process_status()\n for container_name, processes in processes_status.items():\n if processes[\"status\"] is False or len(processes[\"exited_critical_process\"]) > 0:\n return False\n\n return True", "def running(self):\n return not self._kill_event.is_set()", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def is_running(self):\n\t\treturn self in _running", "def sync_processes(self, *args, **kwargs):\n return True", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n 
continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def running(self):\n return self.sub_process and self.sub_process.is_alive()", "def running(self):\n\t\treturn self._start is not None", "def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))", "def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def test_no_empty_process(self):\n perf_data = TestHelper.testdata_path('two_process_perf.data')\n record_data = self.get_record_data(['-i', perf_data])\n processes = record_data['sampleInfo'][0]['processes']\n self.assertEqual(len(processes), 2)\n\n # One process is removed because all its threads are removed for not\n # reaching the min_func_percent limit.\n record_data = self.get_record_data(['-i', perf_data, '--min_func_percent', '20'])\n processes = record_data['sampleInfo'][0]['processes']\n self.assertEqual(len(processes), 1)", "def test_PSA_ONLY_PROCESS(self):\n self.verify_references_to_prerequisites(processes.PSA_ONLY_PROCESS)", "def waitUntilSubprocessLaunched(self):\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched", "def test_all_workers_are_active_processes(self):\n WORKERS_COUNT = 10\n\n # Testing only ProcessPool since only it has the mechanism that waits for all workers to come online before\n # start finishes\n pool = ProcessPool(WORKERS_COUNT)\n\n pool.start(WorkerIdGeneratingWorker)\n for _ in range(100):\n pool.ventilate()\n\n active_worker_ids = [pool.get_results() for _ in range(100)]\n self.assertEqual(set(range(WORKERS_COUNT)), set(active_worker_ids))\n\n pool.stop()\n pool.join()", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. 
Base directory: {str(self.basedir)}\")", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def _is_running(self):\n try:\n # Process is not killed, os.kill(pid, 0) does nothing but raise if process does not\n # exist.\n os.kill(self.pid, 0)\n except ProcessLookupError:\n return False\n else:\n return True", "def basic_overcloud_processes_running(self):\n for attempt_number in range(600):\n\n try:\n\n for process_name in self.processes_to_check:\n # osp16/python3 process is \"neutron-server:\"\n if process_name == 'neutron-server' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'neutron-server:'\n # osp17 mysqld process name is mysqld_safe\n if process_name == 'mysqld' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'mysqld_safe'\n # redis not deployed on osp17 by default, only if some\n # other services such as designate and octavia are deployed\n if (process_name == 'redis-server' and\n not overcloud.is_redis_expected()):\n redis_message = (\"redis-server not expected on OSP 17 \"\n \"and later releases by default\")\n if self.oc_procs_df.query(\n f'PROCESS==\"{process_name}\"').empty:\n LOG.info(redis_message)\n continue\n else:\n raise OvercloudProcessesException(\n process_error=redis_message)\n\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n LOG.info(\"overcloud processes status checks: \"\n \"process {} is \"\n \"in running state\".format(process_name))\n continue\n else:\n LOG.info(\"Failure : overcloud processes status checks:\"\n \"process {} is not running \".format(\n process_name))\n raise OvercloudProcessesException(\n process_error=\"process {} is not running \".format(\n process_name))\n # if all procs are running we can return true\n return True\n except OvercloudProcessesException:\n LOG.info('Retrying overcloud processes checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n # exhausted all retries\n tobiko.fail('Not all overcloud processes are running !\\n')", "def isRunning(self):\n if not self.hasBeenStarted():\n return False\n \n if not self._slave_dhcp_client_proc.poll(): # Poll our direct child (sudo)\n return False\n \n for pid in self._all_processes_pid:\n if not self._checkPid(pid):\n return False\n \n return True", "def running(self):\n return self._lifetime_state in {\"starting\",\"running\",\"finishing\"}", "def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False", "def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True", "def isUp(server: str) -> bool:\n\n for process in psutil.process_iter(attrs=['cmdline']):\n if f'{server}.jar' in process.info['cmdline']:\n return True\n return False", "def i_am_locking(self):\n result = False\n current_pid = os.getpid()\n pidfile_pid = self.read_pid()\n if current_pid == pidfile_pid:\n result = True\n return result", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def 
check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def is_monitor_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "def is_process_running(self, name):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Checking to see if the process {} is \"\n \"running\".format(log_tag, name))\n return self.get_pids(name) is not None", "async def _check_parent():\n try:\n curr_proc = psutil.Process()\n parent_death_cnt = 0\n while True:\n parent = curr_proc.parent()\n # If the parent is dead, it is None.\n parent_gone = parent is None\n init_assigned_for_parent = False\n parent_changed = False\n\n if parent:\n # Sometimes, the parent is changed to the `init` process.\n # In this case, the parent.pid is 1.\n init_assigned_for_parent = parent.pid == 1\n # Sometimes, the parent is dead, and the pid is reused\n # by other processes. In this case, this condition is triggered.\n parent_changed = self.ppid != parent.pid\n\n if parent_gone or init_assigned_for_parent or parent_changed:\n parent_death_cnt += 1\n logger.warning(\n f\"Raylet is considered dead {parent_death_cnt} X. \"\n f\"If it reaches to {_PARENT_DEATH_THREASHOLD}, the agent \"\n f\"will kill itself. Parent: {parent}, \"\n f\"parent_gone: {parent_gone}, \"\n f\"init_assigned_for_parent: {init_assigned_for_parent}, \"\n f\"parent_changed: {parent_changed}.\"\n )\n if parent_death_cnt < _PARENT_DEATH_THREASHOLD:\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n continue\n\n log_path = os.path.join(self.log_dir, \"raylet.out\")\n error = False\n msg = f\"Raylet is terminated: ip={self.ip}, id={self.node_id}. \"\n try:\n with open(log_path, \"r\", encoding=\"utf-8\") as f:\n # Seek to _RAYLET_LOG_MAX_TAIL_SIZE from the end if the\n # file is larger than that.\n f.seek(0, io.SEEK_END)\n pos = max(0, f.tell() - _RAYLET_LOG_MAX_TAIL_SIZE)\n f.seek(pos, io.SEEK_SET)\n # Read remaining logs by lines.\n raylet_logs = f.readlines()\n # Assume the SIGTERM message must exist within the last\n # _RAYLET_LOG_MAX_TAIL_SIZE of the log file.\n if any(\n \"Raylet received SIGTERM\" in line\n for line in raylet_logs\n ):\n msg += \"Termination is graceful.\"\n logger.info(msg)\n else:\n msg += (\n \"Termination is unexpected. Possible reasons \"\n \"include: (1) SIGKILL by the user or system \"\n \"OOM killer, (2) Invalid memory access from \"\n \"Raylet causing SIGSEGV or SIGBUS, \"\n \"(3) Other termination signals. 
\"\n f\"Last {_RAYLET_LOG_MAX_PUBLISH_LINES} lines \"\n \"of the Raylet logs:\\n\"\n )\n msg += \" \" + \" \".join(\n raylet_logs[-_RAYLET_LOG_MAX_PUBLISH_LINES:]\n )\n error = True\n except Exception as e:\n msg += f\"Failed to read Raylet logs at {log_path}: {e}!\"\n logger.exception(msg)\n error = True\n if error:\n logger.error(msg)\n # TODO: switch to async if necessary.\n ray._private.utils.publish_error_to_driver(\n ray_constants.RAYLET_DIED_ERROR,\n msg,\n gcs_publisher=ray._raylet.GcsPublisher(\n address=self.gcs_address\n ),\n )\n else:\n logger.info(msg)\n sys.exit(0)\n else:\n parent_death_cnt = 0\n await asyncio.sleep(\n dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_S\n )\n except Exception:\n logger.exception(\"Failed to check parent PID, exiting.\")\n sys.exit(1)", "def check(self):\n\n if not self.running:\n return False\n\n # On my machine, os.kill is faster and takes ~0.3usec while os.stat and P.exists take ~1.5usec (using timeit)\n # However, with kill if the process is under a separate UID, PermissionError is raised\n # Could try os.kill and fallback to P.exists and save the choice, but that's just overcomplicated\n\n running = P.exists(self.path)\n if running:\n self.update_status()\n else:\n # Process ended since last check, recond end time\n self.running = False\n self.ended_datetime = datetime.now()\n # TODO duration attribute could have a value while running; update in getter method\n self.duration = self.ended_datetime - self.created_datetime\n # Formats like 3:06:29.873626, so cutoff microseconds\n text = str(self.duration)\n self.duration_text = text[:text.rfind('.')]\n\n return running", "def has_finished():", "def work_sleep(arg):\n time, pids = arg\n sleep(time)\n res = True\n for p in pids:\n res &= psutil.pid_exists(p)\n return res", "def running(self):\n # search for a PID file for the manager\n candidate_files = os.listdir(self.cache_dir_)\n for file_name in candidate_files:\n if file_name == MGR_PID_FILE:\n file_name = os.path.join(self.cache_dir_, file_name)\n\n try:\n f = open(file_name, 'r')\n self.server_pid_ = int(f.readline())\n except ValueError:\n logging.warning('Server PID file accessed while in creation')\n return False\n return True\n\n return False", "def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False", "def poll_process_done(self) -> None:\n while len(self.process_queue) >= self.max_processes:\n self.check_process_done()", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)", "def is_alive(self):\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))", "def waitForSubprocessNotRunning(self):\n if not self._has_launched or not self._server_thread.is_alive():\n return\n self._server_thread.join()\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched", "def refresh_pids(active_pids, resources):\n still_active_pids = []\n no_change = True\n for info in active_pids:\n pid, gpu, title, cmd, lock_path = info\n if still_active(pid, cmd):\n still_active_pids.append(info)\n else:\n print(f\"[{time.strftime(time.ctime())}] {title} seems to be over.\")\n os.remove(lock_path)\n resources.free(gpu=gpu)\n no_change = False\n return still_active_pids, no_change", "def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active", "def other_threads_are_active():\n return 
len(fake_threads) >= 2", "def process_exists(pid=None, name=None):\n\n return count_processes(pid, name) > 0", "def check_ambari_server_process_up(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output)", "def __is_complete__(self,config,mockdb):\n if GenericProcess.__is_complete__(self):\n return True\n if self.pipelines is None:\n return False\n for pipeline in self.__current_pipeline_list__(mockdb):\n if not pipeline.__is_complete__():\n return False\n return True", "def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def is_main_process() -> bool:\n return multiprocessing.current_process().name == 'MainProcess' and os.environ['main_process_pid'] == str(os.getpid())", "def status(self, *args):\n for k, v in self.processers.items():\n if v:\n if v.poll() is None:\n status = 'running'\n else:\n status = 'dead'\n else:\n status = 'stoped'\n print '%s - %s' % (k, status)", "def check_gpsync_running(options):\n \n return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0", "def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists", "def wait_for_exec_to_start():\n node_instances = self.client.node_instances.list()\n for ni in node_instances:\n # this will keyerror out (and be retried) if the operation\n # didn't run yet\n pids[ni.node_id] = ni.runtime_properties['pid']", "def finalize(self):\n for p in self._processes:\n if p.join(30) is None and p.exitcode is None:\n p.kill()", "def is_proc_alive(pid):\n return os.path.isdir(\"/proc/%i\" % pid)", "def is_scheduled(self) -> bool:\n return not self.terminated and self.__state != Process.IDLE", "def is_running(self) -> bool:\n return False", "def running(self):\n return bool(self.proc and self._running())", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def is_running(self):\n if self.__process.poll() is not None: # process has ended\n for nbsr in (\"stdout\", \"stderr\"):\n getattr(self, nbsr).finalise()\n return False\n return True", "def archive_is_running():\n if get_matching_pids(\"enbackup-archive\"):\n return True\n else:\n return False" ]
[ "0.70939624", "0.70884717", "0.7061167", "0.7040123", "0.6903951", "0.68766594", "0.6748461", "0.66941243", "0.6645801", "0.6616109", "0.6574495", "0.6571236", "0.6525034", "0.64792585", "0.6474497", "0.6439188", "0.64198804", "0.6415135", "0.64056075", "0.63858753", "0.6368202", "0.6352989", "0.63491726", "0.63445574", "0.63423085", "0.63312155", "0.6328948", "0.6319107", "0.6318055", "0.63140976", "0.63111997", "0.62984645", "0.62901115", "0.62882817", "0.6222427", "0.6211795", "0.62065357", "0.6196628", "0.6196548", "0.61942095", "0.6192526", "0.61835396", "0.6176957", "0.6160777", "0.6156542", "0.61495334", "0.6132919", "0.6129565", "0.61256886", "0.61156535", "0.61034626", "0.610318", "0.6100206", "0.6092125", "0.60843915", "0.6072034", "0.60609204", "0.6052557", "0.60501695", "0.6042851", "0.6039846", "0.6037389", "0.60167915", "0.60131025", "0.60109615", "0.6010186", "0.60053146", "0.6001416", "0.5994073", "0.59872717", "0.59782445", "0.59759843", "0.59723574", "0.59618807", "0.5952248", "0.59456587", "0.594414", "0.5939244", "0.59324616", "0.5928691", "0.5927685", "0.59252536", "0.59126735", "0.59125394", "0.5911498", "0.59022176", "0.58969736", "0.5894902", "0.5891877", "0.58760077", "0.58727324", "0.5872249", "0.5871829", "0.5869835", "0.5861016", "0.5854176", "0.58526665", "0.5846545", "0.58377475", "0.58319896" ]
0.6200317
37
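The negatives in the record above all circle one pattern: deciding whether a process is still alive from its PID. A minimal standalone sketch of that pattern, assuming POSIX semantics (the function name is ours, not from any record); signal 0 performs the existence/permission check without delivering a signal:

import os

def is_proc_alive(pid: int) -> bool:
    # Signal 0 only checks that the PID exists and is signalable.
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False  # no such process
    except PermissionError:
        return True   # process exists but is owned by another user
    return True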
enables / disables maintenance mode
def maintainance(self, on_off, instance_type):
    print(("enabling" if on_off else "disabling") + " Maintainer mode")
    tries = 60
    while True:
        reply = self.send_request(
            instance_type,
            requests.put,
            "/_admin/cluster/maintenance",
            '"on"' if on_off else '"off"',
        )
        if len(reply) > 0:
            print("Reply: " + str(reply[0].text))
            if reply[0].status_code == 200:
                return
            print(f"Reply status code is {reply[0].status_code}. Sleeping for 3 s.")
            time.sleep(3)
            tries -= 1
        else:
            print("Reply is empty. Sleeping for 3 s.")
            time.sleep(3)
            tries -= 1
        if tries <= 0:
            action = "enable" if on_off else "disable"
            raise Exception(f"Couldn't {action} maintainance mode!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_maintence_mode(self):\n pass", "def test_enable_maintence_mode1(self):\n pass", "def maintenance_mode():\n pass", "def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def check_enable_mode(self, *args, **kwargs):\n pass", "async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")", "async def toggle(self, ctx: BBContext):\n\n self.code_enabled = not self.code_enabled\n e = 'enabled.' if self.code_enabled else 'disabled.'\n await ctx.send(f\"Bunker code auto reaction has been : **{e}**\")\n self.bot.logger.info('Bunker code listener %s by %s', e, str(ctx.author))", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def mode_auto(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Bot Auto Mode Set\")\n self.patrol()", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def _on_autonomous_enable(self) -> None:\n\n # XXX: FRC Dashboard compatibility\n # -> if you set it here, you're stuck using it. 
The FRC Dashboard\n # doesn't seem to have a default (nor will it show a default),\n # so the key will only get set if you set it.\n auto_mode = wpilib.SmartDashboard.getString(\"Auto Selector\", None)\n if auto_mode is not None and auto_mode in self.modes:\n logger.info(\"Using autonomous mode set by LabVIEW dashboard\")\n self.active_mode = self.modes[auto_mode]\n else:\n self.active_mode = self.chooser.getSelected()\n\n if self.active_mode is not None:\n logger.info(\"Enabling '%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_enable()\n else:\n logger.warning(\n \"No autonomous modes were selected, not enabling autonomous mode\"\n )", "def enable(self) -> None:", "def check_enable_mode(self, check_string='#'):\n return True", "def toggle(self, *_):\r\n \r\n global ac\r\n if self.author_f_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_m_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_l_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n else:\r\n self.add_a['state'] = 'disabled'", "def enable(self):\n pass", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enable(self):\r\n self.update(enabled=True)", "def enable(self):", "def setAutomaticMode(self, enabling: bool) -> None:\n ...", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def exit_enable_mode(self, *args, **kwargs):\n pass", "async def _hardcore_setheist(self, ctx):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n\r\n if config[\"Hardcore\"]:\r\n config[\"Hardcore\"] = False\r\n msg = \"Hardcore mode now OFF.\"\r\n else:\r\n config[\"Hardcore\"] = True\r\n msg = \"Hardcore mode now ON! 
**Warning** death will result in credit **and chip wipe**.\"\r\n await self.thief.config.guild(guild).Config.set(config)\r\n await ctx.send(msg)", "def __set_mode(self, value):\n # update Nuke\n localization.setMode(str(value.lower()))\n # update panel UI\n logger.debug('disabling pause button: %s', value=='Off')\n # if the localization mode is off diasble pause and force widgets\n self.pauseBtn.setDisabled(value == 'Off')\n self.updateBtn.setDisabled(value == 'Off')\n self.__update_pause_icon()", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return", "def turn_test_mode_off_by_default(test_mode_off):", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def handleModeToggle(self):\n self.filesList.changeMode(not self.autoMode)\n if self.autoMode:\n self.modeToggle.setText(\"Auto Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Manual Mode)\")\n else:\n self.modeToggle.setText(\"Manual Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Auto Mode)\")\n self.autoMode = not self.autoMode", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def disable(ctx):\n\n fc_group_cfg = {}\n fc_group_cfg['FLEX_COUNTER_STATUS'] = DISABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", ACL, fc_group_cfg)", "def apply_settings(self):\n return True", "def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})", "def _disable(self):\n self.enabled = False", "def enable(self):\n hoomd.util.print_status_line();\n\n if self.enabled == False:\n hoomd.context.msg.error(\"you cannot re-enable DCD output after it has been disabled\\n\");\n raise RuntimeError('Error enabling updater');", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def develope_mode(self, _):\n global develope_mode\n develope_mode = not develope_mode", "def enabled(self):\n return True", "def enabled(self):\n return True", "def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()", "def doctest_mode(self, parameter_s=''):\n\n # Shorthands\n shell = self.shell\n meta = shell.meta\n disp_formatter = self.shell.display_formatter\n ptformatter = disp_formatter.formatters['text/plain']\n # dstore is a data store kept in the instance metadata bag to track any\n # changes we make, so we can undo them later.\n dstore = meta.setdefault('doctest_mode',Struct())\n save_dstore = dstore.setdefault\n\n # save a few values we'll need to recover later\n mode = save_dstore('mode',False)\n save_dstore('rc_pprint',ptformatter.pprint)\n save_dstore('xmode',shell.InteractiveTB.mode)\n save_dstore('rc_separate_out',shell.separate_out)\n save_dstore('rc_separate_out2',shell.separate_out2)\n 
save_dstore('rc_separate_in',shell.separate_in)\n save_dstore('rc_active_types',disp_formatter.active_types)\n\n if not mode:\n # turn on\n\n # Prompt separators like plain python\n shell.separate_in = ''\n shell.separate_out = ''\n shell.separate_out2 = ''\n\n\n ptformatter.pprint = False\n disp_formatter.active_types = ['text/plain']\n\n shell.magic('xmode Plain')\n else:\n # turn off\n shell.separate_in = dstore.rc_separate_in\n\n shell.separate_out = dstore.rc_separate_out\n shell.separate_out2 = dstore.rc_separate_out2\n\n ptformatter.pprint = dstore.rc_pprint\n disp_formatter.active_types = dstore.rc_active_types\n\n shell.magic('xmode ' + dstore.xmode)\n\n # mode here is the state before we switch; switch_doctest_mode takes\n # the mode we're switching to.\n shell.switch_doctest_mode(not mode)\n\n # Store new mode and inform\n dstore.mode = bool(not mode)\n mode_label = ['OFF','ON'][dstore.mode]\n print('Doctest mode is:', mode_label)", "def enable(ctx):\n\n fc_group_cfg = {}\n fc_group_cfg['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", ACL, fc_group_cfg)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def enable(self):\n self._enabled = True", "def is_enabled(self):", "def Enabled(self) -> bool:", "def disable_setup(self):\n self.high_ver_entry.config(state=\"disabled\")\n self.low_ver_entry.config(state=\"disabled\")\n self.left_hor_entry.config(state=\"disabled\")\n self.right_hor_entry.config(state=\"disabled\")", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()", "def enable(self):\n if not self.enabled:\n self._router_request(\n self._make_request_data(\n 'editMaintWindow',\n data=dict(\n params=dict(\n uid=self.parent,\n id=self.id,\n startDate=self.startDate,\n startHours=self.startHours,\n startMinutes=self.startMinutes,\n durationDays=self.durationDays,\n durationHours=self.durationHours,\n startProductionState=self.startProdState,\n repeat=self.repeat,\n enabled=True,\n occurrence=self.occurrence,\n days=self.days,\n )\n )\n )\n )\n self.enabled = True\n\n return 
True", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def change_mode(self, mode):\r\n self.update_enrollment(mode=mode)", "def __toggle_mode(self):\n # Update mode\n # Update mode, default canvas controls\n self.__ui_mode = {\n UImode.CANVASCONTROL: UImode.TEACHPANEL,\n UImode.TEACHPANEL: UImode.CANVASCONTROL\n }.get(self.__ui_mode, UImode.CANVASCONTROL)\n\n # Update UI\n # get list of robots\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n self.__reload_caption(new_list)", "def disable(self):", "def enable(self, modname):\n try: self.cfg.blacklist and self.cfg.blacklist.remove(modname)\n except ValueError: pass \n if self.cfg.loadlist and modname not in self.cfg.loadlist: self.cfg.loadlist.append(modname)\n self.cfg.save()", "def test_save_toggle_active(self):\n # is_active will change from True to False\n assert self.course_enrollment.is_active\n data = {\n 'user': str(self.course_enrollment.user.id),\n 'course': str(self.course_enrollment.course.id),\n 'is_active': 'false',\n 'mode': self.course_enrollment.mode,\n }\n\n with override_waffle_switch(COURSE_ENROLLMENT_ADMIN_SWITCH, active=True):\n response = self.client.post(\n reverse('admin:student_courseenrollment_change', args=(self.course_enrollment.id, )),\n data=data,\n )\n assert response.status_code == 302\n\n self.course_enrollment.refresh_from_db()\n assert not self.course_enrollment.is_active", "def ceph_enabled(self):", "def auto_mode(self, enabled):\n self.set_auto_mode(enabled)", "async def enable(self, ctx):\n self.bot.db.execute(\"UPDATE starboards SET enabled = 1 WHERE channel_id = ?\", (ctx.channel.id,))\n await ctx.say(\"star.enabled\")", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def disable(self):\n pass", "def CashMode(self):\n self.cred_left = 0\n self.is_member = False\n self.cred_id = ''\n self.cred_card = ''\n self.builder.get_object('GuiMode').set_label(\"Payment in Cash\")", "def disable(self):\n if self.enabled:\n self._router_request(\n self._make_request_data(\n 'editMaintWindow',\n data=dict(\n uid=self.parent,\n id=self.id,\n params=dict(\n startDate=self.startDate,\n startHours=self.startHours,\n startMinutes=self.startMinutes,\n durationDays=self.durationDays,\n durationHours=self.durationHours,\n startProductionState=self.startProdState,\n repeat=self.repeat,\n enabled=False,\n occurrence=self.occurrence,\n days=self.days,\n )\n )\n )\n )\n self.enabled = False\n\n return True", "def activate(self, id):\n self.db.commit([\n 'UPDATE comments SET',\n ' mode=1',\n 'WHERE id=%s AND mode=2'], (id, ))", "async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))", "def disable(self):\n self.enabled = False", "def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"", "def ToggleApprovalTracker(self, event):\n pass", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n 
tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def enter_maintenance_mode(self):\n cmd = self._cmd('enterMaintenanceMode')\n if cmd.success:\n self._update(_get_role(self._get_resource_root(), self._path()))\n return cmd", "def disable(self) -> None:", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n rif_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def test_enable(self):\n # OSA script should have been installed and enabled in setUp function\n # Now let's disable it, which should return True.\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT, False]))\n # Double check the script was disabled, as intended.\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Now re-enable\n self.assertTrue(self.run_function(\"assistive.enable\", [OSA_SCRIPT]))\n # Double check the script was enabled, as intended.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def disable(self, is_top_level=True):\n self.enabled = False", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "def disable(ctx):\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_TRAP\", fc_info)", "def activate(wait, timeout, logger, client):\n\n if timeout and not wait:\n msg = \"'--timeout' was used without '--wait'.\"\n error = exceptions.CloudifyCliError(msg)\n error.possible_solutions = [\n \"Add the '--wait' flag to the command in order to wait.\"\n ]\n raise error\n\n logger.info('Entering maintenance mode...')\n client.maintenance_mode.activate()\n\n if wait:\n logger.info(\"Cloudify manager will enter Maintenance mode once \"\n \"there are no running or pending executions...\\n\")\n deadline = time.time() + timeout\n\n while True:\n if _is_timeout(timeout, deadline):\n raise exceptions.CloudifyCliError(\n \"Timed out while entering maintenance mode. \"\n \"Note that the manager is still entering maintenance mode\"\n \" in the background. 
You can run \"\n \"'cfy maintenance-mode status' to check the status.\")\n\n status_response = client.maintenance_mode.status()\n if status_response.status == MAINTENANCE_MODE_ACTIVE:\n logger.info('Manager is in maintenance mode.')\n logger.info('While in maintenance mode most requests will '\n 'be blocked.')\n return\n time.sleep(DEFAULT_TIMEOUT_INTERVAL)\n logger.info(\"Run 'cfy maintenance-mode status' to check the \"\n \"maintenance mode's status.\\n\")", "def toggle(self) -> None:\n if self.value is None:\n raise ValueError('Cannot toggle dark mode when it is set to auto.')\n self.value = not self.value", "def disable(self) -> None:\n if self.active_mode is not None:\n logger.info(\"Disabling '%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_disable()\n\n self.active_mode = None", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "async def disable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation disabled.\"))", "async def enable(self) -> None:\n try:\n await self.adguard.request(\n \"parental/enable\", method=\"POST\", data=\"sensitivity=TEEN\"\n )\n except AdGuardHomeError as exception:\n raise AdGuardHomeError(\n \"Enabling AdGuard Home parental control failed\"\n ) from exception", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = {}\n queue_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE\", queue_info)" ]
[ "0.7372797", "0.73507535", "0.67397463", "0.6203601", "0.6188932", "0.6090768", "0.59693944", "0.5953735", "0.5913658", "0.5828994", "0.5818898", "0.5807375", "0.57376415", "0.5717043", "0.57043606", "0.5703792", "0.56936777", "0.56936777", "0.56642973", "0.5659629", "0.56460094", "0.5645479", "0.5620982", "0.5599804", "0.5593283", "0.5584609", "0.5580536", "0.55733424", "0.55355", "0.55305153", "0.55305153", "0.5517643", "0.55146384", "0.5509295", "0.54706126", "0.5465844", "0.54608625", "0.5460274", "0.5456756", "0.5431258", "0.5427993", "0.5427993", "0.54110765", "0.540267", "0.5394132", "0.5386846", "0.5384077", "0.5382301", "0.53769135", "0.5373066", "0.537128", "0.53700453", "0.5367205", "0.53618693", "0.5349164", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.5341484", "0.53361356", "0.53345335", "0.5333784", "0.53161764", "0.5307715", "0.530559", "0.5292386", "0.5292381", "0.5290407", "0.5283914", "0.5277935", "0.5273955", "0.52686536", "0.52677417", "0.5266946", "0.52620625", "0.5261994", "0.52474695", "0.52342933", "0.52338576", "0.5233793", "0.52244043", "0.52191633", "0.5217009", "0.5211258", "0.52094704", "0.52047694", "0.52021277", "0.5200688", "0.51929706", "0.5185863", "0.51847446", "0.51794624", "0.5171397" ]
0.7124232
2
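A minimal standalone sketch of the maintenance-mode toggle in the document above, assuming plain requests with a caller-supplied base_url and auth (both hypothetical); only the /_admin/cluster/maintenance path, the '"on"'/'"off"' body, and the 3 s retry cadence come from the record:

import time
import requests

def set_maintenance(base_url, on_off, auth=None, tries=60):
    # base_url and auth are placeholders; endpoint and payload follow the record above.
    body = '"on"' if on_off else '"off"'
    for _ in range(tries):
        reply = requests.put(base_url + "/_admin/cluster/maintenance",
                             data=body, auth=auth, timeout=30)
        if reply.status_code == 200:
            return
        time.sleep(3)
    raise RuntimeError("Couldn't %s maintenance mode!"
                       % ("enable" if on_off else "disable"))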
in active failover, detect whether we run the leader
def detect_leader(self):
    # Should this be moved to the AF script?
    lfs = self.read_db_logfile()

    became_leader = lfs.find("Became leader in") >= 0
    took_over = lfs.find("Successful leadership takeover:" + " All your base are belong to us") >= 0
    self.is_leader = became_leader or took_over
    if self.is_leader:
        url = self.get_frontend().get_local_url("")
        reply = requests.get(url, auth=requests.auth.HTTPBasicAuth("root", self.passvoid), timeout=120)
        print(f"{url} => {str(reply)}")
        if reply.status_code == 503:
            self.is_leader = False
    return self.is_leader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def probe_leader(self):\n # Should this be moved to the AF script?\n self.is_leader = False\n for instance in self.get_frontends():\n if instance.probe_if_is_leader():\n self.is_leader = True\n return self.is_leader", "def is_leader(self):\n return self.__is_leader", "def is_cluster_leader(self):\n return self.leader == 'self'", "def isLeader(self):\n return self.datacenter_id == self.leader_id", "def check_leader(leader=None):\n grastate_dat = '/var/lib/mysql/grastate.dat'\n grastate = open(grastate_dat)\n for line in grastate.readlines():\n if 'safe_to_bootstrap' in line and '1' in line:\n leader = True\n if not leader:\n print 'It may not be safe to bootstrap the cluster from this node.'\n print 'It was not the last one to leave the cluster and may not contain all the updates.'\n print 'To force cluster bootstrap with this node, edit the {} file manually and set safe_to_bootstrap to 1'.format(grastate_dat)\n os.sys.exit(1)", "def is_cluster_leader(target, schema=None):\n try:\n return cluster_status(target, schema=schema).get('leader') == 'self'\n except subprocess.CalledProcessError:\n return False", "def is_elected_leader(resource):\n if is_clustered():\n if not is_crm_leader(resource):\n log('Deferring action to CRM leader.', level=INFO)\n return False\n else:\n peers = peer_units()\n if peers and not oldest_peer(peers):\n log('Deferring action to oldest service unit.', level=INFO)\n return False\n return True", "def start_election(self):\n print \"---------\\nStarting an election...\\n---------\"\n processes = self.get_processes()\n if len(processes) == 0:\n print \"Not enough servers up yet. Cannot initiate election.\"\n return \"Not enough servers up yet.\"\n higher_active_process = False\n for uid, server in processes.iteritems():\n if uid <= self.uid:\n continue # only contact higher processes\n try:\n ack = server.elect_leader()\n if (ack == \"I am leader.\"):\n self.global_time_server = server\n self.time_server_set = True\n print \"OUTCOME:\\nLeader is %d\\n---------\"%(uid)\n higher_active_process = True\n break\n except socket.error:\n pass\n if higher_active_process:\n return \"I am NOT leader.\"\n else:\n self.am_leader = True\n self.time_server_set = True\n print \"OUTCOME:\\nI am leader.\\n---------\"\n return \"I am leader.\"", "def leader(self):\n pass", "def leader(self):\n pass", "def this_needs_work_test_hook_leader_elected(\n self, h_is_leader, h_leader_set\n ):\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)", "def active_failover_detect_hosts(self):\n self.check_that_instance_is_alive()\n # this is the way to detect the master starter...\n lfs = self.get_log_file()\n if lfs.find(\"Just became master\") >= 0:\n self.is_master = True\n else:\n self.is_master = False\n regx = re.compile(r\"Starting resilientsingle on port (\\d*) .*\")\n match = regx.search(lfs)\n if match is None:\n raise Exception(timestamp() + \"Unable to get my host state! 
\" + self.basedir + \" - \" + lfs)\n\n self.frontend_port = match.groups()[0]", "def is_crm_leader(resource):\n cmd = [\n \"crm\", \"resource\",\n \"show\", resource\n ]\n try:\n status = subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n return False\n else:\n if get_unit_hostname() in status:\n return True\n else:\n return False", "def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False", "def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")", "def attempt_to_acquire_leader(self, permanent=False):", "def check_server_activity(self):\n if (self.am_leader == True):\n return \"Time server connected.\"\n elif (self.time_server_set == False):\n print \"I am not aware of a time server. Fetching from existing process.\"\n if (self.fetch_time_server() == False):\n print \"Fetch failed. Electing a leader.\"\n self.start_election()\n if self.time_server_not_responding():\n print \"The time server is not responding.\" \n self.start_election()\n return \"Time server elected.\"", "def test_bootstrap_source_not_leader(self):\n self.is_leader.return_value = False\n ceph_hooks.bootstrap_source_relation_changed()\n self.assertEqual(self.leader_set.call_count, 0)", "def is_current_node_active(self, device, partition):\n if self.is_version_sufficient(min_version='11.3.0') is False:\n print \"!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!\"\n print \"! UNABLE TO VERIFY FAILOVER STATE !\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n stop = raw_input('Do you want to continue? [y|N]')\n if stop.strip() == \"y\" or stop.strip() == \"Y\":\n return True\n else:\n return False\n \"\"\" Determines if the connect device is the master, if not Bail with an error.\"\"\"\n try:\n self.connection.System.Session.set_active_folder(\"/Common\")\n status = self.connection.Management.Device.get_failover_state([device])\n if status == ['HA_STATE_ACTIVE']:\n self.connection.System.Session.set_active_folder(\"/\"+partition)\n return True\n else:\n return False\n except:\n raise Exception(\"Failed to determine if {} is a master\".format(device))", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "def start_leader_election():\n\t\ttime.sleep(5)\n\t\ttry:\n\n\t\t\tprint(\"starting the election... 
\")\n\t\t\tthread = Thread(target=contact_vessel,args=(next_address(),\"/election/electing\",{'start_id':node_id,'highest_value':randomized_value,'winning_id':node_id}))\n\t\t\tthread.daemon = True\n\t\t\tthread.start()\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn True", "def is_running(self):\n\t\treturn self in _running", "def take_leader(self):", "def running(self):\n\t\treturn self._start is not None", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True", "def isstarted():", "def running(self) -> bool:", "def mmo_replset_has_primary(self, mmo_connection, rs):\n rs_status = self.mmo_execute_on_secondaries(mmo_connection, { \"replSetGetStatus\": 1 }, replicaset=rs, first_available_only=True)\n has_primary = False\n\n\n for member in rs_status[0][\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n has_primary = True\n break\n return has_primary", "def is_alive(self):", "def do_test_we_are_the_leader(self, h_is_leader, h_leader_set):\n states = r_state.r_get_states()\n r_state.remove_state(LEADER_STATE)\n no_leader = r_state.r_get_states()\n r_state.set_state(LEADER_STATE)\n leader = r_state.r_get_states()\n self.assertNotEquals(no_leader, leader)\n self.assertEquals(no_leader.union(set([LEADER_STATE])), leader)\n\n is_leader_call_count = h_is_leader.call_count\n leader_set_call_count = h_leader_set.call_count\n # is_leader() fails\n h_is_leader.return_value = False\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 1, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 0, h_leader_set.call_count)\n\n def raise_fail(*args, **kwargs):\n \"\"\"\n Simulate a leader_set() failure.\n \"\"\"\n raise Exception(\"oops\")\n\n # is_leader() succeeds, but leader_set() fails\n h_is_leader.return_value = True\n h_leader_set.side_effect = raise_fail\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 2, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 1, h_leader_set.call_count)\n\n self.lset_args = None\n self.lset_kwargs = None\n\n def record_leader_set_args(*args, **kwargs):\n \"\"\"\n Make sure leader_set() was invoked with the correct parameters.\n \"\"\"\n self.lset_args = args\n self.lset_kwargs = kwargs\n\n # ...and now it all works out\n h_is_leader.return_value = True\n h_leader_set.side_effect = record_leader_set_args\n testee.we_are_the_leader()\n self.assertEquals(leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 3, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 2, h_leader_set.call_count)\n self.assertEquals((), self.lset_args)\n self.assertEquals(\n {\"charm_storpool_block_unit\": sputils.MACHINE_ID}, self.lset_kwargs\n )\n\n r_state.r_set_states(states)", "def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False", "def can_run(self):\n\t\treturn self._start is None", "def is_node_running(node_name: str) -> bool:\n try:\n # Select only nodes that have automatic_drive in their name\n return len([n for n in rosnode.get_node_names() if node_name in n]) > 0\n except rosnode.ROSNodeIOException:\n # Happens when roscore is not up yet\n return False", "def is_alive(addr, user):\n return 
_ssh_master_cmd(addr, user, 'check') == 0", "def can_failover(self):\n return self._can_failover", "def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def active(self):\n return self.server.is_active() or self.executing", "def should_keep_running(self):\n return len(self.party.active_users())", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def am_I_master(self, ipdict):\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ipdict.get(ip_address).is_master", "def slaveConnected(slaveName):", "def test_hostmgr_failover(self, failure_tester):\n hosts1 = self._get_hosts(failure_tester)\n\n leader1 = failure_tester.fw.get_leader_info(failure_tester.hostmgr)\n assert leader1\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n failure_tester.wait_for_leader_change(failure_tester.hostmgr, leader1)\n failure_tester.reset_client()\n\n # verify that we can query the new leader\n def check_hosts():\n hosts2 = self._get_hosts(failure_tester)\n return len(hosts1) == len(hosts2)\n\n failure_tester.wait_for_condition(check_hosts)", "def becomeLeader(self):\n logging.info('become leader for term {}'.format(self.current_term))\n\n # no need to wait for heartbeat anymore\n self.election_timer.cancel()\n\n self.role = 'leader'\n self.leader_id = self.datacenter_id\n # keep track of the entries known to be logged in each data center\n # note that when we are in the transition phase\n # we as the leader need to keep track of nodes in\n # the old and the new config\n self.loggedIndices = dict([(center_id, 0)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n # initialize a record of nextIdx\n self.nextIndices = dict([(center_id, self.getLatest()[1]+1)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n\n self.sendHeartbeat()\n self.heartbeat_timer = Timer(self.heartbeat_timeout, self.sendHeartbeat)\n self.heartbeat_timer.daemon = True\n self.heartbeat_timer.start()", "def isStandby(self):\n logger.debug(\"Checking if %s is TM Standby\" % self)\n is_standby = self.getClusterRole()\n logger.debug(\"Is %s standby: %s\" % (self, is_standby))\n return is_standby", "def alive(self):\n return True", "def cluster_ready(self,cluster_name,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n cluster = self.cluster(project_id,cluster_name)\n pprint.pprint(cluster)\n return cluster['stateName'] == 'IDLE'", "def is_alive(self):\n pass", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def 
is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node", "def is_node_master(self) -> bool:\n self._assert_local_rank_set()\n return self.local_rank == 0", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_client_active(self, clid):\n # TODO: update ZooKeeper when active status changes\n #t = self.client_last_active.get(str(clid))\n return self.scheduler.is_active(clid)", "def isMaster(self):\n logger.debug(\"Checking if %s is Cloudera Master\" % self)\n is_master = self.getClusterRole()\n logger.debug(\"Is %s master: %s\" % (self, is_master))\n return is_master", "def is_alive(self):\n params = {'detail': 'true', 'path': '/clusterstate.json'}\n\n try:\n response = self.client.get('zookeeper', params)\n except solr_errors.SolrError:\n logger.exception('Failed to check zookeeper')\n return False\n else:\n try:\n data = json.loads(response['znode']['data'])\n except ValueError:\n return False\n\n for name, collection in data.items():\n shards = collection['shards']\n for shard, shard_info in shards.items():\n replicas = shard_info['replicas']\n for replica, info in replicas.items():\n state = info['state']\n if name == self.solr_collection and state != 'active':\n return False\n\n return True", "def is_active():\n return True", "def is_alive(self):\n return True", "def getClusterRole(self):\n\tlist = []\n\tcounter = 0\n\tf = self.sendCmd(\"cat /etc/hadoop/conf/hdfs-site.xml\")\n\tlist = f.split(\"\\n\")\n\tfor line in list:\n\t\tcounter = counter + 1\n\t\tif \"dfs.ha.namenodes\" in line:\n\t\t\tlogger.info(\"Setup is HA\")\n\t\t\tlogger.info (\"Finding nameservices id\")\n\t\t\ta = list[counter]\n\t\t\tnameservice1=a.split(\"value>\")[1].split(\"<\")[0].split(\",\")[0].strip()\n\t\t\tnameservice2=a.split(\"value>\")[1].split(\"<\")[0].split(\",\")[1].strip()\n\tnode_ip = self.run_cmd(\"grep \" + nameservice2 + \" /etc/hosts | awk '{print $1}'\")\n print \"===================\"\n\tprint \"node ip is %s\" %node_ip.split(\"\\n\")[0]\n print \"===================\"\n cmd = \"ifconfig | grep \" + node_ip.split(\"\\n\")[0] \n\toutput = self.run_cmd(cmd)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n cmd=\"/usr/bin/hdfs haadmin -getServiceState \" + nameservice2\n\t\trole=self.run_cmd(cmd)\n\t\tif \"active\" in role:\n\t\t\treturn \"master\"\n\t\telif \"standby\" in role:\n\t\t\treturn \"standby\"\n\t\telse:\n\t\t\tlogger.info(\"command was not executed successfully.Please check hadoop\")\n\telse:\n\t\tprint \"in else....\"\n\t\tcmd=\"/usr/bin/hdfs haadmin -getServiceState \" + nameservice1\n\t\trole=self.run_cmd(cmd)\n\t\tif \"active\" in role:\n\t\t\treturn \"master\"\n\t\telif \"standby\" in role:\n\t\t\treturn \"standby\"\n\t\telse:\n\t\t\tlogger.info(\"command was not executed successfully.Please check hadoop\")", "def check_hosts(zk,host_name,task,scheduler_log):\n\n #scheduler_log.debug(\"Scheduler Working...!!!\")\n try:\n #Leader Election\n leader = leaderCheck(zk=zk)\n #scheduler_log.debug(\"Leader Election Over\")\n #Update alive status to zookeeper - seems unnecessary\n imalive(zk=zk)\n #scheduler_log.debug(\"Alive Status Updated\")\n\n #If current Host is the Leader perform Scheduled Checks \n if (leader == host_name):\n scheduler_log.debug(\"%s : I am the 
Leader\"%host_name)\n\n #Fetch List of Hosts - From API\n host_dict = list_hosts(nova)\n allhosts = host_dict['all_list']\n api_down_nodes = host_dict['down_list']\n dishosts = host_dict['disabled_list']\n\n zk_all = zk.get_children(\"/openstack_ha/hosts/all\")\n zk_alive = zk.get_children(\"/openstack_ha/hosts/alive\")\n \n #Fetch Down nodes that are already Handeled - From Zookeeper\n zk_down = zk.get_children(\"/openstack_ha/hosts/down\")\n\n #Fetch nodes that are down and not already handled - From Zookeeper\n calculated_down_nodes = list(set(zk_all) - set(zk_alive))\n\n #Find Nodes Where Scheduler Only failed\n scheduler_down = list(set(calculated_down_nodes).difference(set(api_down_nodes)))\n for node in scheduler_down:\n scheduler_log.debug(\"HA Scheduler Failed on Node : %s \"%node)\n \n #Find Nodes Where API Only failed \n api_down = list(set(api_down_nodes).difference(set(calculated_down_nodes)))\n for node in api_down:\n scheduler_log.debug(\"API Failed on Node : %s \"%node)\n if node not in zk_all:\n scheduler_log.debug(\"HA Scheduler not even initialized %s\"%node)\n\n #Find nodes where both API and Zookeeper are failed \n api_scheduler_down = list(set(api_down_nodes).intersection(set(calculated_down_nodes)))\n\n # Possible Host states - Api only failure | Complete Host Failure ( Not yet Handled | Handling | Handled )\n if(len(api_scheduler_down))==0:\n scheduler_log.debug(\"Hosts working Normally....!!!\")\n else:\n scheduler_log.warning(\"More likely Disaster\")\n #skip if maintance\n # Here check the host in api_down_nodes(api) are present in calculated_down_nodes\n #if present start the instance migrations\n # Checking whether Cluster is Still under HA Policy\n # high availabity contiditions\n if len(api_scheduler_down) <= len(allhosts) - 1:\n scheduler_log.warn(\"Seems like Manageble Disaster\")\n for host in api_scheduler_down:\n scheduler_log.warning(\"Both Api and HA scheduler on\" +host+\" are down\")\n #checks whether down host from api is un handled(not present in down node calculate from zookeeper )\n #(host in zk_all and host not in zk_alive) == calculated_down_nodes\n if host in zk_down:\n #Node will present in zk_down only when all of it's instances are migrated\n scheduler_log.debug(\"Host %s Already handled...!!!!!\"%host)\n else:\n #Node down on api,zk and ( not handled | handling )\n if host not in dishosts:\n #Node Not disabled | disabled reason is not skippable\n scheduler_log.debug(host+\" is not disabled or reason is not maintenance\")\n if(zk.exists(\"/openstack_ha/hosts/time_out/\"+host)==None):\n scheduler_log.debug(\"Inside Time out Node Creation\")\n \n #adding host down time\n host_down_time = time.time()\n host_down_time = str.encode(str(host_down_time))\n scheduler_log.debug(host_down_time)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host, host_down_time)\n \n #adding time_suffix for json_dump file name\n temp_time=time.localtime(time.time()) \n time_suffix=str(temp_time.tm_mday)+\"_\"+str(temp_time.tm_mon)+\"_\"+\\\n str(temp_time.tm_year)+\"_\"+str(temp_time.tm_hour)+\"_\"+\\\n str(temp_time.tm_min)\n enc_time_suffix=str.encode(time_suffix)\n scheduler_log.debug(time_suffix)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\",enc_time_suffix)\n\n # call notification_mail(subj,msg) | Adding Down Node details to Notification \n try:\n subject = \"DGP Office VDI Node Down: %s\"%host\n message = \"Please Check the Network Connectivity and Powersupply as soon as possible\"\n 
notification_mail(subject,message,to_email=['naanalteam@naanal.in'])\n\n message = \"Please Contact System Administrator\"\n notification_mail(subject,message)\n scheduler_log.debug(\"mail in Scheduler...!\")\n except Exception as e:\n scheduler_log.debug(e)\n scheduler_log.debug(\"Error....! mail scheduler..!\")\n\n # add ping test\n ping_status=ping_check(host)\n if(ping_status):\n scheduler_log.debug(\"Not a Disaster\")\n scheduler_log.debug(\"ping test success....!!! Node is alive... Please Check the APIs,HA Scheduler and other Openstack Services\")\n\n else:\n scheduler_log.warning(\"Ping test also Failed on \"+host+\" proceed with migration\")\n if (zk.exists(\"/openstack_ha/hosts/start_migration/\"+ host)): # it checks the permission from the dashborad\n scheduler_log.warning(\" api down host :\"+host+\"present in zookeeper down_node:\")\n scheduler_log.debug(\"Strart migration....!!!!!\")\n scheduler_log.debug(\"migrating instances from the \"+host)\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode() \n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n #check for time out\n scheduler_log.debug(\"Checking Timeout for Down Node\",host)\n curent_time = time.time()\n if (zk.exists(\"/openstack_ha/hosts/time_out/\"+host)):\n down_host_failuretime = zk.get(\"/openstack_ha/hosts/time_out/\"+host)[0]\n down_host_failuretime = down_host_failuretime.decode(encoding='UTF-8')\n scheduler_log.warning(\"down_host_failuretime\",down_host_failuretime)\n down_host_failuretime = float(down_host_failuretime)\n time_interval = curent_time - down_host_failuretime\n if time_interval>migrate_time:\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode()\n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n scheduler_log.debug(\"Will Wait for another %d\"%(migrate_time-time_interval))\n else:\n scheduler_log.debug(\"%s Node Does'nt have TimeOut Value. 
Hence will not migrate forever\"%host)\n else:\n scheduler_log.debug(\"Host %s Under Maintenance\"%host)\n \n else:\n scheduler_log.warning(\"Un-Manageble Disaster Too many Nodes are down\")\n else:\n scheduler_log.debug(\"%s : Leader is %s\"%(host_name,leader))\n\n except Exception as e:\n if issubclass(e.__class__,kexception.NoNodeError):\n scheduler_log.exception(\"No node error\")\n elif any(issubclass(e.__class__, lv) for lv in kazoo_exceptions):\n scheduler_log.exception(\"Kazoo Exception.....: \")\n time.sleep(2)\n try:\n zk = KazooClient(hosts='127.0.0.1:2181')\n zk.start() \n Node_creation = createNodeinAll(zk=zk, host_name=host_name)\n election_Node = election_node(zk=zk, host_name=host_name)\n except:\n pass\n else:\n scheduler_log.warning(\"Unhandled Error \")\n scheduler_log.exception(\"\")", "def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if result.split(':')[0] == 'Fatal' else True", "def is_master(self, process_group: ProcessGroup = None) -> bool:\n rank = dist.get_rank(group=process_group)\n return rank == 0", "def failover_target(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"failover_target\")", "def test_failover_LR(self, mgmt_root):\n\n f = mgmt_root.tm.sys.failover.load()\n assert 'Failover active' in f.apiRawValues['apiAnonymous']\n f.refresh()\n assert 'Failover active' in f.apiRawValues['apiAnonymous']", "def get_leader(model, application_name):\n command = ['juju', 'run', '--format=yaml',\n '--model', model,\n '--application', application_name,\n 'is-leader']\n results = yaml.load(subprocess.check_output(command))\n for unit in results:\n if 'True' in unit['Stdout'].strip():\n return unit['UnitID']", "def matches(self, name):\n return name is not None and name in (self.leader, self.sync_standby)", "def isConnected():", "def isTCPRunningStartup():\r\n\r\n time.sleep(0.5)\r\n logs = open(\"Client1.txt\", 'r')\r\n #print(logs.readlines()[2])\r\n line = logs.readlines()[2]\r\n logs.close()\r\n print(line)\r\n isRunning = line != 'iperf3: error - unable to connect to server: Cannot assign requested address\\n'\r\n print(isRunning)\r\n return isRunning", "def active(self):\n return self.starting == 0 and self.stopped == 0", "def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False", "def ready(self):\n # NOTE(priteau): Temporary compatibility with old and new lease status\n if self.lease.get('action') is not None:\n return self.status == ('START', 'COMPLETE')\n else:\n return self.status == 'ACTIVE'", "def is_running(self):\n\t\treturn self._running", "def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning", "def is_active(self):\r\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n 
return False", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def _use_multi_head(self):\n return self.secondary_loss is not None", "def this_needs_work_test_full_lifecycle(self, h_is_leader, h_leader_set):\n self.do_test_hook_install(testee.install_setup, False)\n self.do_test_hook_install(testee.upgrade_setup, True)\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)\n self.do_test_ensure_our_presence()", "def isalive():\n return 'alive'", "def is_alive(self):\n return (self.read_name() != '')", "def active(self) -> bool:\n return self.orchestration_status == \"Active\"", "def alive(self):\n return self._thread is not None", "def is_active(self):\n if self.load_status == \"I\":\n return True\n return False", "def running(self):\n return self._lifetime_state in {\"starting\",\"running\",\"finishing\"}", "def _mapped_to_this_conductor(self, node_uuid, driver):\n try:\n ring = self.ring_manager[driver]\n except exception.DriverNotFound:\n return False\n\n return self.host in ring.get_hosts(node_uuid)", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def checkUpstreamScheduler():", "def get_active_node(self):\n url = 'http://%s/v1/health/service/vault' % self.consul_host_port\n # parse the health check results and find the one that's passing\n if self.log_enabled:\n logger.debug('Polling active node from: %s', url)\n r = requests.get(url)\n # return the current leader address\n for node in r.json():\n if 'active' not in node['Service']['Tags']:\n continue\n port = node['Service']['Port']\n n = \"%s:%d\" % (node['Node']['Node'], port)\n if self.redir_ip:\n n = \"%s:%d\" % (node['Node']['Address'], port)\n if self.log_enabled:\n logger.info(\"Got active node as: %s\", n)\n return n\n if self.log_enabled:\n logger.critical('NO vault services found with health check passing')\n return None", "def online(self):\n return False", "def isready_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n state = ctx.obj.groups[project.id].clusters[cluster_name].get().data.stateName\n\n if state == \"IDLE\":\n click.echo(\"True\")\n exit(0)\n click.echo(\"False\", err=True)\n exit(1)", "def find_leader(self):\r\n # Initialize the leader fitness as an arbitrarly bad value\r\n leaderFitness = -(2**63)\r\n \r\n for number in range(POPULATION_SIZE):\r\n if self.population[number].current_fitness > leaderFitness:\r\n leaderFitness = self.population[number].current_fitness\r\n self.leader = number", "def ping_cluster():\n response = requests.get('{}/v1/status/peers'.format(common.URL))\n response.raise_for_status()\n\n # Wait for all 3 agents to join the cluster\n if len(response.json()) == 3:\n return True\n\n return False", "def is_active(self) -> bool:", "def is_active(self):\n if not self._relaypid:\n return False\n\n self._lock.acquire()\n relaypid = None\n portoffset = None\n try:\n relaypid, portoffset = self._check_tcprelay()\n except AttributeError:\n logger.debug(\n \"No active TCPRELAY tunnel on locationid - {0}\"\n \"\".format(self.locationid_param))\n finally:\n self._lock.release()\n\n return (\n self._relaypid == relaypid and\n self._portoffset == portoffset\n )", "def _is_running(self):\n # Public interface is given by get_status instead.\n self._update()\n return True if self.running_mode else False", "def 
check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def IsStarted(self) :\n\t\t...", "def is_frida_running(self):\n if not self.available():\n return False\n\n found = False\n pid = None\n\n if self._have_pidof:\n if self._alternate_frida_name:\n pid = self.su_cmd('pidof -s frida-server')\n else:\n pid = self.su_cmd('pidof -s frida')\n if pid:\n try:\n pid = int(pid.join(pid.split())) # remove \\r\\n\n if pid:\n return True\n except ValueError:\n # no integer\n pass\n\n if self._oreo_plus:\n result = self.su_cmd('ps -A | grep frida')\n else:\n result = self.su_cmd('ps | grep frida')\n\n if result:\n result = result.split()\n\n if 'frida' or 'frida-server' in result:\n found = True\n\n return found", "def _requires_inmigrate_from(self):\n existing = locate_live_service(self.consul, \"qemu-\" + self.name)\n\n if existing and existing[\"Address\"] != self.this_host:\n # Consul knows about a running VM. Lets try a migration.\n return existing[\"Address\"]\n\n if self.ceph.is_unlocked():\n # Consul doesn't know about a running VM and no volume is locked.\n # It doesn't make sense to live migrate this VM.\n return None\n\n if self.ceph.locked_by_me():\n # Consul doesn't know about a running VM and the volume is\n # locked by me, so it doesn't make sense to live migrate the VM.\n return None\n\n # The VM seems to be locked somewhere else, try to migrate it from\n # there.\n return self.ceph.locked_by()", "def _is_running(self):\n return self._run_state.is_running()" ]
[ "0.73940504", "0.7367778", "0.72519314", "0.724043", "0.7167702", "0.70824057", "0.69250184", "0.69241196", "0.67583", "0.65203124", "0.65203124", "0.6361834", "0.6304819", "0.6293747", "0.6229859", "0.6190928", "0.61597836", "0.604024", "0.60138893", "0.5951151", "0.58495647", "0.5820223", "0.5815441", "0.5748331", "0.574193", "0.5720468", "0.56876665", "0.5686061", "0.5684562", "0.5654774", "0.56517226", "0.56469524", "0.56289357", "0.56238234", "0.5623192", "0.5616688", "0.5615438", "0.55944145", "0.55799985", "0.55703115", "0.5561672", "0.55587125", "0.55570954", "0.5554009", "0.55465245", "0.5538244", "0.5532542", "0.552798", "0.5516802", "0.55035025", "0.54992366", "0.5496882", "0.5494553", "0.549294", "0.5491746", "0.54760605", "0.5473075", "0.5466254", "0.5440962", "0.5434209", "0.5428537", "0.54143405", "0.54130876", "0.5410443", "0.54065156", "0.53979903", "0.53805035", "0.5373475", "0.53563875", "0.5355411", "0.53414744", "0.5341015", "0.5338921", "0.53364736", "0.5322058", "0.5321196", "0.53179044", "0.53130716", "0.53119874", "0.53119177", "0.5307098", "0.53054196", "0.5304539", "0.53001493", "0.52999055", "0.52980125", "0.5295916", "0.5293185", "0.52910775", "0.52892476", "0.52847004", "0.5282096", "0.52787304", "0.5276357", "0.5270194", "0.52673244", "0.5259074", "0.52580875", "0.5255447", "0.5248918" ]
0.7651802
0
talk to the frontends to find out whether it's a leader or not.
def probe_leader(self):
    # Should this be moved to the AF script?
    self.is_leader = False
    for instance in self.get_frontends():
        if instance.probe_if_is_leader():
            self.is_leader = True
    return self.is_leader
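A minimal usage sketch of the function above, assuming a starter object that exposes probe_leader() as defined; the polling loop, timeout, and names are editorial assumptions, not part of the original source:

import time

def wait_for_leader(starter, timeout=30.0, interval=0.5):
    # Poll the starter's frontends until one reports leadership or we give up.
    # `starter` is assumed to expose probe_leader() as sketched above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if starter.probe_leader():
            return True
        time.sleep(interval)
    return False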
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_leader(self):\n # Should this be moved to the AF script?\n lfs = self.read_db_logfile()\n\n became_leader = lfs.find(\"Became leader in\") >= 0\n took_over = lfs.find(\"Successful leadership takeover:\" + \" All your base are belong to us\") >= 0\n self.is_leader = became_leader or took_over\n if self.is_leader:\n url = self.get_frontend().get_local_url(\"\")\n reply = requests.get(url, auth=requests.auth.HTTPBasicAuth(\"root\", self.passvoid), timeout=120)\n print(f\"{url} => {str(reply)}\")\n if reply.status_code == 503:\n self.is_leader = False\n return self.is_leader", "def leader(self):\n pass", "def leader(self):\n pass", "def is_leader(self):\n return self.__is_leader", "def isLeader(self):\n return self.datacenter_id == self.leader_id", "def this_needs_work_test_hook_leader_elected(\n self, h_is_leader, h_leader_set\n ):\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)", "def is_cluster_leader(self):\n return self.leader == 'self'", "def start_election(self):\n print \"---------\\nStarting an election...\\n---------\"\n processes = self.get_processes()\n if len(processes) == 0:\n print \"Not enough servers up yet. Cannot initiate election.\"\n return \"Not enough servers up yet.\"\n higher_active_process = False\n for uid, server in processes.iteritems():\n if uid <= self.uid:\n continue # only contact higher processes\n try:\n ack = server.elect_leader()\n if (ack == \"I am leader.\"):\n self.global_time_server = server\n self.time_server_set = True\n print \"OUTCOME:\\nLeader is %d\\n---------\"%(uid)\n higher_active_process = True\n break\n except socket.error:\n pass\n if higher_active_process:\n return \"I am NOT leader.\"\n else:\n self.am_leader = True\n self.time_server_set = True\n print \"OUTCOME:\\nI am leader.\\n---------\"\n return \"I am leader.\"", "def is_elected_leader(resource):\n if is_clustered():\n if not is_crm_leader(resource):\n log('Deferring action to CRM leader.', level=INFO)\n return False\n else:\n peers = peer_units()\n if peers and not oldest_peer(peers):\n log('Deferring action to oldest service unit.', level=INFO)\n return False\n return True", "def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False", "def take_leader(self):", "def is_cluster_leader(target, schema=None):\n try:\n return cluster_status(target, schema=schema).get('leader') == 'self'\n except subprocess.CalledProcessError:\n return False", "def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n 
res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def check_server_activity(self):\n if (self.am_leader == True):\n return \"Time server connected.\"\n elif (self.time_server_set == False):\n print \"I am not aware of a time server. Fetching from existing process.\"\n if (self.fetch_time_server() == False):\n print \"Fetch failed. Electing a leader.\"\n self.start_election()\n if self.time_server_not_responding():\n print \"The time server is not responding.\" \n self.start_election()\n return \"Time server elected.\"", "def _update_leader(self):", "def check_leader(leader=None):\n grastate_dat = '/var/lib/mysql/grastate.dat'\n grastate = open(grastate_dat)\n for line in grastate.readlines():\n if 'safe_to_bootstrap' in line and '1' in line:\n leader = True\n if not leader:\n print 'It may not be safe to bootstrap the cluster from this node.'\n print 'It was not the last one to leave the cluster and may not contain all the updates.'\n print 'To force cluster bootstrap with this node, edit the {} file manually and set safe_to_bootstrap to 1'.format(grastate_dat)\n os.sys.exit(1)", "def leaderboard(self):\n pass", "async def leaderboard(self, ctx):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n xp = config.load_xp()\n scores = {}\n if str(guild) in xp['guilds']:\n for user in xp['guilds'][str(guild)]:\n scores.update({ctx.guild.get_member(int(user)).display_name: xp['guilds'][str(guild)][user]['xp']})\n sorted_scores = collections.OrderedDict(sorted(scores.items(), key=lambda x: x[1], reverse=True))\n message = discord.Embed(title='Leaderboard', description=ctx.guild.name + \"'s most active users\")\n current_field = 1\n field_limit = 25\n for index, (key, value) in enumerate(sorted_scores.items()):\n if current_field <= field_limit:\n message.add_field(name=str(index+1) + \": \" + key,\n value=\"with: \" + str(value) + \" xp\",\n inline=False)\n current_field += 1\n else:\n break\n await ctx.send('', embed=message)\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")", "def becomeLeader(self):\n logging.info('become leader for term {}'.format(self.current_term))\n\n # no need to wait for heartbeat anymore\n self.election_timer.cancel()\n\n self.role = 'leader'\n self.leader_id = self.datacenter_id\n # keep track of the entries known to be logged in each data center\n # note that when we are in the transition phase\n # we as the leader need to keep track of nodes in\n # the old and the new config\n self.loggedIndices = dict([(center_id, 0)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n # initialize a record of nextIdx\n self.nextIndices = dict([(center_id, 
self.getLatest()[1]+1)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n\n self.sendHeartbeat()\n self.heartbeat_timer = Timer(self.heartbeat_timeout, self.sendHeartbeat)\n self.heartbeat_timer.daemon = True\n self.heartbeat_timer.start()", "def do_test_we_are_the_leader(self, h_is_leader, h_leader_set):\n states = r_state.r_get_states()\n r_state.remove_state(LEADER_STATE)\n no_leader = r_state.r_get_states()\n r_state.set_state(LEADER_STATE)\n leader = r_state.r_get_states()\n self.assertNotEquals(no_leader, leader)\n self.assertEquals(no_leader.union(set([LEADER_STATE])), leader)\n\n is_leader_call_count = h_is_leader.call_count\n leader_set_call_count = h_leader_set.call_count\n # is_leader() fails\n h_is_leader.return_value = False\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 1, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 0, h_leader_set.call_count)\n\n def raise_fail(*args, **kwargs):\n \"\"\"\n Simulate a leader_set() failure.\n \"\"\"\n raise Exception(\"oops\")\n\n # is_leader() succeeds, but leader_set() fails\n h_is_leader.return_value = True\n h_leader_set.side_effect = raise_fail\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 2, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 1, h_leader_set.call_count)\n\n self.lset_args = None\n self.lset_kwargs = None\n\n def record_leader_set_args(*args, **kwargs):\n \"\"\"\n Make sure leader_set() was invoked with the correct parameters.\n \"\"\"\n self.lset_args = args\n self.lset_kwargs = kwargs\n\n # ...and now it all works out\n h_is_leader.return_value = True\n h_leader_set.side_effect = record_leader_set_args\n testee.we_are_the_leader()\n self.assertEquals(leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 3, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 2, h_leader_set.call_count)\n self.assertEquals((), self.lset_args)\n self.assertEquals(\n {\"charm_storpool_block_unit\": sputils.MACHINE_ID}, self.lset_kwargs\n )\n\n r_state.r_set_states(states)", "def attempt_to_acquire_leader(self, permanent=False):", "async def elect_leader( request ):\n resource = request.match_info['resource']\n node = request.match_info['node']\n ttl = int( request.match_info['ttl'] )\n leader_election = await create_leader_election( redises, resource, node, ttl )\n try:\n leader = await leader_election.elect_leader()\n return web.json_response( {\"leader\": leader} , status = 200 )\n except Exception as ex:\n print(ex)\n return web.json_response( {\"error\": \"fail to elect leader\" }, status = 501 )", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "async def tod_status(self, ctx, *args):\n n = len(self.players)\n if n > 0:\n if n == 1:\n s = \"person\"\n else:\n s = \"people\"\n message = f\"A Truth or Dare game is currently taking place with {n} {s}!\"\n else:\n message = \"No Truth or Dare game is currently taking place.\"\n await ctx.send(message)", "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def find_leader(self):\r\n # Initialize the leader fitness as an arbitrarly bad value\r\n leaderFitness = -(2**63)\r\n \r\n for number in 
range(POPULATION_SIZE):\r\n if self.population[number].current_fitness > leaderFitness:\r\n leaderFitness = self.population[number].current_fitness\r\n self.leader = number", "def tellIfStarted(self):\n if self.game_number == 1:\n self.welcome()\n else:\n self.tellGameNumber()", "def on_join(data):\n print(str(data))\n if models.Leaderboard.query.filter_by(\n username=data['user']).first() is None:\n add_user(data['user'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def leaderboard():\n # Get leaderboard and user information\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n # Get top gainer leaderboards\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('leaderboard.html',\n leaderboard=leaderboard,\n current_user_info=current_user_info,\n weektopgainers=weektopgainers,\n monthtopgainers=monthtopgainers,\n userbalance=current_user.balance)", "async def join_leaderboard(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n author = ctx.message.author\n\n info_str = (\n \"Head over to https://adventofcode.com/leaderboard/private \"\n \"with code `975452-d90a48b0` to join the TWT private leaderboard!\"\n )\n try:\n await author.send(info_str)\n except discord.errors.Forbidden:\n await ctx.send(f\":x: {author.mention}, please (temporarily) enable DMs to receive the join code\")\n else:\n await ctx.message.add_reaction(\"\\U0001F4E8\")", "async def post_leaderboard(\n self,\n ctx: commands.Context,\n leaderboard_type: Literal[\n \"season\",\n \"weekly\",\n \"worst\",\n \"playoffs\",\n \"playoffs_weekly\",\n \"pre-season\",\n \"pre-season_weekly\",\n ],\n ) -> None:\n leaderboard_type_str = leaderboard_type.replace(\"_\", \" \").title()\n leaderboard = await self.pickems_config.guild(ctx.guild).leaderboard()\n if leaderboard == {} or leaderboard is None:\n await ctx.send(_(\"There is no current leaderboard for this server!\"))\n return\n if leaderboard_type != \"worst\":\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True\n )\n else:\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][\"total\"] - i[1][\"season\"], reverse=True\n )\n msg_list = []\n count = 1\n user_position = None\n total_str = {\n \"season\": \"total\",\n \"playoffs\": \"playoffs_total\",\n \"pre-season\": \"pre-season_total\",\n }.get(leaderboard_type, \"total\")\n\n for member_id in leaderboard:\n if str(member_id[0]) == str(ctx.author.id):\n user_position = leaderboard.index(member_id)\n member = ctx.guild.get_member(int(member_id[0]))\n if member is None:\n member_mention = _(\"User has left the server \") + member_id[0]\n else:\n member_mention = member.mention\n if leaderboard_type in [\"weekly\", \"playoffs_weekly\", \"pre-season_weekly\"]:\n points = member_id[1].get(leaderboard_type, 0)\n msg_list.append(\"#{}. {}: {}\\n\".format(count, member_mention, points))\n elif leaderboard_type in [\"season\", \"playoffs\", \"pre-season\"]:\n total = member_id[1].get(total_str, 0)\n wins = member_id[1].get(leaderboard_type, 0)\n try:\n percent = (wins / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. 
{member_mention}: {wins}/{total} correct ({percent:.4}%)\\n\"\n )\n else:\n total = member_id[1].get(total_str, 0)\n losses = member_id[1].get(total_str, 0) - member_id[1].get(leaderboard_type)\n try:\n percent = (losses / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. {member_mention}: {losses}/{total} incorrect ({percent:.4}%)\\n\"\n )\n count += 1\n leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]\n if user_position is not None:\n user = leaderboard[user_position][1]\n wins = user[\"season\"]\n total = user[total_str]\n losses = user[total_str] - user[\"season\"]\n position = _(\n \"{member}, you're #{number} on the {leaderboard_type} leaderboard!\\n\"\n ).format(\n member=ctx.author.display_name,\n number=user_position + 1,\n leaderboard_type=leaderboard_type_str,\n )\n if leaderboard_type == \"season\":\n percent = (wins / total) * 100\n position += _(\"You have {wins}/{total} correct ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n elif leaderboard_type == \"worst\":\n percent = (losses / total) * 100\n position += _(\"You have {wins}/{total} incorrect ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n await ctx.send(position)\n await BaseMenu(\n source=LeaderboardPages(pages=leaderboard_list, style=leaderboard_type_str),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def is_crm_leader(resource):\n cmd = [\n \"crm\", \"resource\",\n \"show\", resource\n ]\n try:\n status = subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n return False\n else:\n if get_unit_hostname() in status:\n return True\n else:\n return False", "async def leaderboard(self, ctx) -> None:\n await ctx.send(\n \"\",\n embed=NumEmbed(\n title=\"Points Leaderboard\",\n fields=self.bot.points_leaderboard.field_representation,\n user=ctx.author,\n ),\n )", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True", "def announce_lead_changes(previous_leader=None):\n def say(score0, score1):\n if score0 > score1:\n leader = 0\n elif score1 > score0:\n leader = 1\n else:\n leader = None\n if leader != None and leader != previous_leader:\n print('Player', leader, 'takes the lead by', abs(score0 - score1))\n return announce_lead_changes(leader)\n return say", "def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack", "def start_leader_election():\n\t\ttime.sleep(5)\n\t\ttry:\n\n\t\t\tprint(\"starting the election... 
\")\n\t\t\tthread = Thread(target=contact_vessel,args=(next_address(),\"/election/electing\",{'start_id':node_id,'highest_value':randomized_value,'winning_id':node_id}))\n\t\t\tthread.daemon = True\n\t\t\tthread.start()\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn True", "def is_alive(self):", "async def leaderboard(self, ctx):\n\t\tasync with ctx.typing():\n\t\t\ttry:\n\t\t\t\tlbFunc = functools.partial(save_leaderboard)\n\t\t\t\tawait self.bot.loop.run_in_executor(None, lbFunc)\n\t\t\t\tawait ctx.send(file=discord.File(\"leaderboard.png\"))\n\t\t\texcept:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\t\"https://aninternettroll.github.io/mcbeVerifierLeaderboard/\"\n\t\t\t\t)", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "async def join_leaderboard(self, ctx: commands.Context) -> None:\n author = ctx.message.author\n log.info(f\"{author.name} ({author.id}) has requested the PyDis AoC leaderboard code\")\n\n info_str = (\n \"Head over to https://adventofcode.com/leaderboard/private \"\n f\"with code `{AocConfig.leaderboard_join_code}` to join the PyDis private leaderboard!\"\n )\n try:\n await author.send(info_str)\n except discord.errors.Forbidden:\n log.debug(f\"{author.name} ({author.id}) has disabled DMs from server members\")\n await ctx.send(f\":x: {author.mention}, please (temporarily) enable DMs to receive the join code\")\n else:\n await ctx.message.add_reaction(Emojis.envelope)", "def is_actor(self):\n return True", "def enough_players():\n return True", "def check_winner(self):\n pass", "def is_bot(self) -> bool:", "def isCurrentPlayerHome(self):\r\n \r\n #creates corresponding starting and ending points for each player\r\n if self.getTurn() == RED:\r\n start = 0\r\n end = 18\r\n else:\r\n start = 6\r\n end = 24\r\n \r\n #checks whether the current player has checkers on corresponding points\r\n for i in range(start, end):\r\n if self.points[i].getTeam() == self.getTurn():\r\n return False\r\n \r\n return True", "def someone_home(self) -> bool:\n return self._someone_home", "async def message_leaderboard(self, ctx, boardType):\n\n\t\tglobal embeds\n\t\tguild = ctx.message.guild\n\n\t\tif boardType == \"quotes\":\n\t\t\tleaderboardType = \"quoteLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"quoteLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"reactions\":\n\t\t\tleaderboardType = \"reactionLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"reactionLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"emojis\":\n\t\t\tleaderboardType = \"emojiLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"emojiLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telse:\n\t\t\tleaderboardType = \"messageLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"messageLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\n\t\tleaderboardEmbed.clear_fields()\n\n\t\tleaderboard = {k: v for k, v in sorted(leaderboard.items(), key=lambda a: a[1], reverse=True)}\n\n\t\tpastScore = 0\n\t\toffset = 0\n\t\tposition = 0\n\t\tuserValues = \"\"\n\n\t\tfor participant in leaderboard:\n\t\t\tscore = leaderboard[participant]\n\n\t\t\tif score == pastScore:\n\t\t\t\toffset += 1\n\t\t\telse:\n\t\t\t\tposition += 
offset + 1\n\t\t\t\toffset = 0\n\t\t\t\tpastScore = score\n\n\t\t\tif leaderboardType == \"reactionLeaderboard\":\n\t\t\t\tname = str(participant)\n\t\t\telif leaderboardType == \"emojiLeaderboard\":\n\t\t\t\tfor emoji in guild.emojis:\n\t\t\t\t\tif int(participant) == emoji.id:\n\t\t\t\t\t\tname = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif int(participant) == 456226577798135808:\n\t\t\t\t\t# Skip deleted users\n\t\t\t\t\tTrue\n\t\t\t\telif guild.get_member(int(participant)) is None:\n\t\t\t\t\tname = str(await self.bot.fetch_user(int(participant)))\n\t\t\t\telse:\n\t\t\t\t\tname = str(guild.get_member(int(participant)).display_name)\n\n\t\t\tuserValues += \"**\" + str(position) + \". \" + name + \"** - \" + str(score) + \"\\n\\n\\t\"\n\n\t\tif userValues == \"\":\n\t\t\tuserValues = \"None\"\n\n\t\tleaderboardEmbed.add_field(name=\"User\", value=\"\".join(userValues.split(\"\\t\")[0:10]), inline=True)\n\n\t\tmessage = await ctx.send(embed=leaderboardEmbed)\n\t\tself.cachedMessages[message.id] = {\"type\": leaderboardType, \"page\": 1}\n\t\tawait message.add_reaction(\"⬅️\")\n\t\tawait message.add_reaction(\"➡️\")", "def announce_to_players(self):\n # Notifiers will source nothing if self.character.location is None\n # or if self.receivers is None.\n # They will have empty receiver lists, and thus not do anything.\n\n # SelfListNotifier will notify the caller if a player or\n # player GM, and notify every player/player-GM on the list.\n player_notifier = SelfListNotifier(\n self.character,\n receivers=self.receivers,\n to_player=True,\n to_gm=True,\n )\n # RoomNotifier will notify every staff member in the room\n staff_notifier = RoomNotifier(\n self.character,\n room=self.character.location,\n to_staff=True,\n )\n\n # Generate the receivers of the notifications.\n player_notifier.generate()\n staff_notifier.generate()\n\n # Staff names get highlighted because they're fancy\n staff_names = [f\"|c{name}|n\" for name in sorted(staff_notifier.receiver_names)]\n\n # Build list of who is receiving this private roll. Staff are last\n receiver_names = sorted(player_notifier.receiver_names) + staff_names\n\n # If only the caller is here to see it, only the caller will be\n # listed for who saw it.\n if receiver_names:\n receiver_suffix = f\"(Shared with: {', '.join(receiver_names)})\"\n else:\n receiver_suffix = f\"(Shared with: {self.character})\"\n\n # Now that we know who is getting it, build the private message string.\n private_msg = f\"|w[Private Roll]|n {self.roll_message} {receiver_suffix}\"\n\n # Notify everyone of the roll result.\n player_notifier.notify(private_msg, options={\"roll\": True})\n staff_notifier.notify(private_msg, options={\"roll\": True})", "async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! 
You do not have permission to do that.')", "def makeLeader(self, node_id):\n self.graph.addTriple(\n node_id, self.annotation_properties['clique_leader'], True,\n object_is_literal=True, literal_type='xsd:boolean')\n return", "async def leaderboard(self, ctx, arg1: T = None, arg2: T = None):\n\n (channel, member) = self.resolve_arguments(arg1, arg2, types=get_args(T))\n\n await ctx.trigger_typing()\n\n member = member if member else ctx.author\n channel_id = channel.id if channel else None\n bot_ids = [bot.id for bot in filter(lambda user: user.bot, ctx.guild.members)]\n\n await self.bot.db.leaderboard.preselect(ctx.guild.id, bot_ids, channel_id)\n top10 = await self.bot.db.leaderboard.get_top10()\n around = await self.bot.db.leaderboard.get_around(member.id)\n\n embed = await self.display_leaderboard(ctx, top10, around, member)\n await ctx.send(embed=embed)", "def find_new_people(self):\n #greets people, only greets once while they're in the camera's view and are center of attention\n\n\n if (self.person is not None) and (self.person.acknowledged == False):\n self.person.acknowledged = True\n print \"I see you!\"\n self.idle_pub.publish(\"idle:stop\")\n time.sleep(2)\n\n greeting = [\"R_nudge\",\"R_look\"]\n for msg in greeting:\n self.behavior_pub.publish(msg)\n self.check_completion()\n\n\n self.detection_pub.publish('found')\n\n elif self.person is None:\n print \"I don't see you\"\n self.detection_pub.publish('nothing')", "def check_fabric_is_alive(fabric):\n seq = get_random_sequence()\n msg = eptMsg(MSG_TYPE.GET_FABRIC_STATUS, seq=seq, data={\"fabric\":fabric})\n #logger.debug(\"get fabric status (seq:0x%x) fabric: %s\", seq, fabric)\n redis = get_redis()\n p = redis.pubsub(ignore_subscribe_messages=True)\n p.subscribe(MANAGER_CTRL_RESPONSE_CHANNEL)\n redis.publish(MANAGER_CTRL_CHANNEL, msg.jsonify())\n start_ts = time.time()\n timeout = AppStatus.MANAGER_STATUS_BRIEF_TIMEOUT\n try:\n while start_ts + timeout > time.time():\n data = p.get_message(timeout=0.5)\n if data is not None:\n channel = data[\"channel\"]\n if channel == MANAGER_CTRL_RESPONSE_CHANNEL:\n msg = eptMsg.parse(data[\"data\"]) \n if msg.msg_type == MSG_TYPE.FABRIC_STATUS:\n # validate this is the addr and sequence number our user requested\n if msg.seq == seq and \"fabric\" in msg.data and \\\n msg.data[\"fabric\"] == fabric:\n #logger.debug(\"fabric status (0x%x) alive:%r\",seq,msg.data[\"alive\"])\n if msg.data[\"alive\"]: \n return True\n else:\n return False\n else:\n logger.debug(\"rx seq/fabric (0x%x/%s), expected (0x%x/%s)\",\n msg.seq, msg.data.get(\"fabric\", \"\"), seq, fabric)\n except Exception as e:\n logger.debug(\"Traceback:\\n%s\", traceback.format_exc())\n logger.debug(\"error: %s\", e)\n finally:\n if redis is not None and hasattr(redis, \"connection_pool\"):\n redis.connection_pool.disconnect()\n logger.warn(\"no manager response within timeout(%s sec)\", timeout)\n return False", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False", "async def on_message(self, message):\n\t\t# If message was sent in a guild\n\t\tif isinstance(message.channel, 
discord.TextChannel):\n\t\t\tguild = message.channel.guild\n\t\t\tleaderboard = self.leaderboards[str(guild.id)]\n\n\t\t\tif not message.author.bot:\n\t\t\t\t# Check message author\n\t\t\t\tif str(message.author.id) not in leaderboard[\"messageLeaderboard\"]:\n\t\t\t\t\tleaderboard[\"messageLeaderboard\"][str(message.author.id)] = 1\n\t\t\t\telse:\n\t\t\t\t\tleaderboard[\"messageLeaderboard\"][str(message.author.id)] += 1\n\n\t\t\t\t# Check for quotes\n\t\t\t\tif str(message.channel.id) == leaderboard[\"quotesChannel\"]:\n\t\t\t\t\tfor user in message.mentions:\n\t\t\t\t\t\tif str(user.id) not in leaderboard[\"quoteLeaderboard\"]:\n\t\t\t\t\t\t\tleaderboard[\"quoteLeaderboard\"][str(user.id)] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleaderboard[\"quoteLeaderboard\"][str(user.id)] += 1\n\n\t\t\t\t# Check for emojis\n\t\t\t\tfor emoji in self.bot.emojis:\n\t\t\t\t\temojiName = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\tfor index in range(0, message.content.count(emojiName)):\n\t\t\t\t\t\tleaderboard[\"emojiLeaderboard\"][str(emoji.id)] += 1\n\n\n\t\t\tleaderboard[\"lastUpdate\"] = message.created_at.isoformat()\n\t\t\tawait self.update_state()", "async def leaderboard(self, ctx: commands.Context, *, leaderboard_type: str = \"seasonal\") -> None:\n leaderboard_type = leaderboard_type.replace(\" \", \"_\").lower()\n if leaderboard_type in [\"seasonal\", \"season\"]:\n await self.post_leaderboard(ctx, \"season\")\n if leaderboard_type in [\"weekly\", \"week\"]:\n await self.post_leaderboard(ctx, \"weekly\")\n if leaderboard_type in [\"playoffs\", \"playoff\"]:\n await self.post_leaderboard(ctx, \"playoffs\")\n if leaderboard_type in [\"playoffs_weekly\", \"playoff_weekly\"]:\n await self.post_leaderboard(ctx, \"playoffs_weekly\")\n if leaderboard_type in [\"pre-season\", \"preseason\"]:\n await self.post_leaderboard(ctx, \"pre-season\")\n if leaderboard_type in [\"pre-season_weekly\", \"preseason_weekly\"]:\n await self.post_leaderboard(ctx, \"pre-season_weekly\")\n if leaderboard_type in [\"worst\"]:\n await self.post_leaderboard(ctx, \"worst\")", "def client_add_received():\n\t\tglobal board, node_id\n\t\ttry:\n\t\t\tnew_entry = request.forms.get('entry')\n\t\t\tthread=Thread(target=contact_vessel,args=(leader_ip,'/leader/add/0',new_entry))\n\t\t\tthread.daemon= True\n\t\t\tthread.start()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn False", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def notify_winner(self):\n self.is_winner = True", "def is_actor():\n return False", "def watch(self, leader_index, timeout):\n\n self.event.wait(timeout)\n return self.event.isSet()", "def test_user_is_sender(self):\n sender = self.create_user()\n thread = self.create_thread(sender=sender, status='pending')\n self.assertTrue(thread.first_message.visible_to_user(sender))", "async def check_player_standing(user_id):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT stand FROM blackjack.currentstatus WHERE userid = $1\", user_id)) == 1", "def ping(self):\n return True", "def ping(self):\n return True", "def print_leader(self):\r\n return \"Best particle found:\\n{0}\".format(\r\n repr(self.population[self.leader]))", "def userFollowers(nick):\n 
if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "async def on_start(self):\n m = \"**{}** has started a game of {}! To participate, say `I`! **{} players needed.**\".format(\n self.message.author.display_name, self.name, self.num)\n await client.say(self.message, m)", "def get_leader(model, application_name):\n command = ['juju', 'run', '--format=yaml',\n '--model', model,\n '--application', application_name,\n 'is-leader']\n results = yaml.load(subprocess.check_output(command))\n for unit in results:\n if 'True' in unit['Stdout'].strip():\n return unit['UnitID']", "def on_win(data):\n print(str(data))\n update_score_db(data['winner'], data['loser'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def should_keep_running(self):\n return len(self.party.active_users())", "def groupstatus(speaker, action, args, soco_function, use_local_speaker_list):\n\n visible_speakers = False\n invisible_speakers = False\n coordinator = None\n\n for grouped_speaker in speaker.group.members:\n if speaker is grouped_speaker:\n continue\n if grouped_speaker.is_visible:\n visible_speakers = True\n if not grouped_speaker.is_visible:\n invisible_speakers = True\n if grouped_speaker.is_coordinator:\n coordinator = grouped_speaker\n\n logging.info(\n \"Visible = {}, Coordinator = {}, Speakers in Group = {}, Other Visible Speakers = {}, Other Invisible Speakers = {}\".format(\n speaker.is_visible,\n speaker.is_coordinator,\n len(speaker.group.members),\n visible_speakers,\n invisible_speakers,\n )\n )\n\n if len(speaker.group.members) == 1:\n print(\"Standalone\")\n\n if speaker.is_visible and speaker.is_coordinator and invisible_speakers:\n print(\"Paired or bonded, coordinator\")\n\n if not speaker.is_visible:\n print(\n \"Paired or bonded, not coordinator [coordinator = {} @ {}]\".format(\n coordinator.player_name, coordinator.ip_address\n )\n )\n\n if speaker.is_visible and speaker.is_coordinator and visible_speakers:\n print(\"Grouped, coordinator\")\n\n if speaker.is_visible and not speaker.is_coordinator:\n print(\n \"Grouped, not coordinator [coordinator = {} @ {}]\".format(\n coordinator.player_name, coordinator.ip_address\n )\n )\n\n return True", "async def leaderboard(message, client, extra_args):\n\n if not extra_args or not (user_id := utils.from_mention(extra_args[0])):\n user_id = message.author.id\n page_length = controls[\"style\"][\"embeds\"][\"length\"]\n\n @database.query\n def find_people(conn):\n cursor = conn.cursor()\n # the limit forces the leaderboard to match the chosen page length\n cursor.execute(\"SELECT awarder, SUM(operation) AS aggregate FROM funnypts WHERE awardee = ? GROUP BY awarder ORDER BY aggregate DESC LIMIT {0}\".format(\n page_length), (user_id,))\n awarder_lb = cursor.fetchall()\n cursor.execute(\"SELECT awardee, SUM(operation) AS aggregate FROM funnypts WHERE awarder = ? 
GROUP BY awardee ORDER BY aggregate DESC LIMIT {0}\".format(\n page_length), (user_id,))\n awardee_lb = cursor.fetchall()\n cursor.close()\n conn.close()\n return (awarder_lb, awardee_lb)\n\n boards = find_people()\n entries = []\n for lb in boards:\n for entry in lb:\n entries.append(entry)\n # fill empty entries\n for i in range(len(lb), page_length):\n entries.append((\"--\", \"--\"))\n\n @utils.paginated_embeds\n def populate(embed, entry, position):\n if entry[0] != \"--\":\n username = client.get_user(entry[0]).name.split(\"#\", 1)[0]\n desc = str(entry[1])\n desc += \" POINT AWARDED\" if desc == \"1\" else \" POINTS AWARDED\"\n else:\n username = desc = \"--\"\n position = position % page_length + 1\n embed.add_field(name=\"{0}. {1}\".format(\n position, username), value=desc, inline=False)\n\n page_embeds = populate(\"P1: AWARDED FROM | P2: AWARDED TO\", entries)\n await utils.sauce_pages(page_embeds, message, client)", "def ping_cluster():\n response = requests.get('{}/v1/status/peers'.format(common.URL))\n response.raise_for_status()\n\n # Wait for all 3 agents to join the cluster\n if len(response.json()) == 3:\n return True\n\n return False", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "def start(self, event):\n\t\tself.get_roster()\n\t\tself.send_presence()", "async def leaderboard(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n name = \" \".join(args) if args else None\n msg = await ctx.send(\"Select an emoji to load a leaderboard.\")\n for emoji in lb_helpers.EMOJI.values():\n await msg.add_reaction(emoji)\n\n while True:\n reaction, user = await self.bot.wait_for('reaction_add')\n if user == ctx.author and reaction.message.id == msg.id:\n for key in lb_helpers.EMOJI.keys():\n if str(reaction.emoji) == lb_helpers.EMOJI[key]:\n await msg.edit(content=None)\n await msg.edit(content=lb_helpers.get_leaderboard(key, name))", "def main(self):\n \n print(\"Welcome to Speedy Gonzales!\")\n print(\"Press 'a' to move your left leg, 'k' to move your right leg.\")\n print(\"Ready?\")\n \n start = time.time()\n self.score = 0\n self.previous_key = None\n \n def on_key_release(key):\n try:\n if key == keyboard_0.Key.esc:\n print(\"Good Bye!\")\n listener.stop()\n exit()\n \n if key.char == \"a\" or key.char=='k':\n msg = \" . \" if key.char=='a' else \" .\"\n print(msg)\n # Main loop to run the program.\n if key.char != self.previous_key:\n self.score = self.score + 1\n self.previous_key = key.char\n else:\n # Hit 'a' or 'k' two times in a row, fell for 2s.\n self.previous_key = None\n print(\"Ops! You fell down on your face!\")\n time.sleep(1)\n print(\"Stand up!\")\n time.sleep(1)\n print(\"Continue running!\")\n \n # Finish running.\n if self.score >= 40:\n listener.stop()\n end = time.time()\n score = round(end - start, 2)\n print(\"Congrats! You have run 100m in {}!\".format(score))\n \n print(\"Check the previous winner...\")\n self.print_leaderboard() \n \n score_flag = self.get_highscore(score)\n if score_flag == 0:\n # Not in leaderboard.\n print(\"Thanks for playing.\")\n else:\n if score_flag == 1:\n # In top 10 leaderboard.\n winner_msg = \"\\n\\nYou have achieved top 10! \"\n else:\n # Get champion.\n winner_msg = \"\\n\\nYou are the champion! \"\n \n # Get winner's name and update leaderboard.\n name = input(winner_msg + \"Please enter your name:\" )\n self.update_leaderboard(name, score)\n \n print(\"Lets check the new leaderboard.... 
\\n\\n\\n\\n\\n\")\n time.sleep(1)\n self.print_leaderboard() \n \n \n # Ask for new game.\n in_flag = input(\"New game? (Y/N)\")\n if in_flag.upper() == 'Y':\n self.main()\n else:\n print(\"Good Bye!\")\n exit()\n \n except AttributeError:\n print(key)\n\n with keyboard_0.Listener(on_release = on_key_release, suppress=True) as listener:\n listener.join()", "def talk(self):\r\n if self.conversation is not None:\r\n print(\"[\" + self.name + \" says]: \" + self.conversation)\r\n else:\r\n print(self.name + \" doesn't want to talk to you\")", "def check(self):\n\n print('Requester object is active: \\t', str(self.is_active))\n print('Number of requests sent: \\t', str(self.n_requests))\n print('Requester opened: \\t\\t', str(self.st_time))\n print('Requester closed: \\t\\t', str(self.en_time))", "def is_alive(self):\n return True", "def rendezvous(leader_action=None):\n worker=current_worker()\n pool=current_worker_pool()\n if (not is_worker_thread()) or (not worker) or (not pool) or (not worker.isopen) or (not pool.isopen) or (worker.index<0):\n logging.warning(\"rendezvous can only be called from activated worker thread\")\n return\n pool.barrier.wait()\n # Remember, all workers in same worker pool are exceuting same code, that means\n # there is no any difference to let anyone to do the action, we just need to guarantee\n # the action is executed only once.\n if leader_action and worker.index==0:\n leader_action()\n pool.barrier.wait()", "def _is_authored_by_target(self, speech):\n speaker = speech.find(\"p\", klasse=\"redner\").redner.find(\"name\")\n\n if not speaker.vorname or not speaker.nachname:\n return False\n\n first_name = str(speaker.vorname.string)\n last_name = str(speaker.nachname.string)\n\n return first_name == self.first_name and last_name == self.last_name", "def alive(self):\n return True", "def is_user_player(self, user):\n return self.user == user", "def is_alive(self):\n pass", "def print_leaderboard(self):\n \n leaderboard = pandas.DataFrame(self.history_score.items(), columns=[\"Name\", \"Score\"])\n leaderboard.index += 1\n \n print(leaderboard)", "def handle_enter_room_session(self, lobby_command, client_socket):\n words = lobby_command.split()\n sent_name = words[1]\n user = self.clients[client_socket]['data'].decode('utf-8')\n for room in self.rooms:\n if room.name == sent_name and user in room.room_attrbts['members']:\n room.room_attrbts['active'].add(user)\n msg = f'User {user} is a member of room {sent_name}. Entering user into active mode for this room. ACTIVE'\n print(msg)\n return\n msg = f'Room {sent_name} not found or user {user} is not yet a member. 
NONACTIVE'\n self.log_and_send(client_socket, msg)\n return", "def checkForOnes(self, playersView: Player):\n # TODO checkForOnes not implemented\n raise NotImplementedError()", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def follow_people(self):\n #finds the person of interest's coordinates and then converts them to Edwin coordinates\n if (self.person is not None) and (self.person.acknowledged == True):\n trans = self.kinect_to_edwin_transform([self.person.X, self.person.Y, self.person.Z])\n if trans is not None:\n xcoord, ycoord, zcoord = self.edwin_transform(trans)\n\n #the person's coordinates are updated here, edwin's coordinates are updated in the callback\n self.coordx = xcoord\n self.coordy = ycoord\n self.coordz = zcoord\n\n #after coordinates are calculated, checks if the person has moved enough to respond, and then responds\n if abs(self.coordx - self.edwinx) > 400 or abs(self.coordy - self.edwiny) > 400 or abs(self.coordz - self.edwinz) > 400:\n msg = \"move_to:: \" + str(self.coordx) + \", \" + str(self.coordy) + \", \" + str(self.coordz) + \", \" + str(11)\n self.request_cmd(msg)", "def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)", "def isalive():\n return 'alive'", "async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950", "async def check_in_game(user_id, ctx): # this is meant for when it is accessed by commands outside of BlackJack.\n check = ex.first_result(await ex.conn.fetchrow(\"SELECT COUNT(*) From blackjack.games WHERE player1 = $1 OR player2 = $1\", user_id))\n if check:\n await ctx.send(f\"> **{ctx.author}, you are already in a pending/active game. Please type {await ex.get_server_prefix_by_context(ctx)}endgame.**\")\n return True", "def check_status(board, player_mark, turn_counter):" ]
[ "0.7640331", "0.74470776", "0.74470776", "0.74285924", "0.713586", "0.6762263", "0.6751401", "0.6734137", "0.6681536", "0.65391123", "0.64464325", "0.629198", "0.6276058", "0.62618124", "0.61972916", "0.61910975", "0.6164121", "0.59911203", "0.5944456", "0.5918337", "0.5872921", "0.5842132", "0.58410966", "0.5818992", "0.5804581", "0.57784945", "0.5749074", "0.5745227", "0.5738587", "0.57067996", "0.5694297", "0.56939715", "0.5667607", "0.5650556", "0.5602417", "0.5588946", "0.55753547", "0.5561537", "0.55334467", "0.5514116", "0.54782414", "0.5456713", "0.54536694", "0.54268306", "0.5415596", "0.541181", "0.54052377", "0.54039764", "0.5400567", "0.5384453", "0.53781384", "0.53718305", "0.53681624", "0.5366506", "0.53501743", "0.5347211", "0.53470343", "0.53425115", "0.53339154", "0.5329599", "0.532821", "0.531901", "0.53140354", "0.53090584", "0.53078026", "0.5305714", "0.5287466", "0.5282524", "0.5282524", "0.52789426", "0.52693635", "0.5268544", "0.5263172", "0.52421206", "0.52380353", "0.52349174", "0.52317613", "0.52260435", "0.5222326", "0.52177244", "0.52171296", "0.521144", "0.5205691", "0.5204024", "0.51977426", "0.5196332", "0.519368", "0.5193643", "0.51890266", "0.51887655", "0.5186601", "0.51843584", "0.5179191", "0.51695895", "0.51666903", "0.5165575", "0.51536995", "0.5151768", "0.5148", "0.51447153" ]
0.78123844
0
detect hosts for the active failover
def active_failover_detect_hosts(self):
    self.check_that_instance_is_alive()
    # this is the way to detect the master starter...
    lfs = self.get_log_file()
    if lfs.find("Just became master") >= 0:
        self.is_master = True
    else:
        self.is_master = False
    regx = re.compile(r"Starting resilientsingle on port (\d*) .*")
    match = regx.search(lfs)
    if match is None:
        raise Exception(timestamp() + "Unable to get my host state! " + self.basedir + " - " + lfs)
    self.frontend_port = match.groups()[0]
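A self-contained sketch of the log-parsing step above; only the regex is taken from the source, while the sample log line and helper name are assumptions for illustration:

import re

SAMPLE_LOG = "Starting resilientsingle on port 8530 with role single"

def parse_frontend_port(log_text):
    # Same pattern as in active_failover_detect_hosts above.
    match = re.search(r"Starting resilientsingle on port (\d*) .*", log_text)
    if match is None:
        raise RuntimeError("Unable to get my host state!")
    return match.groups()[0]

assert parse_frontend_port(SAMPLE_LOG) == "8530"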
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sniff_hosts(self):\n previous_sniff = self.last_sniff\n hosts = []\n try:\n # reset last_sniff timestamp\n self.last_sniff = time.time()\n try:\n hosts = self.get_es_node_addresses()\n except Exception:\n raise TransportError(\"N/A\", \"Unable to sniff hosts.\" + traceback.format_exc())\n except:\n # keep the previous value on error\n self.last_sniff = previous_sniff\n raise\n\n # we weren't able to get any nodes, maybe using an incompatible\n # transport_schema or host_info_callback blocked all - raise error.\n if not hosts:\n raise TransportError(\"N/A\", \"Unable to sniff hosts - no viable hosts found.\" + traceback.format_exc())\n\n self.set_connections(hosts)", "def all_hosts(self):\n ...", "def getHosts(self):\n raise \"not implemented\"", "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def get_hosts(self, target, listener_type):", "def select_active_hosts():\n return IMPL.select_active_hosts()", "def get_hosts_retry(self, target, listener_type):", "def _get_hosts_from_state(state):\n active_nodes = set()\n for shard, shard_data in state.get('shards', {}).items():\n replicas = shard_data['replicas']\n for replica, replica_data in replicas.items():\n if replica_data['state'] == 'active':\n active_nodes.add(replica_data['base_url'])\n\n return active_nodes", "def get_hosts(self):\n\n raise NotImplementedError", "def check_hosts(zk,host_name,task,scheduler_log):\n\n #scheduler_log.debug(\"Scheduler Working...!!!\")\n try:\n #Leader Election\n leader = leaderCheck(zk=zk)\n #scheduler_log.debug(\"Leader Election Over\")\n #Update alive status to zookeeper - seems unnecessary\n imalive(zk=zk)\n #scheduler_log.debug(\"Alive Status Updated\")\n\n #If current Host is the Leader perform Scheduled Checks \n if (leader == host_name):\n scheduler_log.debug(\"%s : I am the Leader\"%host_name)\n\n #Fetch List of Hosts - From API\n host_dict = list_hosts(nova)\n allhosts = host_dict['all_list']\n api_down_nodes = host_dict['down_list']\n dishosts = host_dict['disabled_list']\n\n zk_all = zk.get_children(\"/openstack_ha/hosts/all\")\n zk_alive = zk.get_children(\"/openstack_ha/hosts/alive\")\n \n #Fetch Down nodes that are already Handeled - From Zookeeper\n zk_down = zk.get_children(\"/openstack_ha/hosts/down\")\n\n #Fetch nodes that are down and not already handled - From Zookeeper\n calculated_down_nodes = list(set(zk_all) - set(zk_alive))\n\n #Find Nodes Where Scheduler Only failed\n scheduler_down = list(set(calculated_down_nodes).difference(set(api_down_nodes)))\n for node in scheduler_down:\n scheduler_log.debug(\"HA Scheduler Failed on Node : %s \"%node)\n \n #Find Nodes Where API Only failed \n api_down = list(set(api_down_nodes).difference(set(calculated_down_nodes)))\n for node in api_down:\n scheduler_log.debug(\"API Failed on Node : %s \"%node)\n if node not in zk_all:\n scheduler_log.debug(\"HA Scheduler not even initialized %s\"%node)\n\n #Find nodes where both API and Zookeeper are failed \n api_scheduler_down = list(set(api_down_nodes).intersection(set(calculated_down_nodes)))\n\n # Possible Host states - Api only failure | Complete Host Failure ( Not yet Handled | Handling | Handled )\n if(len(api_scheduler_down))==0:\n scheduler_log.debug(\"Hosts working Normally....!!!\")\n else:\n scheduler_log.warning(\"More likely Disaster\")\n #skip if maintance\n # Here check the host in api_down_nodes(api) are present in calculated_down_nodes\n #if present start the instance migrations\n # Checking whether Cluster is Still under HA Policy\n # high 
availabity contiditions\n if len(api_scheduler_down) <= len(allhosts) - 1:\n scheduler_log.warn(\"Seems like Manageble Disaster\")\n for host in api_scheduler_down:\n scheduler_log.warning(\"Both Api and HA scheduler on\" +host+\" are down\")\n #checks whether down host from api is un handled(not present in down node calculate from zookeeper )\n #(host in zk_all and host not in zk_alive) == calculated_down_nodes\n if host in zk_down:\n #Node will present in zk_down only when all of it's instances are migrated\n scheduler_log.debug(\"Host %s Already handled...!!!!!\"%host)\n else:\n #Node down on api,zk and ( not handled | handling )\n if host not in dishosts:\n #Node Not disabled | disabled reason is not skippable\n scheduler_log.debug(host+\" is not disabled or reason is not maintenance\")\n if(zk.exists(\"/openstack_ha/hosts/time_out/\"+host)==None):\n scheduler_log.debug(\"Inside Time out Node Creation\")\n \n #adding host down time\n host_down_time = time.time()\n host_down_time = str.encode(str(host_down_time))\n scheduler_log.debug(host_down_time)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host, host_down_time)\n \n #adding time_suffix for json_dump file name\n temp_time=time.localtime(time.time()) \n time_suffix=str(temp_time.tm_mday)+\"_\"+str(temp_time.tm_mon)+\"_\"+\\\n str(temp_time.tm_year)+\"_\"+str(temp_time.tm_hour)+\"_\"+\\\n str(temp_time.tm_min)\n enc_time_suffix=str.encode(time_suffix)\n scheduler_log.debug(time_suffix)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\",enc_time_suffix)\n\n # call notification_mail(subj,msg) | Adding Down Node details to Notification \n try:\n subject = \"DGP Office VDI Node Down: %s\"%host\n message = \"Please Check the Network Connectivity and Powersupply as soon as possible\"\n notification_mail(subject,message,to_email=['naanalteam@naanal.in'])\n\n message = \"Please Contact System Administrator\"\n notification_mail(subject,message)\n scheduler_log.debug(\"mail in Scheduler...!\")\n except Exception as e:\n scheduler_log.debug(e)\n scheduler_log.debug(\"Error....! mail scheduler..!\")\n\n # add ping test\n ping_status=ping_check(host)\n if(ping_status):\n scheduler_log.debug(\"Not a Disaster\")\n scheduler_log.debug(\"ping test success....!!! Node is alive... 
Please Check the APIs,HA Scheduler and other Openstack Services\")\n\n else:\n scheduler_log.warning(\"Ping test also Failed on \"+host+\" proceed with migration\")\n if (zk.exists(\"/openstack_ha/hosts/start_migration/\"+ host)): # it checks the permission from the dashborad\n scheduler_log.warning(\" api down host :\"+host+\"present in zookeeper down_node:\")\n scheduler_log.debug(\"Strart migration....!!!!!\")\n scheduler_log.debug(\"migrating instances from the \"+host)\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode() \n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n #check for time out\n scheduler_log.debug(\"Checking Timeout for Down Node\",host)\n curent_time = time.time()\n if (zk.exists(\"/openstack_ha/hosts/time_out/\"+host)):\n down_host_failuretime = zk.get(\"/openstack_ha/hosts/time_out/\"+host)[0]\n down_host_failuretime = down_host_failuretime.decode(encoding='UTF-8')\n scheduler_log.warning(\"down_host_failuretime\",down_host_failuretime)\n down_host_failuretime = float(down_host_failuretime)\n time_interval = curent_time - down_host_failuretime\n if time_interval>migrate_time:\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode()\n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n scheduler_log.debug(\"Will Wait for another %d\"%(migrate_time-time_interval))\n else:\n scheduler_log.debug(\"%s Node Does'nt have TimeOut Value. Hence will not migrate forever\"%host)\n else:\n scheduler_log.debug(\"Host %s Under Maintenance\"%host)\n \n else:\n scheduler_log.warning(\"Un-Manageble Disaster Too many Nodes are down\")\n else:\n scheduler_log.debug(\"%s : Leader is %s\"%(host_name,leader))\n\n except Exception as e:\n if issubclass(e.__class__,kexception.NoNodeError):\n scheduler_log.exception(\"No node error\")\n elif any(issubclass(e.__class__, lv) for lv in kazoo_exceptions):\n scheduler_log.exception(\"Kazoo Exception.....: \")\n time.sleep(2)\n try:\n zk = KazooClient(hosts='127.0.0.1:2181')\n zk.start() \n Node_creation = createNodeinAll(zk=zk, host_name=host_name)\n election_Node = election_node(zk=zk, host_name=host_name)\n except:\n pass\n else:\n scheduler_log.warning(\"Unhandled Error \")\n scheduler_log.exception(\"\")", "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def getHostInfo():", "def set_hosts(self, hypervisor_per_cluster=False):\n\n self.conf['hosts'] = set()\n\n host_patterns, host_others = self._sift_patterns(\n self.conf.get('hosts_list')\n )\n datacenter_patterns = self.conf.get('datacenter', [])\n cluster_patterns = self.conf.get('cluster', [])\n\n if host_patterns:\n self.conf['host_pattern'] = host_patterns\n\n self.conf['hosts'] = self._get_hypervisors_from_api()\n # Filter all host specified with -H\n host_filtered = set()\n if host_others:\n host_filtered = set([\n (dc, cl, h, is_spm, is_up)\n for dc, cl, h, is_spm, is_up in self.conf['hosts']\n if h in host_others\n ])\n not_found = host_others - set(host[2] for host in host_filtered)\n if not_found != set():\n # try to resolve to ip specified hosts\n for fqdn in set(not_found):\n try:\n ipaddr = socket.gethostbyname(fqdn)\n logging.debug('%s --> %s' % (fqdn, ipaddr))\n for (dc, cl, h, is_spm, is_up) in 
self.conf['hosts']:\n if h == ipaddr:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(fqdn)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=fqdn,\n )\n )\n if not_found != set():\n # try to resolve to ip known hypervisors\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n try:\n ipaddr = socket.gethostbyname(h)\n logging.debug('%s --> %s' % (h, ipaddr))\n if ipaddr in host_others:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(ipaddr)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=h,\n )\n )\n if not_found != set():\n logging.error(\n _(\n 'The following host are not listed as hypervisors: '\n '{not_listed}. Known hypervisors can be listed using '\n 'the list command'\n ).format(\n not_listed=','.join(not_found)\n )\n )\n sys.exit(ExitCodes.CRITICAL)\n\n orig_hosts = self.conf['hosts'].copy()\n\n if host_patterns:\n for pattern in host_patterns:\n host_filtered |= self._filter_hosts('host', pattern)\n if host_patterns or host_others:\n self.conf['hosts'] &= host_filtered\n\n # Intersect with hosts belonging to the data centers specified with -d\n if datacenter_patterns:\n datacenter_filtered = set()\n for pattern in datacenter_patterns:\n datacenter_filtered |= self._filter_hosts(\n 'datacenter', pattern\n )\n self.conf['hosts'] &= datacenter_filtered\n\n # Intersect with hosts belonging to the clusters specified with -c\n if cluster_patterns:\n # remove all hosts that don't match the patterns\n cluster_filtered = set()\n for pattern in cluster_patterns:\n cluster_filtered |= self._filter_hosts('cluster', pattern)\n self.conf['hosts'] &= cluster_filtered\n\n # If hypervisor_per_cluster is set, collect data only from a single\n # hypervisor per cluster; if the Spm found, collect data from it.\n if hypervisor_per_cluster:\n selected_hosts = dict()\n for dc, cluster, host, is_spm, is_up in self.conf['hosts']:\n # Always add the SPM\n if is_spm:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # For the given cluster, if no host added yet, add it\n elif cluster.name not in selected_hosts:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # If a host is up and the SPM isn't added yet, add this host\n elif is_up and not selected_hosts[cluster.name][3]:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n self.conf['hosts'] &= set(selected_hosts.values())\n\n # warn users if they are going to collect logs from all hosts.\n if orig_hosts and self.conf['hosts'] == orig_hosts:\n logging.warning(\n _(\n 'This ovirt-log-collector call will collect logs from '\n 'all available hosts. 
This may take long time, '\n 'depending on the size of your deployment'\n )\n )\n\n return bool(self.conf.get('hosts'))", "def get_upgradable_hosts(dbapi):\n all_hosts = dbapi.ihost_get_list()\n # TODO:(mingyuan) Exclude edgeworker host from upgradable hosts\n # until the final phase of the edgeworker feature completed\n hosts = [i for i in all_hosts if i.personality != constants.EDGEWORKER]\n\n return hosts", "def check_all_hosts (self, repo_version_id, version_name):\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query1 = \"SELECT chm.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}';\".format(self.cluster_name)\n else:\n query1 = \"SELECT h.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}' JOIN hosts h ON chm.host_id = h.host_id;\".format(self.cluster_name)\n\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query2 = \"SELECT hv.host_name, hv.state FROM host_version hv WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n else:\n #query2 = \"SELECT hv.state,h.host_name FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n query2 = \"SELECT hv.state,h.host_name, hs.health_status,hs.agent_version,(h.total_mem/1024/1024) as total_mem_gb,(hs.available_mem/1024/1024) as available_mem_gb FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id JOIN hoststate hs ON h.host_id = hs.host_id WHERE hv.repo_version_id = {0} order by h.host_name;\".format(repo_version_id)\n # All cluster hosts\n host_names = set()\n self.cursor.execute(query1)\n rows = self.cursor.fetchall()\n if self.options.verbose:\n Logger.debug(query1 + \"\\n\")\n if rows and len(rows) > 0:\n host_names = set([row[0] for row in rows if len(row) == 1])\n Logger.debug(\"Hosts: {0}\".format(\", \".join(host_names)))\n\n host_name_to_state = {} # keys should be a subset of host_names\n hosts_with_repo_version_state_not_in_current = set()\n self.cursor.execute(query2 + \"\\n\")\n rows = self.cursor.fetchall()\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST(S) STATE\\t\")\n Logger.info(\"******************************************************************************************************************************************************\\n\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n Logger.info(\"State\\t\\tHostname\\t\\t\\t\\tHealth\\t\\tAgentVersion\\tTotalMemory\\tAvailableMemory\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n\n if rows and len(rows) > 0:\n for row in range(len(rows)):\n data = json.loads(rows[row][2])\n data1 = json.loads(rows[row][3])\n Logger.info(\"{0}\\t\\t{1}\\t\\t{2}\\t\\t{3}\\t\\t{4}\\t\\t{5}\".format(rows[row][0], rows[row][1], data[\"healthStatus\"], data1[\"version\"], rows[row][4], rows[row][5]))\n print (\"\\n\")\n Logger.debug(query2)\n if rows and len(rows) > 0:\n for row in rows:\n if len(row) == 6:\n host_name = row[1]\n state = row[0]\n host_name_to_state[host_name] = state\n if state.upper() != \"CURRENT\":\n hosts_with_repo_version_state_not_in_current.add(host_name)\n host_names_with_version = 
set(host_name_to_state.keys())\n host_names_without_version = host_names - host_names_with_version\n # Logger.info(\"\\t\\tHost(s) state Summary\")\n if len(host_names) > 0:\n if len(host_names_without_version) > 0:\n Logger.error(\"{0} host(s) do not have a Host Version for Repo Version {1}.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(host_names_without_version), version_name, \", \".join(host_names_without_version)))\n\n if len(hosts_with_repo_version_state_not_in_current) > 0:\n Logger.error(\"{0} host(s) have a Host Version for Repo Version {1} but the state is not CURRENT.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(hosts_with_repo_version_state_not_in_current), version_name, \", \".join(hosts_with_repo_version_state_not_in_current)))\n\n if len(host_names_without_version) == 0 and len(hosts_with_repo_version_state_not_in_current) == 0:\n Logger.info(\"Found {0} host(s) in the cluster, and all have a Host Version of CURRENT for \" \\\n \"Repo Version {1}. Things look good.\\n\".format(len(host_names), version_name))\n else:\n Logger.error(\"Make sure that all of these hosts are heartbeating, that they have the packages installed, the\\n\" \\\n \"hdp-select symlinks are correct, and that the services on these hosts have been restarated.\\n\")\n pass", "def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]", "def _get_active_hosts(self, object):\n\t\t## First, generate the negation list\n\t\tnegate_hosts = []\n\n\t\t## Hostgroups\n\t\tif object.has_key(\"hostgroup_name\"):\n\n\t\t\tfor hostgroup_name in self._get_list(object, 'hostgroup_name'):\n\t\t\t\tif hostgroup_name[0] == \"!\":\n\t\t\t\t\thostgroup_obj = self.get_hostgroup(hostgroup_name[1:])\n\t\t\t\t\tnegate_hosts.extend(self._get_list(hostgroup_obj,'members'))\n\n\t\t## Host Names\n\t\tif object.has_key(\"host_name\"):\n\t\t\tfor host_name in self._get_list(object, 'host_name'):\n\t\t\t\tif host_name[0] == \"!\":\n\t\t\t\t\tnegate_hosts.append(host_name[1:])\n\n\n\t\t## Now get hosts that are actually listed\n\t\tactive_hosts = []\n\n\t\t## Hostgroups\n\t\tif object.has_key(\"hostgroup_name\"):\n\n\t\t\tfor hostgroup_name in self._get_list(object, 'hostgroup_name'):\n\t\t\t\tif hostgroup_name[0] != \"!\":\n\t\t\t\t\tactive_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name),'members'))\n\n\t\t## Host Names\n\t\tif object.has_key(\"host_name\"):\n\t\t\tfor host_name in self._get_list(object, 'host_name'):\n\t\t\t\tif host_name[0] != \"!\":\n\t\t\t\t\tactive_hosts.append(host_name)\n\n\t\t## Combine the lists\n\t\treturn_hosts = []\n\t\tfor active_host in active_hosts:\n\t\t\tif active_host not in negate_hosts:\n\t\t\t\treturn_hosts.append(active_host)\n\n\t\treturn return_hosts", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def default_node_detector():\n ret = []\n try:\n hostname = socket.gethostname()\n ret.append(hostname)\n except socket.error:\n pass\n\n try:\n fqdn = socket.getfqdn()\n if fqdn not in ret:\n ret.append(fqdn)\n except socket.error:\n pass\n\n if any(ret):\n return ret\n else:\n return None", "def test_reports_enabled_hosts_as_up(self):\n compute1 = 
self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def enforce_hostnames(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def all_hosts(*args, **kwargs):\n return True", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "def verify_lag_host_connectivity(self):\n # Find all LACP hosts\n for lacp_id, host_options in self.host_options.items():\n if 'lacp' in host_options:\n # Found LACP host\n for dst_id in self.host_information:\n if lacp_id == dst_id:\n continue\n # Test connectivity to any other host (might be another LAG host)\n self.check_host_connectivity_by_id(lacp_id, dst_id)", "def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]", "def test_hostmgr_failover(self, failure_tester):\n hosts1 = self._get_hosts(failure_tester)\n\n leader1 = failure_tester.fw.get_leader_info(failure_tester.hostmgr)\n assert leader1\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n failure_tester.wait_for_leader_change(failure_tester.hostmgr, leader1)\n failure_tester.reset_client()\n\n # verify that we can query the new leader\n def check_hosts():\n hosts2 = self._get_hosts(failure_tester)\n return len(hosts1) == len(hosts2)\n\n failure_tester.wait_for_condition(check_hosts)", "def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)", "def mon_hosts(self, mon_ips):\n hosts = []\n for ceph_addrs in mon_ips:\n # NOTE(jamespage): This looks odd but deals with\n # use with ceph-proxy which\n # presents all monitors in\n # a single space delimited field.\n for addr in ceph_addrs.split(' '):\n hosts.append(ch_ip.format_ipv6_addr(addr) or addr)\n hosts.sort()\n return hosts", "def include_up_hosts(nmap_host):\n if nmap_host.status == 'up':\n return True\n return False", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari 
version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host", "async def establish_hosts(self):\n scheme = self._config['scheme']\n hosts = self._config['hosts']\n port = self._config['port']\n for hostname in hosts:\n url = '{}://{}:{}/gremlin'.format(scheme, hostname, port)\n host = await driver.GremlinServer.open(\n url, self._loop, **dict(self._config))\n self._hosts.append(host)\n self._hostmap[hostname] = host", "def get_hosts(self):\n if self._scanned:\n return self._scanner.all_hosts()\n else:\n raise ScannerError(\"ERROR: A scan has not yet been conducted!\")", "def test_start_vms(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def get_weak_hosts(self):\n weak = []\n try:\n for host in self.get_hosts_only():\n # Checks for the success code against each host in the dictionary\n if self.get_ftp_code(host) == 220:\n weak.append(host)\n except TypeError:\n pass\n return weak", "def hosts(self):\n return tuple(self.hosts_)", "def test_liveness_multihost(self):\n with DockerHost('host1',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host1, \\\n DockerHost('host2',\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS) as host2:\n retry_until_success(host1.assert_is_live, retries=30)\n retry_until_success(host2.assert_is_live, retries=30)", "def hostname(ctx):\n ctl = ctx.ctl\n\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n if len(jobs) == 0:\n click.echo('No jobs running', err=True)\n sys.exit(1)\n\n for job in jobs:\n host = ctl('get-host', '--jobid', job['id']).get('host')\n click.echo(host)\n\n return 0", "def nslookup(self):\n if len(self.hostnames) == 0:\n st, out = commands.getstatusoutput('get_instance_by_service %s' % self.bns)\n assert st == 0, \"Failure:'get_instance_by_service %s', errno=%d\" % (self.bns, st)\n self.hostnames = out.split('\\n')\n assert self.hostnames, 'No hosts found for bns: \"%s\"' % self.bns", "def select_inactive_hosts():\n return IMPL.select_inactive_hosts()", "def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, 
connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]", "def include_hostnames(nmap_host):\n if nmap_host.hostnames:\n return True\n return False", "def get_hosts_fanout(self, target, listener_type):", "def _display_hosts(self):\n if (\n self.check_valid_result_data(\"hosts\", silent=True)\n and self._last_result.hosts\n ):\n nb_markdown(f\"Hosts connecting to {self.url}\", \"bold\")\n display(self._last_result.hosts)\n else:\n nb_markdown(f\"No hosts found connecting to {self.url}\")", "def get_inv_hosts(self, host_pattern):\n return self.im.get_hosts(host_pattern)", "def map_int_ext_hosts(self):\n int_hosts = []\n ext_hosts = []\n dp_hosts = {self.dp_name(dp_index): ([], []) for dp_index in range(self.NUM_DPS)}\n for host_id, options in self.host_options.items():\n host = self.host_information[host_id]['host']\n if options.get('loop_protect_external', False):\n ext_hosts.append(host)\n int_or_ext = 1\n else:\n int_hosts.append(host)\n int_or_ext = 0\n for link in self.host_links[host_id]:\n dp_hosts[self.dp_name(link)][int_or_ext].append(host)\n return set(int_hosts), set(ext_hosts), dp_hosts", "def hostnames(self) -> Sequence[str]:\n return pulumi.get(self, \"hostnames\")", "def getHost():", "def getHost():", "def process_host(desired_host):\n \n node_list = []\n host_info_list = [] \n if desired_host == \"all\":\n desired_host_list = getAllMachines()\n else:\n desired_host_list = (subprocess.getoutput(\"qconf -shgrp_resolved \" + '@' + str(desired_host))).split()\n qstat = subprocess.getoutput('qstat -f')\n for host in desired_host_list:\n if qstat.find(host) != (-1):\n #Searches the long string for the index of the occurance of the specified host, then\n #parses it the string for just that one line with the host that we want.\n host_info_list.append((qstat[qstat.find(host):].split('\\n'))[0])\n #Start at with everything at 0, and will count up as encountered.\n total_nodes = 0\n total_cores = 0\n disabled_cores = 0\n used_cores = 0\n free_cores = 0\n empty_nodes = 0\n disabled_nodes = 0\n for host in host_info_list:\n #simply gathering info qstat spat out for us\n temp_node = Node((host.split()[0]))\n cores = host.split()[2].replace('/', ' ').split()\n host_used_cores = cores[1]\n host_total_cores = cores[2]\n if len(host.split()) == 6 and (host.split()[5] == 'd' or host.split()[5] == 'E' or \\\n host.split()[5] == 'au' or host.split()[5] == 'Eau' or host.split()[5] == 'Eqw' \\\n or host.split()[5] == 'adu'):\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n total_cores += int(host_total_cores)\n disabled_nodes += 1\n else: \n temp_node.set_disabled_switch(False)\n used_cores += int(host_used_cores)\n total_cores += int(host_total_cores)\n free_cores += int(host_total_cores) - int(host_used_cores)\n if int(host_used_cores) == 0:\n empty_nodes += 1\n temp_node.set_cores(host_total_cores, host_used_cores)\n total_nodes += 1\n node_list.append(temp_node) \n \n if len(sys.argv) == 3:\n if sys.argv[2] == '--details':\n print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list)\n elif sys.argv[2] == '-v' or sys.argv[2] == '--visual':\n draw_queue(total_nodes, total_cores, used_cores, empty_nodes, desired_host, disabled_cores, \n disabled_nodes, node_list, free_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[2])\n show_usage(23)\n elif sys.argv[1] == \"-qlong\":\n # Returning values from this host group to the qlong 
function\n return(total_cores, used_cores, total_nodes, empty_nodes, disabled_cores,disabled_nodes, node_list)\n elif len(sys.argv) < 3:\n print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, disabled_cores, \n disabled_nodes)\n else:\n print('Error: Too many args')\n show_usage(23)\n return", "def select_host_states():\n return IMPL.select_host_states()", "def hosts(self):\n hosts = set()\n for p, c in self.configs_:\n hosts.update(c.hosts())\n return tuple(hosts)", "def hosts(self):\n return self._hosts", "def hosts(self):\n return self._hosts", "def node_host_status(self, node):\n if node.is_online() or node.is_unreachable():\n return self.HOST_MONITORED\n else:\n return self.HOST_UNMONITORED", "def online_check():\n try_first_ips = [\n \"216.58.213.238\", # google\n \"8.8.8.8\", # google\n \"8.8.4.4\", # google\n \"46.228.47.115\", # yahoo\n ]\n last_resort_ips = [ # dns root servers\n \"198.41.0.4\",\n \"192.228.79.201\",\n \"192.33.4.12\",\n \"128.8.10.90\",\n \"192.203.230.10\",\n \"192.5.5.241\",\n \"192.112.36.4\",\n \"128.63.2.53\",\n \"192.36.148.17\",\n \"192.58.128.30\",\n \"193.0.14.129\",\n \"198.32.64.12\",\n \"202.12.27.33\"\n ]\n\n iplists = []\n iplists.append(try_first_ips)\n iplists.append(rand_ips(max_num=50))\n iplists.append(last_resort_ips)\n\n return any(can_ping_host(ip) for ip in chain(*iplists))", "def GetSlavesForHost():\n hostname = os.getenv('TESTING_SLAVENAME')\n if not hostname:\n hostname = socket.getfqdn().split('.', 1)[0].lower()\n return [s for s in GetAllSlaves() if s.get('hostname') == hostname]", "def host_discover(self):\n self._scanned = True\n return self._scanner.scan(self._ips, arguments='-sP')", "def __get_rest_hosts(self):\n # Defaults\n host = self.rest_host\n if host is None:\n host = Defaults.rest_host\n\n environment = self.environment\n\n http_max_retry_count = self.http_max_retry_count\n if http_max_retry_count is None:\n http_max_retry_count = Defaults.http_max_retry_count\n\n # Prepend environment\n if environment != 'production':\n host = '%s-%s' % (environment, host)\n\n # Fallback hosts\n fallback_hosts = self.fallback_hosts\n if fallback_hosts is None:\n if host == Defaults.rest_host:\n fallback_hosts = Defaults.fallback_hosts\n elif environment != 'production':\n fallback_hosts = Defaults.get_environment_fallback_hosts(environment)\n else:\n fallback_hosts = []\n\n # Shuffle\n fallback_hosts = list(fallback_hosts)\n random.shuffle(fallback_hosts)\n self.__fallback_hosts = fallback_hosts\n\n # First main host\n hosts = [host] + fallback_hosts\n hosts = hosts[:http_max_retry_count]\n return hosts", "def verify_intervlan_routing(self):\n for src in self.host_information:\n for dst in self.host_information:\n if dst > src:\n self.check_host_connectivity_by_id(src, dst)", "def get_reachable_servers(self) -> List[Server]:\n pass", "def _acquireHosts(self):\n # the tcp socket that receives the ACK\n ackSocket = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM, 0)\n ackSocket.bind(('', Globals.ACK_PORT))\n ackSocket.settimeout(1)\n ackSocket.listen(5)\n \n # UDP BROADCAST\n Globals.Print('Broadcasting for other hosts...')\n self.broadcast('host up')\n \n # WAIT: RESPONSES, TIMEOUT\n self.hosts = []\n while 1:\n try:\n # TCP ACK\n clientsocket, (host, port) = ackSocket.accept()\n value = clientsocket.recv(256)\n if value == 'host up ack':\n self.addHost(host)\n clientsocket.close()\n clientsocket = None\n except:\n break\n \n ackSocket.close()", "def get_hosts_fanout_retry(self, target, listener_type):", "def 
host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def __getLocalAndRemoteMachineNames(self):\n hostNameMapping = {}\n ## collect the qualified hostnames for each remote node\n for nodeId in list(set(self.runInfoDict['Nodes'])):\n hostNameMapping[nodeId.strip()] = socket.gethostbyname(nodeId.strip())\n self.raiseADebug('Host \"'+nodeId.strip()+'\" identified with IP: ', hostNameMapping[nodeId.strip()])\n\n return hostNameMapping", "def beehive_hosts(self):\n run_data = {\n u'tags':[u'hosts']\n } \n self.ansible_playbook(u'beehive', run_data, \n playbook=self.beehive_playbook)", "def test_check_process_servers(self):\n self.cmd._process_servers(TEST_HOSTS, self.cloud_project)\n\n for host_id, test_host in TEST_HOSTS.items():\n host = CloudHost.objects.get(host_id=host_id)\n ips = host.ip_addresses\n self.assertEqual(host.hostname, test_host['hostname'])\n self.assertIn(test_host['tag'], host.tags.names())\n self.assertEqual(self.cloud_provider, host.cloudprovider)\n for ip in test_host['ips']:\n self.assertIn(ip, list(ips))\n self.assertEqual(host.hypervisor.hostname, test_host['hypervisor'])\n\n # check the creation date only for new hosts\n if host_id.find('_os_') != -1:\n self.assertEqual(\n datetime.strptime(\n test_host['created'],\n self.cmd.DATETIME_FORMAT\n ),\n host.created,\n )", "def hosts(self) -> t.List[str]:\n if not self._hosts:\n self._hosts = self._get_db_hosts()\n return self._hosts", "def test_multiple_hosts(self):\n config = {\n 'example.com': {\n 'applications': {\n 'site-hybridcluster': {\n 'image': 'unknown',\n },\n },\n 'version': 1,\n },\n 'example.net': {\n 'applications': {\n 'mysql-hybridcluster': {\n 'image': 'unknown',\n }\n },\n 'version': 1,\n },\n }\n expected = Deployment(nodes=frozenset([\n Node(hostname='example.com', applications=frozenset([\n Application(\n name='site-hybridcluster',\n image=DockerImage.from_string('unknown'),\n ports=frozenset(),\n )])),\n Node(hostname='example.net', applications=frozenset([\n Application(\n name='mysql-hybridcluster',\n image=DockerImage.from_string('unknown'),\n )]))]))\n self.assertEqual(expected,\n current_from_configuration(config))", "def get_info_hosts():\n print(\"\\nMapeando...\")\n host_ip = socket.gethostbyname(socket.gethostname()).split('.')\n base_ip = \".\".join(host_ip[0:3]) + '.'\n host_validos = []\n return_codes = dict()\n for i in range(1, 255):\n return_codes[base_ip + str(i)] = retorna_codigo_ping(base_ip + str(i))\n if i %20 == 0:\n print(\".\", end = \"\")\n if return_codes[base_ip + str(i)] == 0:\n host_validos.append(base_ip + str(i))\n print(\"\\nMapeamento completo, informações sobre portas enviadas...\")\n \n return host_validos", "def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def local_bind_hosts(self):\n self._check_is_started()\n return [_server.local_host for _server in self._server_list if\n _server.local_host is not None]", "def test_get_host(self):\n pass", "def get_hosts_info(self):\n result = []\n index = 0\n while index < self.host_numbers:\n host = 
self.get_generic_host_entry(index)\n result.append({\n 'ip': host['NewIPAddress'],\n 'name': host['NewHostName'],\n 'mac': host['NewMACAddress'],\n 'status': host['NewActive']})\n index += 1\n return result", "def check_host_activation(cls, ksm_merge_across_nodes):\n testflow.step(\"Deactivate the host %s\", sla_conf.HOSTS[0])\n assert ll_hosts.deactivate_host(\n positive=True,\n host=sla_conf.HOSTS[0],\n host_resource=sla_conf.VDS_HOSTS[0]\n )\n cls.update_merge_across_nodes_parameter(\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )\n assert ll_hosts.activate_host(\n positive=True,\n host=sla_conf.HOSTS[0],\n host_resource=sla_conf.VDS_HOSTS[0]\n )\n testflow.step(\n \"%s: wait until KSM merge across nodes will be equal to %s\",\n sla_conf.VDS_HOSTS[0], ksm_merge_across_nodes\n )\n assert sla_helpers.wait_for_numa_aware_ksm_status(\n resource=sla_conf.VDS_HOSTS[0],\n expected_value=ksm_merge_across_nodes\n )", "def checkwsrep(sqlhost):\n fnull = open(os.devnull, 'wb')\n ping = subprocess.Popen([\"/bin/ping\", \"-w2\", \"-c2\", sqlhost],\n stdout=fnull, stderr=subprocess.STDOUT)\n _, __ = ping.communicate()\n retcode = ping.poll()\n fnull.close()\n if retcode == 0:\n print \"\\nChecking if {} belongs to cluster ...\".format(sqlhost)\n cnx_sqlhost = None\n wsrep_status = 0\n try:\n cnx_sqlhost = MySQLdb.connect(\n user='sstuser',\n passwd=CREDENTIALS[\"sstuser\"],\n unix_socket='/var/lib/mysql/mysql.sock',\n host=sqlhost\n )\n cursor = cnx_sqlhost.cursor()\n wsrep_status = cursor.execute(\"\"\"show variables LIKE 'wsrep_on'\"\"\")\n except Exception:\n pass\n finally:\n if cnx_sqlhost:\n cnx_sqlhost.close()\n if wsrep_status == 1:\n LASTCHECK_NODES.append(sqlhost)\n print \"{}{} belongs to cluster{}\".format(GREEN, sqlhost, WHITE)\n else:\n print \"{}Skipping {}: it is not in the cluster{}\".format(\n YELLOW, sqlhost, WHITE)", "def check_ping(self):\n # Print ping status of all of your hosts, minimum padding of 8 spaces\n padding_size = max(len(max(self.hosts, key=len)) + 4, 8)\n print('{:{padding_size}}{}'.format('Host', 'Status', padding_size=padding_size))\n for host in self.hosts:\n # Get output of ping command\n output = str(Popen('ping -n 1 {}'.format(host), stdout=PIPE).communicate()[0])\n\n result = '{:{padding_size}}'.format(host, padding_size=padding_size)\n if 'unreachable' in output:\n result = result + 'Offline - unreachable'\n self.offline_hosts.append(host)\n elif 'could not find' in output:\n result = result + 'Offline - could not find'\n self.offline_hosts.append(host)\n elif 'transmit failed' in output:\n result = result + 'Offline - transmit failed'\n self.offline_hosts.append(host)\n elif 'timed out' in output:\n result = result + 'Offline - timed out'\n self.offline_hosts.append(host)\n else:\n result = result + 'Online'\n print(result)\n print()", "def add_hosts_all_subnets(client, parsed_args):\n for host in client.hosts():\n if host.category == \"linux\":\n if host.server_id:\n EXISTING.append(host)\n if host.server_id is None:\n create_agentless(host, client, parsed_args)", "def _mapped_to_this_conductor(self, node_uuid, driver):\n try:\n ring = self.ring_manager[driver]\n except exception.DriverNotFound:\n return False\n\n return self.host in ring.get_hosts(node_uuid)", "def get_hosts(self):\n\n return sorted(self.host_data.keys())", "def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"hostnames\")", "def can_failover(self):\n return self._can_failover", "def show_hosts():\n valid_hosts = 
(subprocess.getoutput(\"qconf -shgrpl\").split())\n for host in valid_hosts:\n print(host)\n sys.exit()", "def select_host_ids():\n return IMPL.select_host_ids()", "def get_migrating_vms_to_host(self, node_id):\n result = []\n for server_id in self.__migrating_tasks.keys():\n if self.__migrating_tasks[server_id] == node_id:\n result.append(server_id)\n return result", "def get_export_hosts(self, host_details):\n ipv4_hosts = list()\n ipv6_hosts = list()\n fqdn_hosts = list()\n\n for host in host_details:\n version = check_ipv4_ipv6_fqdn(host)\n if version == 4:\n ipv4_hosts.append(self.get_ipv4_host(host))\n elif version == 6:\n ipv6_hosts.append(self.get_ipv6_host(host))\n else:\n fqdn_hosts.append(host)\n\n return ipv4_hosts, ipv6_hosts, fqdn_hosts", "def _get_hosts_with_container(self, context, cluster):\n pass", "def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(6, len(hosts))", "def test_get_mon_hosts(self, get_public_addr):\n unit_addrs = {\n 'mon:0': {\n 'ceph-mon/0': '172.16.0.2',\n 'ceph-mon/1': '172.16.0.3',\n },\n 'bootstrap-source:1': {\n 'ceph/0': '172.16.10.2',\n 'ceph/1': '172.16.10.3',\n 'cehp/2': '172.16.10.4',\n }\n }\n\n def rel_ids_side_effect(relname):\n for key in unit_addrs.keys():\n if key.split(':')[0] == relname:\n return [key]\n return None\n\n def rel_get_side_effect(attr, unit, relid):\n return unit_addrs[relid][unit]\n\n def rel_units_side_effect(relid):\n if relid in unit_addrs:\n return unit_addrs[relid].keys()\n return []\n\n self.relation_ids.side_effect = rel_ids_side_effect\n self.related_units.side_effect = rel_units_side_effect\n get_public_addr.return_value = '172.16.0.4'\n self.relation_get.side_effect = rel_get_side_effect\n hosts = ceph_hooks.get_mon_hosts()\n self.assertEqual(hosts, [\n '172.16.0.2', '172.16.0.3', '172.16.0.4',\n '172.16.10.2', '172.16.10.3', '172.16.10.4',\n ])", "def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()", "def verify_all_stack_hosts(self):\n for _ in range(2):\n self.verify_stack_up()\n self.verify_no_cable_errors()\n self.verify_stack_hosts()\n self.verify_traveling_dhcp_mac()\n self.verify_unicast_not_looped()\n self.verify_no_bcast_to_self()\n self.verify_stack_has_no_loop()\n self.flap_all_switch_ports()", "def get_all_hosts(self, tag='agent'):\n # services = self.consul.catalog.service('docker')\n services = self.healthy_services.get_healthy_nodes()\n if not services:\n message = \"No services are registered to the consul\"\n raise exception.NoValidHostFound(message)\n\n return services", "def process_hosts(root_bridge_ips: Iterable[str], community: str,\n do_recurse=False, all_ports=False, resolve_hostnames=True) -> \\\n Tuple[Dict[str, Bridge], Dict[str, str], Dict[str, List[str]], Dict[str, str], str]:\n ips_to_visit = set(list(root_bridge_ips))\n visited_chassis_ids = set()\n visited_ips = set()\n\n bridges: Dict[str, Bridge] 
= {}\n all_bridge_macs = set()\n arp = {}\n\n walk = partial(snmp_walk, community=community)\n\n while ips_to_visit:\n host = ips_to_visit.pop()\n if host in visited_ips:\n continue\n visited_ips.add(host)\n\n print(\"VISITING\", host, file=sys.stderr)\n\n # Skip if chassis ID not found or has already been seen\n try:\n lldpLocChassisId = walk(host, '1.0.8802.1.1.2.1.3.2', 'hex').values()\n except ConnectionError as e:\n print(str(e) + f\" -- skipping {host}!\", file=sys.stderr)\n continue\n\n if not lldpLocChassisId:\n print(f\"Got no ChassisId from {host} -- missing LLDP support?\", file=sys.stderr)\n continue\n lldpLocChassisId = tuple(lldpLocChassisId)[0]\n if lldpLocChassisId in visited_chassis_ids:\n continue\n visited_chassis_ids.add(lldpLocChassisId)\n\n all_bridge_macs.add(lldpLocChassisId) # chassis id looks like a MAC and some switches use it for all their ports\n\n print(\" - Getting local info...\", file=sys.stderr)\n\n # Check that it's a bridge\n lldpLocSysCapSupported = int(tuple(walk(host, '1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n is_bridge = (lldpLocSysCapSupported & 32) != 0\n if not is_bridge:\n print(f\"Host {host} does not announce Bridge type LLDP capability. Skipping.\", file=sys.stderr)\n continue\n\n dot1dTpFdbPort_to_portnum = {int(k): v for (k, v) in walk(host, '1.3.6.1.2.1.17.1.4.1.2', 'int').items()}\n\n # Find local management IP addresses (if supported)\n local_ips = set()\n lldpLocManAddrIfId = walk(host, '1.0.8802.1.1.2.1.3.8.1.5', 'preview') # local man addresses\n for oid, port_id in lldpLocManAddrIfId.items():\n local_ips.add(read_ipv4_from_oid_tail(oid))\n\n lldpLocSysName = walk(host, '1.0.8802.1.1.2.1.3.3')\n lldpLocSysDesc = walk(host, '1.0.8802.1.1.2.1.3.4')\n\n this_bridge = Bridge(\n chassis_id=lldpLocChassisId,\n ip_addresses=list({host} | local_ips),\n name=next(iter(lldpLocSysName.values())),\n desc=next(iter(lldpLocSysDesc.values())) or '',\n neighbors=[],\n ports=defaultdict(lambda: Port(name='', speed=0, remote_macs=[], remote_ips=[], local_mac=None, interlink=False)))\n\n # Find IP addresses to neighbor bridges\n print(\" - Getting neighbors...\", file=sys.stderr)\n lldpRemManAddrTable = walk(host, '1.0.8802.1.1.2.1.4.2.1.4', 'preview')\n for oid, port_id in lldpRemManAddrTable.items():\n time_mark, local_port_num, rem_index, addr_subtype, *rest = split_numbers(oid)\n if addr_subtype == 1: # ipv4\n if do_recurse:\n ips_to_visit.add(read_ipv4_from_oid_tail(oid))\n\n # Port names\n print(\" - Getting ports...\", file=sys.stderr)\n for port, name in walk(host, '1.3.6.1.2.1.31.1.1.1.1', 'any').items(): # ifName\n this_bridge.ports[int(port)].name = name\n # Port speeds\n for port, speed in walk(host, '1.3.6.1.2.1.31.1.1.1.15', 'int').items(): # ifHighSpeed\n this_bridge.ports[int(port)].speed = speed\n # Local port macs\n for port, mac in walk(host, '1.3.6.1.2.1.2.2.1.6', 'hex').items(): # ifPhysAddress\n this_bridge.ports[int(port)].local_mac = mac\n all_bridge_macs.add(mac)\n\n # Read ARP table\n print(\" - Reading device ARP table...\", file=sys.stderr)\n atPhysAddress = walk(host, '1.3.6.1.2.1.3.1.1.2', 'hex')\n for oid, mac in atPhysAddress.items():\n ip = read_ipv4_from_oid_tail(oid, with_len=False)\n arp[ip] = mac\n\n # Map remote (learned) MACs to ports\n print(\" - Getting MACs for ports...\", file=sys.stderr)\n macs_per_port = defaultdict(set)\n ports_per_mac = defaultdict(set)\n dot1qTpFdbPort = walk(host, '1.3.6.1.2.1.17.7.1.2.2.1.2', 'int')\n for k, port_idx in dot1qTpFdbPort.items():\n port = port_idx\n if port_idx in 
dot1dTpFdbPort_to_portnum:\n port = dot1dTpFdbPort_to_portnum[port_idx]\n parts = split_numbers(k)\n vlan = int(parts[0])\n if port:\n mac = ''.join([('%02x' % x) for x in parts[1:]])\n if mac != '0000000000':\n assert(port in this_bridge.ports)\n if mac not in this_bridge.ports[port].remote_macs:\n this_bridge.ports[port].remote_macs.append(mac)\n macs_per_port[port].add(mac)\n ports_per_mac[mac].add(port)\n\n #lldpRemSysCapSupported = walk(host, '1.0.8802.1.1.2.1.4.1.1.11', 'hex')\n ##lldpLocSysCapSupported = int(tuple(walk(host, '1.0.8802.1.1.2.1.3.5', 'hex').values())[-1], 16)\n ##is_bridge = (lldpLocSysCapSupported & 32) != 0\n #print(lldpRemSysCapSupported, file=sys.stderr)\n\n print(\" - Getting remotes...\", file=sys.stderr)\n lldpRemChassisId = walk(host, '1.0.8802.1.1.2.1.4.1.1.5', 'hex')\n for k, chassis_id in lldpRemChassisId.items():\n time_mark, port, idx = split_numbers(k)\n if chassis_id not in this_bridge.neighbors:\n this_bridge.neighbors.append(chassis_id)\n\n this_bridge.ports = dict(this_bridge.ports)\n bridges[this_bridge.chassis_id] = this_bridge\n\n\n # Just to be sure: lookup MACs for visited bridge IPs\n for ip, mac in arp.items():\n if ip in visited_ips:\n all_bridge_macs.add(mac)\n\n # Reverse ARP table (MAC -> set of IPs)\n rarp = {}\n for k, v in arp.items():\n rarp.setdefault(v, set()).add(k)\n\n # Find hostnames for ip addresses using multiple threads (the query is VERY slow)\n ip_to_hostname = {}\n with PoolExecutor(max_workers=50) as executor:\n ips = []\n for b in bridges.values():\n ips.extend(b.ip_addresses)\n for p in b.ports.values():\n for mac in [*p.remote_macs, p.local_mac, b.chassis_id]:\n ips.extend(rarp.get(mac) or [])\n ips = set(ips)\n\n def fetch_name(ip):\n try:\n return socket.gethostbyaddr(ip)\n except (socket.gaierror, socket.herror):\n return [None, [], [ip]]\n\n if resolve_hostnames:\n print(f\"Resolving hostnames for {len(ips)} IP addresses...\", file=sys.stderr)\n for res in executor.map(fetch_name, ips):\n for ip in res[2]:\n if res[0]:\n ip_to_hostname[ip] = res[0]\n\n # Cleanup and extend some values\n print(\"Cleaning up and extending...\", file=sys.stderr)\n for b in bridges.values():\n print(f\" - Bridge {b.name}...\", file=sys.stderr)\n\n # Replace macs with NeighborInfos in neighbor lists\n print(\" - extending NeighborInfos...\", file=sys.stderr)\n neigh_infos = []\n for chassis_id in b.neighbors:\n ni = NeighborInfo(is_bridge=False, name='', ips=[], macs=[chassis_id], chassis_id=chassis_id)\n b2 = bridges.get(chassis_id)\n if b2:\n ni = NeighborInfo(is_bridge=True, name=b2.name, ips=list(b2.ip_addresses), chassis_id=chassis_id,\n macs=list({chassis_id, *[p.local_mac for p in b2.ports.values()]}))\n\n ni.in_ports = list({k for k,p in b.ports.items() if (set(ni.macs).intersection(set(p.remote_macs)))})\n for ips in ((rarp.get(m) or []) for m in ni.macs):\n ni.ips.extend(ips)\n ni.ips = list(set(ni.ips))\n ni.name = ni.name or ip_to_hostname.get([*ni.ips, ''][0]) or ''\n neigh_infos.append(ni)\n\n b.neighbors = neigh_infos\n\n # Delete unused ports from results\n if not all_ports:\n print(\" - filtering unused ports...\", file=sys.stderr)\n b.ports = {k: v for k, v in b.ports.items() if (v.remote_macs or v.remote_ips)}\n\n # Update port contents\n print(\" - updating port contents...\", file=sys.stderr)\n for p in b.ports.values():\n # Mark all ports with bridge management addresses as \"interlink\"\n for bm in all_bridge_macs:\n p.interlink |= (bm in p.remote_macs)\n # Add a list of IP addresses seen behind a port\n for mac in 
p.remote_macs:\n p.remote_ips.extend(rarp.get(mac) or [])\n p.remote_macs = sorted(p.remote_macs)\n p.remote_ips = sorted(list(set(p.remote_ips))) # prune duplicates\n\n # Sort for nicer output TODO: \"natural sorting\" for IPs\n print(\"Sort ARP tables...\", file=sys.stderr)\n arp = dict(sorted(arp.items()))\n rarp = dict(sorted(rarp.items()))\n\n res_dict = {\n 'timestamp': time.time(),\n 'bridges': [b.as_dict() for b in bridges.values()],\n 'arp': arp,\n 'rarp': {k: list(v) for k, v in rarp.items()},\n 'ip_to_hostname': ip_to_hostname\n }\n\n return bridges, arp, rarp, ip_to_hostname, json.dumps(res_dict, indent=4)", "def hostnames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"hostnames\")", "def match_hosts(needle):\n\n matched_hosts = []\n with open(known_hosts_path, \"r\") as known_hosts_file:\n for line in known_hosts_file:\n host, _, _ = line.split(\" \")\n\n if needle in host:\n matched_hosts.append(host)\n\n return matched_hosts" ]
[ "0.6949691", "0.6628178", "0.6534761", "0.6527784", "0.64729506", "0.64609885", "0.64203495", "0.64133173", "0.63669676", "0.6323245", "0.6245519", "0.6221742", "0.6206262", "0.62053776", "0.61899376", "0.61697066", "0.61608654", "0.6157352", "0.6157352", "0.61356586", "0.612202", "0.6103853", "0.6093331", "0.60810935", "0.60810935", "0.6043534", "0.60243857", "0.5957719", "0.5951593", "0.5944126", "0.59213144", "0.58966994", "0.5866506", "0.5863945", "0.58448493", "0.5822217", "0.57935643", "0.5792715", "0.5792715", "0.5792552", "0.57753605", "0.5745278", "0.57439375", "0.57416844", "0.5738404", "0.5718221", "0.5711626", "0.5701481", "0.56865454", "0.5683842", "0.5683682", "0.56821126", "0.56768197", "0.56768197", "0.56752634", "0.5671821", "0.56707126", "0.566885", "0.566885", "0.5664439", "0.5654685", "0.5652588", "0.5650096", "0.5635979", "0.5634656", "0.5633699", "0.563295", "0.5631213", "0.56274444", "0.5622038", "0.561465", "0.5610232", "0.55996937", "0.55992144", "0.5593925", "0.5593589", "0.55920774", "0.5580446", "0.55733734", "0.5561195", "0.55495554", "0.5549028", "0.5548368", "0.55469185", "0.55429184", "0.55256057", "0.5524222", "0.5522428", "0.55208397", "0.55042225", "0.5502726", "0.5488368", "0.54855365", "0.54804796", "0.5467344", "0.54532576", "0.5452326", "0.5440535", "0.543248", "0.5430779" ]
0.8014416
0
detect whether we successfully respawned the instance, and it became a follower
def active_failover_detect_host_now_follower(self):
    self.check_that_instance_is_alive()
    lfs = self.get_log_file()
    if lfs.find("resilientsingle up and running as follower") >= 0:
        self.is_master = False
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = SCREEN_HEIGHT / 2\n self.angle = 0", "def is_alive(self):", "def _players_are_done(self):\n self._waiting_for_players = False\n if self.get_state_info(\"show_waiting_for\"):\n for p in self.all_players:\n p.remove_waiting_message()\n\n info = self.states[self.state]\n if \"post\" in info:\n info[\"post\"]()\n\n self._run_next_state()", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_alive(self):\n return True", "def alive(self):\n return True", "def respawn(self, xrespawn, yrespawn):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n #self.center_x = SCREEN_WIDTH / 2\n #self.center_y = SCREEN_HEIGHT / 2\n\n self.center_x = xrespawn\n self.center_y = yrespawn\n\n self.angle = 0\n\n self.cur_health = self.max_health", "def is_alive(self):\n pass", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def finished(self) -> bool:\n return self.turn == self.marbles", "def isalive():\n return 'alive'", "def was_followed(sender, instance, created, **kwargs):\n\n sendr = User.objects.get(id=instance.user_id)\n followed = User.objects.get(id=instance.followed_user_id)\n if created:\n notify.send(sender=sendr, recipient=followed, verb='followed',\n description=\"{} followed you.\".format(sendr.username))", "def _episode_success(self, observations):\n dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n if (\n abs(dist) > self._success_distance\n or observations[\"gripped_object_id\"] != -1\n ):\n return False\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def have_i_lost(self):\n if self.life_points <= 0:\n self.running = False", "def update_alive(self):\n if (not self.proc is None) and (not self.proc.is_alive()):\n print(\"process died in error, destroying proxy object\")\n self.reset()", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def wait(self, *args):\n # TODO -- say something\n if self.finished_places == 7:\n self.finished_places += 1\n return super(Up, self).wait(*args)", "def should_keep_running(self):\n return len(self.party.active_users())", "def checkForcedPawnPromote(board, positions):\n\n #Get current position of the pawn\n posX = positions[1][0]\n posY = positions[1][1]\n\n #Get prev position of the pawn\n prevX = positions[0][0]\n prevY = positions[0][1]\n\n #Get the pawn\n item = board[posX][posY]\n\n #Forced pawn promotion\n if type(item) == Pawn.Pawn and 
item.checkForPromotion((prevX, prevY)) and not item.promoted:\n item.promote()\n return 1\n \n else:\n return 0", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))", "def respawn_player(self):\n self.rect.x = 50\n self.rect.y = 50\n \n # Specifies the Player's spawnpoint as maze_arrangement[1][1], representing\n # the tile in the top-left corner of the maze\n self.__user_x = 1\n self.__user_y = 1", "def notify_winner(self):\n self.is_winner = True", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "def time_server_not_responding(self):\n if not self.time_server_set:\n return False\n if self.am_leader:\n return False\n try:\n uid = self.global_time_server.get_id()\n except socket.error:\n self.global_time_server = None\n self.time_server_set = False\n print \"The time server is not responding.\"\n return True\n print \"The time server is responding!\"\n return False", "async def twitch_follower_checker_loop(self):\n\n new_last_timestamp = dt.utcnow()\n new_followers = collections.defaultdict(list) # channel user id: list of new follower usernames\n db = await self.bot.database.get_connection()\n data = await db(\"SELECT * FROM user_settings WHERE twitch_bearer_token IS NOT NULL\")\n\n # Wew let's do it\n for row in data:\n\n # See if we got their data already\n new_follower_list = new_followers.get(row['twitch_user_id'])\n if new_follower_list is None:\n new_follower_list, new_cursor_value = await self.get_new_followers(row['twitch_bearer_token'], row['twitch_user_id'], row['twitch_cursor'])\n if new_cursor_value:\n await db(\"UPDATE user_settings SET twitch_cursor=$1 WHERE twitch_user_id=$2\", new_cursor_value, row['twitch_user_id'])\n new_followers[row['twitch_user_id']] = new_follower_list\n\n # Update the follower timestamps into real timestamps\n # self.logger.info(new_follower_list)\n filtered_new_follower_list = [i for i in new_follower_list if dt.strptime(i['followed_at'], \"%Y-%m-%dT%H:%M:%SZ\") > self.last_twitch_checked]\n # self.logger.info(filtered_new_follower_list)\n\n # Send DM to the user\n if filtered_new_follower_list:\n discord_user = self.bot.get_user(row['user_id']) or await self.bot.fetch_user(row['user_id'])\n new_follower_string = ', '.join([f\"**{i['from_name']}**\" for i in filtered_new_follower_list])\n if len(new_follower_string) >= 1800:\n new_follower_string = \"\"\n try:\n await discord_user.send(f\"You have **{len(filtered_new_follower_list)}** new Twitch follower{'s' if len(filtered_new_follower_list) > 1 else ''}! {new_follower_string}\")\n except discord.HTTPException:\n pass\n\n # Update timestamp\n self.last_twitch_checked = new_last_timestamp\n await db.disconnect()", "def KeepAlive(self) -> bool:", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. 
Base directory: {str(self.basedir)}\")", "def _is_follow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False", "def tellIfEnded(self):\n self.congratulate()", "def isstarted():", "def _fly(self):\n logger.info(\"flyer activity()\")\n if self.complete_status is None:\n logger.info(\"leaving activity() - not complete\")\n return\n\n # TODO: do the activity here\n\n # once started, we notify by updating the status object\n self.kickoff_status.set_finished()\n\n # TODO: wait for completion\n\n # after the wait, we declare victory\n self.complete_status.set_finished()\n logger.info(\"activity() complete. status = \" + str(self.complete_status))", "def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True", "def check_server_activity(self):\n if (self.am_leader == True):\n return \"Time server connected.\"\n elif (self.time_server_set == False):\n print \"I am not aware of a time server. Fetching from existing process.\"\n if (self.fetch_time_server() == False):\n print \"Fetch failed. Electing a leader.\"\n self.start_election()\n if self.time_server_not_responding():\n print \"The time server is not responding.\" \n self.start_election()\n return \"Time server elected.\"", "def is_alive(self) -> bool:\n\n\n try:\n self.sock.settimeout(2)\n except OSError:\n\n return False\n\n try:\n self.talk('/system/identity/print')\n\n except (socket.timeout, IndexError, BrokenPipeError):\n\n self.close()\n return False\n\n self.sock.settimeout(None)\n return True", "async def on_member_update(self, before, after):\n if after and after.activity is not None:\n if after.activity.name == \"Guild Wars 2\":\n print(after.id)\n await self.startGw2Session(after.id)\n print(\"Started GW2 Session {} {}\".format(after, dt.utcnow()))\n\n elif before and before.activity is not None:\n if before.activity.name == \"Guild Wars 2\":\n await self.endGw2Session(before.id)\n print(\"Ending GW2 Session {} {}\".format(before, dt.utcnow()))", "def death_screen():\n return False", "def episode_done(self):\n if self.get_status() == AssignState.STATUS_DONE:\n return False\n else:\n return True", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "async def reboot(self, ctx: Message):\n\t\tif ctx.author.id == ownerid:\n\t\t\towner_check = True\n\t\telse:\n\t\t\towner_check = False\n\n\t\tif owner_check == True:\n\t\t\tawait self.send(\"Rebooting the bot, I'll be back in 5 seconds!\")\n\t\t\tawait self.close()\n\t\t\tawait asyncio.sleep(4)\n\t\t\tclient = DogeClient(DOGETOKEN,\n\t\t\t DOGEREFRESHTOKEN,\n\t\t\t prefix=\"d!\",\n\t\t\t reconnect_voice=True)\n\t\t\tclient.run()\n\t\t\tawait asyncio.sleep(1)\n\t\t\tawait self.send(\"I'm back online\")\n\t\telse:\n\t\t\treturn await self.send(\n\t\t\t f\"{ctx.author.mention} You are not the owner of the bot so you may not reboot it!\"\n\t\t\t)", "async def end(self, ctx, message: discord.Message):\n\n if message.guild != ctx.guild:\n return await send_embed(ctx, \"You do not have permission to do that.\", negative=True)\n\n embed = message.embeds[0]\n host = embed.description.split('\\n')\n host = host[2]\n\n async def _end(description: str):\n new_embed = discord.Embed(\n colour=discord.Colour.dark_grey(),\n description=description\n )\n\n new_embed.set_author(name=embed.author.name)\n\n new_embed.set_footer(text=f\"Ended at • {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n\n await message.edit(text=\"<:tada:740055373926367383> **GIVEAWAY ENDED** <:tada:740055373926367383>\",\n embed=new_embed)\n\n await db.execute(\"Update Giveaway set Ended = ? where MessageID = ?\", (True, message.id,))\n await db.commit()\n\n cursor = await db.execute(\"Select Members, Ended from Giveaway where MessageID = ? and GuildID = ?\",\n (message.id, ctx.guild.id))\n result = await cursor.fetchone()\n\n if not result:\n return await send_embed(ctx, \"Giveaway with given message ID does not exist.\", negative=True)\n\n if result[1]:\n return await send_embed(ctx, \"Giveaway has already ended.\", negative=True)\n\n for reaction in message.reactions:\n if str(reaction) == \"<:tada:740055373926367383>\":\n thisreaction = reaction\n\n try:\n a = thisreaction\n except:\n await _end(f\"Could not determine winner!\\n{host}\")\n\n members = [i for i in await thisreaction.users().flatten() if not i.bot and i in ctx.guild.members]\n winners = []\n\n if not members:\n return await _end(f\"Could not determine winner!\\n{host}\")\n\n for i in range(result[0]):\n if not members:\n break\n member = random.choice(members)\n winners.append(member)\n members.remove(member)\n\n if len(winners) == 1:\n await _end(f\"Winner: {winners[0].mention}\\n{host}\")\n await ctx.send(f\"Congratulations {winners[0].mention}! 
You won the **{embed.author.name}**!\\n\"\n f\"{message.jump_url}\")\n\n else:\n await _end(f\"Winners: {', '.join([i.mention for i in winners])}\\n{host}\")\n await ctx.send(f\"Congratulations {', '.join([i.mention for i in winners[:-1]])}, \"\n f\"and {winners[-1].mention}! You won the **{embed.author.name}**!\\n\"\n f\"{message.jump_url}\")", "def is_done(self, agent, world) -> bool:\n if self.steps_from_last_reset / self.num_agents > self.episode_length:\n return True\n return False", "def isFinished():", "def isFinished():", "def isFinished():", "def is_restarting(self) -> bool:\r\n return False", "def check_for_inactivity(self):\n if self.last_contacted + self.NO_CONTACT_TIMEOUT < time.time():\n import sys\n sys.exit(\"StarNode terminated due to inactivity with other nodes\")", "def is_done(self):\n\n # Robosuite envs always rollout to fixed horizon.\n return False", "def IsAlive(self, *args, **kwargs):\n pass", "def pawnTo(self, direction) :\n dirCoord = add(self.coord, direction)\n return self.board.playerCellList[dirCoord[0]][dirCoord[1]].hasPawn", "def episode_end(self):\n return self.game.is_episode_finished()", "def respawn_instance(self, version, moreargs=None, wait_for_logfile=True):\n assert version is not None\n self.cfg.version = version\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n if moreargs is not None:\n args.extend(moreargs)\n\n logging.info(\"StarterManager: respawning instance %s\", str(args))\n self.instance = psutil.Popen(args)\n self.pid = self.instance.pid\n self.ppid = self.instance.ppid()\n print(\"respawned with PID:\" + str(self.instance.pid))\n if wait_for_logfile:\n self.wait_for_logfile()\n self.wait_for_port_bind()\n else:\n print(\"Waiting for starter to exit\")\n print(\"Starter exited %d\" % self.instance.wait())", "def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack", "def is_done(self):\n\n return not self.thread.is_alive()", "def alive(p):\n return p.is_alive()", "def detect_leader(self):\n # Should this be moved to the AF script?\n lfs = self.read_db_logfile()\n\n became_leader = lfs.find(\"Became leader in\") >= 0\n took_over = lfs.find(\"Successful leadership takeover:\" + \" All your base are belong to us\") >= 0\n self.is_leader = became_leader or took_over\n if self.is_leader:\n url = self.get_frontend().get_local_url(\"\")\n reply = requests.get(url, auth=requests.auth.HTTPBasicAuth(\"root\", self.passvoid), timeout=120)\n print(f\"{url} => {str(reply)}\")\n if reply.status_code == 503:\n self.is_leader = False\n return self.is_leader", "def is_done(self):\n dead = not any(agent.is_alive() for agent in self.agents)\n winner = any(agent.is_winner() for agent in self.agents)\n if dead:\n print 'Your Agent Died'\n return dead\n elif winner:\n print 'Gold Found!\\nOozplorer wins!'\n return winner\n return False", "def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)", "def link_up_respond(self, neighbor):\n neighbor.is_killed = False\n neighbor.send_timer = time.time()\n neighbor.kill_timer = time.time()\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()", "def running(self) -> bool:", "def isDone(self):\n if self.current_turn >= self.MAX_TURNS: return 
True\n if self.last_user_action[\"action\"] == \"END\": return True\n return False", "def lost():\n\tforeground_module.foreground_speed = 0\n\tbackground_module.background_speed = 0\n\tdisplay_fail_msg(win)\n\n\tif player_module.player.y > foreground_module.ground_y:\n\t\ttry:\n\t\t\tprocess_object.terminate()\n\t\texcept: pass\n\n\t\treturn True\n\treturn False", "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def has_happened(self):\n\n return self.end < timezone.now()", "def update(self):\n if self.killer:\n if self.killer.stype == 'fire' and not(self.killer in self.pjs.fires):\n self.die()\n elif self.killer.stype == 'enemy':\n if self.timeout == 0:\n self.die()\n else:\n self.timeout -= 1\n else:\n self.move(self.direction, is_update=True)", "def alive(self, pid):\n try:\n self.ssh(\"kill -0 %s\" % str(pid), allow_fail=False)\n return True\n except:\n return False", "def food_respawn(self):\r\n if(self.timer_fire()):\r\n self.current_food = self.max_food\r\n self.redraw()\r\n self.set_change()\r\n return True\r\n return False", "def isAlive(self):\n raise NotImplementedError", "def is_alive(self):\n if self.health > 0:\n return True\n return False", "def _is_unfollow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"unfollow\":\n return True\n else:\n return False", "def test_two_persons_exiting(self):\n human_detector_inst = HumanDetector(\n find_humans_from_video_file_name='videos/two_persons_exiting.mp4',\n use_pi_camera=False, open_display=False)\n self.assertEqual(human_detector_inst.perform_job(), None)\n human_centroid_dict = human_detector_inst.get_human_centroid_dict()\n self.assertEqual(len(human_centroid_dict), 2)\n self.assertEqual(human_centroid_dict[0].direction, Direction.EXIT)\n self.assertEqual(human_centroid_dict[1].direction, Direction.EXIT)\n self.assertEqual(SendReceiveMessages().get_face_detected_count_locally(), -2)\n # human_detector_inst.clean_up()\n self.__cleanup()", "def is_happening(self):\n now = timezone.now()\n start = self.start\n end = self.end\n happening = False\n # check that the event has started and 'now' is btwn start & end:\n if (now >= start) and (start.time() <= now.time() <= end.time()):\n happening = True\n return happening", "def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False", "def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False", "def is_leader(self):\n return self.__is_leader", "def episode_finish(self, reason: str) -> None:\n pass", "def check_owned_instance(st, instance_id):\n\n logging.info(\"Checking owned instance %s...\" % instance_id)\n\n global api, owned_instances\n\n # Get information from API: we need the IP address\n inst = api.nova.get_instance(token_id=api.keystone.token_id, instance_id=instance_id)\n\n # Check if the instance is in the list (using cached status)\n found = False\n for h in st['workers_status'].keys():\n if gethostbyname(h) == inst.network_ip(network_name=cf[\"api\"][\"network_name\"]):\n found = True\n break\n\n # Deal with errors\n if not 
found:\n logging.error(\"Instance %s (with IP %s) has not joined the cluster after %ds: terminating it\" % (instance_id, inst.private_ip_address, cf['elastiq']['estimated_vm_deploy_time_s']))\n\n try:\n inst.terminate(token_id=api.keystone.token_id)\n owned_instances.remove(instance_id)\n save_owned_instances()\n logging.info(\"Forcing shutdown of %s: OK\" % instance_id)\n except Exception as e:\n # Recheck in a while (10s) in case termination fails\n logging.error(\"Forcing shutdown of %s failed: rescheduling check\" % instance_id)\n return {\n 'action': 'check_owned_instance',\n 'when': time.time() + 10,\n 'params': [ instance_id ]\n }\n\n else:\n logging.debug(\"Instance %s (with IP %s) successfully joined the cluster within %ds\" % (instance_id, inst.network_ip(network_name=cf[\"api\"][\"network_name\"]), cf['elastiq']['estimated_vm_deploy_time_s']))\n\n return", "def test_close_if_ended(self):\n today = pytz.UTC.localize(dt.datetime.now())\n self.leaderboard.closed = False\n self.leaderboard.start = today - dt.timedelta(days=15)\n self.leaderboard.end = today - dt.timedelta(days=7)\n\n self.leaderboard.close_if_ended()\n assert self.leaderboard.closed\n # print(self.leaderboard.closed)", "def waiting(self) -> bool: # pylint: disable=W0221\n return True", "def isAlive(self):\n return self.is_alive()", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def follow(self, follower, followee):\n pass", "def set_awaiting_turn(self):\n if self.status == self.PLAYER_BANKRUPT:\n return\n if self.status == self.PLAYER_AWAITING_TURN:\n return\n self.status = self.PLAYER_AWAITING_TURN\n # self.client.send_player_turn_end()", "def isFinished(self):\n return False", "def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False", "def registration_ended(self):\n pass", "def is_alive(self):\r\n return self._health_points > 0", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def alive(self):\n return self._thread is not None", "def is_actor_alive(actor, time=0.5):\n actor.join(time)\n return actor.is_alive()", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def wait_for_ssh(self):\n self.wait_for_status(16)\n printy(\"The instance is now running ...\")\n # The instance is running, but we give it 60 more seconds for running\n # SSHD\n printy(\"Waiting 60 seconds for SSH server to start ...\")\n time.sleep(60)", "def is_up(self):\n return True" ]
[ "0.62327015", "0.61734205", "0.5913024", "0.58508074", "0.58208185", "0.58022654", "0.5687446", "0.56421936", "0.56058353", "0.55823416", "0.55794746", "0.55337363", "0.5507955", "0.5464997", "0.54603153", "0.54300016", "0.5409948", "0.540742", "0.54065454", "0.54039454", "0.5393364", "0.53923327", "0.53808933", "0.53808933", "0.53789896", "0.53744894", "0.5370398", "0.5362218", "0.53591734", "0.53560054", "0.5348375", "0.5345866", "0.5330382", "0.5328045", "0.5312883", "0.5308241", "0.5306255", "0.53062534", "0.53030664", "0.52901375", "0.5283826", "0.5269047", "0.5268003", "0.5262747", "0.52464396", "0.5244211", "0.52387106", "0.5228322", "0.5225545", "0.5225545", "0.5225545", "0.522513", "0.52221805", "0.5221117", "0.5220011", "0.52164096", "0.5215912", "0.52133185", "0.5210241", "0.52089393", "0.51961434", "0.51930016", "0.5190844", "0.51893854", "0.5186139", "0.51823187", "0.5182074", "0.51814854", "0.51783764", "0.51775336", "0.51679814", "0.51626295", "0.5156379", "0.51542014", "0.515415", "0.51507735", "0.5143839", "0.51395553", "0.5135976", "0.5127482", "0.5125514", "0.51200634", "0.5119644", "0.5116995", "0.5114092", "0.5112827", "0.51115966", "0.5108556", "0.5104232", "0.5097281", "0.509555", "0.5095441", "0.50858027", "0.5082477", "0.5081125", "0.50805867", "0.5077398", "0.507654", "0.50701725", "0.5068891" ]
0.6564329
0
dump out instance args, and what could be fishy in my log
def search_for_warnings(self):
        """Dump the instance args, then attach any WARN/ERROR lines from the starter log."""
        log = ""
        print(self.default_starter_args + self.arguments)
        if not self.log_file.exists():
            print(str(self.log_file) + " not there. Skipping search")
            return
        print(str(self.log_file))
        with self.log_file.open(errors="backslashreplace") as log_f:
            # iterate the file line by line; readline() would walk the first line char by char
            for line in log_f:
                if "WARN" in line or "ERROR" in line:
                    print(line.rstrip())
                    log += line.rstrip() + "\n"
        attach(log, "WARN or ERROR lines from starter log")
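A standalone sketch of the same scan-and-attach pattern, for context: it assumes the allure-pytest package is installed, and the helper names (collect_log_warnings, attach_warnings) are hypothetical, not part of the starter manager above.

from pathlib import Path

import allure  # allure-pytest, assumed to be installed


def collect_log_warnings(log_path: Path) -> str:
    # return every WARN/ERROR line from the log, newline-joined (empty string if the file is missing)
    if not log_path.exists():
        return ""
    with log_path.open(errors="backslashreplace") as log_f:
        hits = [line.rstrip() for line in log_f if "WARN" in line or "ERROR" in line]
    return "\n".join(hits)


def attach_warnings(log_path: Path) -> None:
    # attach the collected lines to the current Allure test as a plain-text attachment
    body = collect_log_warnings(log_path)
    if body:
        allure.attach(body, name="WARN or ERROR lines from starter log",
                      attachment_type=allure.attachment_type.TEXT)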
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(self, *args, **kwargs):", "def __init__(self, args, logger):\n super().__init__(args, logger)", "def dump(self, args):\n if self.stru:\n self.stru.dump(args)\n if self.index:\n self.index.dump(args)\n if self.bank:\n self.bank.dump(args)\n if self.sys:\n self.sys.dump(args)", "def args_str(self):", "def __init__(self, args):\n self.args = args", "def _log(self, msg, *args):\n\n # Since this is a fake object - no actual logging is done.\n # Instead the message is simply printed to standard output.\n print (msg, end=' ')\n for arg in args:\n print(arg, end=' ')\n print()", "def info(self, *args, **kwargs):", "def debug_logger(*args):\n\n for x in args:\n print(repr(x))", "def _collect_repr_args(self, poargs, kwargs):", "def LogCommand(self): \n # Retrieve the args for the command.\n posname, kwname, args = inspect.getargvalues(inspect.stack()[1][0])[-3:]\n posargs = args.pop(posname, [])\n args.update(args.pop(kwname, []))\n\n methodname = inspect.currentframe().f_back.f_code.co_name \n\n # Output the command in a format that looks like normal Python syntax.\n logmsg = \" - Python command - \" + methodname\n arguments = \"\"\n for key in args: \n if key != \"self\": \n value = args[key]\n if value == \"\":\n value = '\"\"'\n\n arguments += key + \"=\\\"\" + str(value) + \"\\\", \"\n\n if arguments != \"\":\n # Remove the final comma and space (\", \").\n arguments = arguments[:-2]\n\n logmsg += \"(\" + arguments + \")\"\n\n logging.debug(logmsg)\n return", "def log_message(self, format, *args):", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def show_parameters(args):\n\n logging.basicConfig(format='%(message)s', level=args.logging)\n\n logging.info('\\n#{0}'.format('-'*60))\n logging.info('BUILD CONFIG : {0}'.format(args.config))\n logging.info('BUNDLE FILE : {0}'.format(args.bfile))", "def debug(self, msg, *args, **kwargs):\n pass", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, *args):\n\n self.args = args", "def log_arguments(self, args: Union[ArgumentParser, Namespace, Dict] = None, show=False, **kwargs) -> Namespace:\n nargs = dict()\n if args is not None:\n nargs = args\n\n if isinstance(args, ArgumentParser):\n nargs = args.parse_args()\n\n if isinstance(nargs, Namespace):\n nargs = dict(**vars(nargs))\n\n kwargs.update(nargs)\n\n # if we have a pending trial create it now as we have all the information\n if is_delayed_call(self.trial):\n self.trial = self.trial(parameters=kwargs)\n self.logger = TrialLogger(self.trial, self.protocol)\n else:\n # we do not need to log the arguments they are inside the trial already\n self.logger.log_arguments(**kwargs)\n\n if show:\n print('-' * 80)\n for k, v in vars(args).items():\n print(f'{k:>30}: {v}')\n print('-' * 80)\n\n return args", "def extra_args(self):\n return []", "def debug_print(self, *args, **kwargs):\n print(\"APP_DEBUG_PRINT\", args, kwargs)", "def __init__(self, *args, **kwargs):\n\n self.logger = util.get_logger()\n self.args = args\n self.kwargs = kwargs\n for key, value in kwargs.items():\n setattr(self, key, value)", "def _logRequestArgs(self, request):\n\t\ttry:\n\t\t\tif request.args:\n\t\t\t\t# even if there are args, don't log them if only boring ones\n\t\t\t\t# were given\n\t\t\t\tfmtArgs = _formatRequestArgs(request.args)\n\t\t\t\tif fmtArgs!='{}':\n\t\t\t\t\tlog.msg(\"# Processing starts: %s %s\"%(request.path, \n\t\t\t\t\t\tfmtArgs))\n\t\texcept: # don't fail because of logging problems\n\t\t\tbase.ui.notifyError(\"Formatting of request args failed.\")", "def 
__init__(self, args=False):\n self.args = args", "def __init__(self, log=False):\n self.log = log", "def trace(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, level, pathname, lineno, msg, args, exc_info, func=None):\n #\n # The following statement allows passing of a dictionary as a sole\n # argument, so that you can do something like\n # logging.debug(\"a %(a)d b %(b)s\", {'a':1, 'b':2})\n # Suggested by Stefan Behnel.\n # Note that without the test for args[0], we get a problem because\n # during formatting, we test to see if the arg is present using\n # 'if self.args:'. If the event being logged is e.g. 'Value is %d'\n # and if the passed arg fails 'if self.args:' then no formatting\n # is done. For example, logger.warn('Value is %d', 0) would log\n # 'Value is %d' instead of 'Value is 0'.\n # For the use case of passing a dictionary, this should not be a problem.\n if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:\n args = args[0]\n self.args = args\n self.levelno = level\n self.pathname = pathname\n self.msg = msg\n\n self.levelname = \"FOOBAR\" #getLevelName(level)\n\n try:\n self.filename = os.path.basename(pathname)\n self.module = os.path.splitext(self.filename)[0]\n except (TypeError, ValueError, AttributeError):\n self.filename = pathname\n self.module = \"Unknown module\"\n\n self.exc_info = exc_info\n self.exc_text = None # used to cache the traceback text\n self.lineno = lineno\n self.func_name = func\n self.created = time.time()\n self.asctime = time.asctime()\n # Remove milliseconds\n i = self.asctime.find(\".\")\n if i != -1: self.asctime = self.asctime[:i]", "def format_args(self):\r\n is_ctor = self.object.cls.name == self.object.name\r\n\r\n if self.object.args:\r\n if self.object.args[0] in (\"obj\", \"self\") and not is_ctor:\r\n return \"(\" + \", \".join(self.object.args[1:]) + \")\"\r\n else:\r\n return \"(\" + \", \".join(self.object.args) + \")\"", "def log( cls, level, msg, *args, **kwargs ):\n arg_set = ( level, msg, ) # not clear how to get rest, for now discard\n print( f\"{arg_set[0]} >> {arg_set[1]}\" )\n cls.__log_later.append( arg_set )", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "def __init__(self, args):\n super().__init__()\n self.args = args", "def verbose(self, *args):\n self.mylog.log(logging.INFO - 1, *args)", "def inspect(obj:Any) -> None:\n\t\tLogging._log(Logging.logLevel, obj)", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def parse_args(self, *args, **kwargs):\n options = super().parse_args(*args, **kwargs)\n\n # Configure logging\n level = logging.DEBUG if options.debug else logging.INFO\n logging.addLevelName(logging.INFO, \"\\033[1;36mINFO\\033[1;0m\")\n logging.addLevelName(logging.WARNING, \"\\033[1;33mWARNING\\033[1;0m\")\n logging.addLevelName(logging.ERROR, \"\\033[1;91mERROR\\033[1;0m\")\n logging.addLevelName(logging.DEBUG, \"\\033[1;30mDEBUG\")\n logging.basicConfig(\n level=level,\n format=\"\\033[90m%(asctime)s\\033[1;0m [%(name)s] %(levelname)s %(message)s\\033[1;0m\",\n )\n return options", "def output_debug_info(self):", "def print_args():\n for key, value in vars(ARGS).items():\n print(key + ' : ' + str(value))", "def _log(self, format, args, level=None):\n if level 
is None:\n level = self.log_level\n xbmc.log(\n \"metadata.movie.stupid: %s - - [%s] %s\\n\" % (\n self.client_address[0], self.log_date_time_string(),\n format % args),\n level)", "def __init__(self, sender, verbose):\n super(DFTimewolfConsoleOutput, self).__init__()\n self._sender = sender\n self._verbose = verbose", "def __init__(self,\n settings=None, # 0.2.4.post2. A dict or a pathname\n omit=(), # 0.3.0 class deco'ing: omit these methods or properties; not a setting\n only=(), # 0.3.0 class deco'ing: deco only these methods or props (sans any in omit); not a setting\n name='', # 0.3.0 name or oldstyle fmt str for f_display_name of fn; not a setting\n override=False, # 0.3.0b18: new settings override existing ones. NOT a \"setting\"\n enabled=True,\n args_sep=', ',\n log_args=True,\n log_retval=False,\n log_elapsed=False,\n log_exit=True,\n indent=True, # 0.3.0, this seems the better default\n log_call_numbers=False,\n prefix='',\n file=None, # detectable value so we late-bind to sys.stdout\n logger=None,\n loglevel=logging.DEBUG,\n mute=False,\n record_history=False,\n max_history=0,\n NO_DECO=False,\n ):\n # 0.2.4 settings stuff:\n # determine which keyword arguments were actually passed by caller!\n used_keywords_dict = log_calls.__dict__['__init__'].get_used_keywords()\n # remove non-\"settings\"\n for kwd in ('settings', 'omit', 'only', 'name', 'override'):\n if kwd in used_keywords_dict:\n del used_keywords_dict[kwd]\n\n super().__init__(\n settings=settings,\n _omit=omit, # 0.3.0 class deco'ing: tuple - omit these methods/inner classes\n _only=only, # 0.3.0 class deco'ing: tuple - decorate only these methods/inner classes (sans omit)\n _name_param=name, # 0.3.0 name or oldstyle fmt str etc.\n _override=override, # 0.3.0b18: new settings override existing ones. 
NOT a \"setting\"\n _used_keywords_dict=used_keywords_dict,\n enabled=enabled,\n args_sep=args_sep,\n log_args=log_args,\n log_retval=log_retval,\n log_elapsed=log_elapsed,\n log_exit=log_exit,\n indent=indent,\n log_call_numbers=log_call_numbers,\n prefix=prefix,\n file=file,\n logger=logger,\n loglevel=loglevel,\n mute=mute,\n record_history=record_history,\n max_history=max_history,\n NO_DECO=NO_DECO,\n )", "def cmd_debug_args(self, command, args):\n command = self.approx.encmd(command)\n if command not in self.mod_commands:\n return self.cmd_help(commands=(command,))\n namespace = self.parse_command_args(command, args)\n if namespace is None:\n return\n kwargs = vars(namespace)\n outdict = dict(passed=args, parsed=kwargs)\n from .cmdopts import SerialSuspect\n to_eval = [(k, v) for k, v in kwargs.items() if\n isinstance(v, SerialSuspect)]\n if to_eval:\n opt, val = to_eval.pop()\n outdict[\"parsed\"].update({opt: str(val)})\n try:\n evaled = val(kwargs.get(\"as_json\", False), True)\n except Exception as exc:\n self.put_pretty(exc.args[0])\n evaled = [repr(exc)]\n outdict[\"evaled\"] = evaled\n import json\n # Dump JSON instead of pprint-ing so tests can capture and eval output.\n self.put_pretty(json.dumps(outdict, indent=2))", "def help_dump(self):\n print(DUMP)", "def parse_args(self):\n args = super().parse_args()\n self._set_logger(args.log_level)\n return args", "def __call__(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n log(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def dump(self):\n self.logger.debug(self)", "def log_message(self, fmt, *args):\n pass", "def __init__(self, **extra):\n log_msg = self.message\n if extra:\n log_msg += \" Extra info: {0}\".format(extra)\n logger.error(log_msg)\n super().__init__(extra)", "def format_args(self):\r\n ctor = self.get_attr(self.object, self.object.name, None)\r\n\r\n if ctor is None or not isinstance(ctor, MatMethod):\r\n return None\r\n if ctor and not self.env.config.matlab_class_signature:\r\n return None\r\n if ctor.args:\r\n return \"(\" + \", \".join(ctor.args) + \")\"", "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "def get_args(self):\r\n return self.args", "def __init__(self):\r\n self.logger = dict()", "def log_all_class_info():\n\n for cls in of_g.unified:\n for v in of_g.unified[cls]:\n if type(v) == type(0):\n log(\"cls: %s. ver: %d. base len %d. %s\" %\n (str(cls), v, of_g.base_length[(cls, v)],\n loxi_utils.class_is_var_len(cls,v) and \"not fixed\"\n or \"fixed\"))\n if \"use_version\" in of_g.unified[cls][v]:\n log(\"cls %s: v %d mapped to %d\" % (str(cls), v, \n of_g.unified[cls][v][\"use_version\"]))\n if \"members\" in of_g.unified[cls][v]:\n for member in of_g.unified[cls][v][\"members\"]:\n log(\" %-20s: type %-20s. 
offset %3d\" %\n (member[\"name\"], member[\"m_type\"],\n member[\"offset\"]))", "def parse_args(self, **kwargs):\n args = super().parse_args()\n self._set_logger(args.log_level)\n return args", "def writelog(self,*args):\n import sys\n print(' '.join([str(a) for a in args]),file=sys.stderr)", "def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r", "def debug():", "def vlog(self, msg, *args):\n if self.verbose:\n self.log(msg, *args)", "def vlog(self, msg, *args):\n if self.verbose:\n self.log(msg, *args)", "def print_args():\r\n args = \", \".join(sys.argv)\r\n print(\"pfArgs: \" + args)", "def handle_arguments(self, args):\n debug(\"BloomGenerator.handle_arguments: got args -> \" + str(args))", "def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))", "def __call__(self, args):", "def __process_args__(self, args):\n if getattr(args, 'debug', None):\n self.logger.set_level('DEBUG')\n\n elif getattr(args, 'quiet', None):\n self.silent = True\n\n elif getattr(args, 'verbose', None):\n self.logger.set_level('INFO')\n\n if self.subcommand_parser is not None and args.command is not None:\n if hasattr(self.subcommands[args.command], 'parse_args'):\n args = self.subcommands[args.command].parse_args(args)\n self.subcommands[args.command].run(args)\n\n return args", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def log(*arg):\n\n context = str(*arg)\n print(\"[Texture Builder] {}\".format(context))", "def __init__(self, request, log=\"\"):\n self.log = log\n self.request = request", "def test_print_args_dict_full(self):\n log_file_uri = os.path.join(DATA_DIR, 'sample_log.txt')\n if os.path.exists(log_file_uri):\n os.remove(log_file_uri)\n self.assertEqual(os.path.exists(log_file_uri), False)\n manager = execution.LogManager('MainThread', log_file_uri)\n\n args = {\n 'a': 'aaa',\n 'b': 'bbb',\n 'cdefg': u'qwerty',\n 'hello': 12345,\n 'list': range(4)\n }\n manager.print_args(args)\n self.assertEqual(count_lines(log_file_uri), 8)\n\n regression_file_uri = os.path.join(DATA_DIR, 'execution',\n 'arguments_only.txt')\n regression_file = open(regression_file_uri)\n log_file = open(log_file_uri)\n\n # Loop through all the lines in both files, assert they're equal.\n lines = lambda f: [l for l in f]\n for index, (log_msg, reg_msg) in enumerate(zip(lines(log_file), lines(regression_file))):\n if index == 0:\n continue # skip a logging line with the date/time\n self.assertEqual(log_msg, reg_msg)\n\n os.remove(log_file_uri)", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def __init__(self, name=None):\r\n self.log = {}\r\n self.name = name", "def debug(self, *args, **kwargs):\n self.msg(logging.DEBUG, *args, **kwargs)", "def getLog(self):\n pass", "def vlog(self, msg: str, *args: Optional[str]) -> None:\n if self.verbose:\n self.log(msg, *args)", "def dump(self) -> None:\n ...", "def get_args(inst):\n if is_estimator(inst):\n args = inspect.getargspec(inst.update).args\n args = [arg for arg in args if arg != 'self' and arg != 'X']\n else:\n args = inspect.getargspec(inst).args\n ignore_args = {'self', 'X', 'y', 'pattern', 'normalizer', 'coef'}\n args = [arg for arg in args if arg not in ignore_args]\n\n return args", "def info(self):", "def info(self):", "def __getinitargs__(self):\n return (self.cutout,)", "def __debug(self):\n\t\tprint \"Dumping Object Chat\"\n\t\tprint self.userA.username +' + '+ self.userB.username", "def 
traceback(self):", "def log(self, level, msg, *args, **kwargs):\n pass", "def dump_args(args, outdir='.'):\n with open( Path(outdir)/'args.txt', 'w' ) as file:\n for k, v in args.items():\n file.write('{}: {}\\n'.format(k, v))", "def debug(self):\n raise NotImplementedError", "def args(self) -> List[str]:\n return self.__args", "def add_extra_args(self):\n pass", "def __init__(self, params):\n self.logger = logging.getLogger(\"simple\")\n self.params = params", "def __getinitargs__(self):\n\n return (self.admin_property_err,)", "def __getnewargs__(self):\n return ()", "def __repr__(self):\n doc_string = \"# %s class description:\\n%s\\n# Instance attributes:\\n\" % (self.__class__, self.__doc__)\n # write each argument with its value\n properties = dir(self)\n for elem in properties:\n if not elem.startswith(\"_\"):\n doc_string += \"\\t%s:%s\\n\" % (elem, self.__getattribute__(elem))\n return doc_string", "def write_debug_info(self):\n #path = self.request.uri.split('?')[0]\n #method = path.split('/')[-1]\n \n self.write(\"Handler: \" + str(self.__class__.__name__)+\"<br>\")\n self.write(\"<hr>\")\n self.write(str(dir(self.request)))\n self.write(\"<br><hr>\")\n self.write(\"query_arguments:\" + str(self.request.query_arguments))\n self.write(\"<br>\")\n self.write(\"uri:\" + self.uri)\n self.write(\"<br>\")\n self.write(\"path:\" + self.path)\n self.write(\"<br>\")\n self.write(\"method to call: \" + self.request.method.lower() + \"_\" + self.method)\n self.write(\"<hr>\")\n self.write(\"request method: \" + self.request.method)\n self.write(\"<hr>\")\n self.write(\"request headers: \" + str(self.request.headers))\n self.write(\"<hr>\")\n self.flush()", "def logd(*args,**kwargs):\n print(*args,**kwargs)", "def format_args(self, **kwargs: Any) -> str:\n return \"\"", "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def get_args( self, ):\r\n for iarg in sys.argv[1:]:\r\n #print iarg\r\n argsplits = iarg.split(\"=\")\r\n parm_name = argsplits[0]\r\n parm_value = argsplits[1]\r\n\r\n # so far only one is captured\r\n if parm_name == \"parameters\":\r\n self.parmeters_x = parm_value #\r\n msg = \"command line arg >>{iarg}\" # log file not open but use alt\r\n AppGlobal.logger.info( msg )\r\n else:\r\n msg = \"no parmeter extensions\"\r\n AppGlobal.logger.info( msg )\r\n return", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass" ]
[ "0.6568053", "0.62609863", "0.6245112", "0.6137647", "0.61293983", "0.6093716", "0.60875756", "0.60433614", "0.5990159", "0.5985708", "0.59716463", "0.59541845", "0.5896174", "0.58789164", "0.58609366", "0.58609366", "0.58546025", "0.5845228", "0.5839964", "0.582042", "0.5813646", "0.5795892", "0.5790719", "0.5786274", "0.57616746", "0.576141", "0.5748035", "0.57397777", "0.5721655", "0.5715431", "0.57092977", "0.5706342", "0.56871825", "0.56809336", "0.5680285", "0.5675244", "0.567097", "0.5669988", "0.56685054", "0.5657115", "0.5653818", "0.564407", "0.5635758", "0.5632916", "0.5632039", "0.5626396", "0.5626396", "0.5625634", "0.5608179", "0.5604364", "0.559117", "0.55902433", "0.5589704", "0.5583566", "0.5576696", "0.5568277", "0.5568277", "0.5565891", "0.5556024", "0.55491364", "0.5548898", "0.55434024", "0.55195534", "0.55163985", "0.55122894", "0.55055803", "0.55025095", "0.55025095", "0.55025095", "0.5497994", "0.549578", "0.5472533", "0.5449221", "0.5448851", "0.54426247", "0.5430571", "0.5430571", "0.5425029", "0.5422983", "0.5419734", "0.5416702", "0.5416452", "0.54045874", "0.54005647", "0.5396461", "0.53948647", "0.5393731", "0.53911716", "0.5377791", "0.53763235", "0.53734607", "0.5372002", "0.53699654", "0.53673625", "0.53655034", "0.53640974", "0.53640974", "0.53640974", "0.53640974", "0.53640974", "0.53640974" ]
0.0
-1
Add starter log to allure report
def add_logfile_to_report(self):
        # attach the full starter log file to the Allure report as plain text
        logfile = str(self.log_file)
        attach.file(logfile, "Starter log file", AttachmentType.TEXT)
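For reference, the same attachment done through the public allure-pytest API directly; this is a minimal sketch and starter_log is a hypothetical path, not one taken from the snippet above.

from pathlib import Path

import allure  # allure-pytest, assumed to be installed

starter_log = Path("work") / "starter" / "arangodb.log"  # hypothetical location

# attach the whole file so it shows up as a plain-text attachment in the report
allure.attach.file(str(starter_log), name="Starter log file",
                   attachment_type=allure.attachment_type.TEXT)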
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logStarted(build, step, log):", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def write_terraform_apply_log_header(self):\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(\"*\" * 100)\n logfile.write(\"\\n*** Terraform Apply Started\")\n logfile.write(\"\\nDateTime: %s\\n\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n logfile.write(\"*\" * 100)\n self.write_debug_log(K.TERRAFORM_APPLY_STARTED)", "def create_hdf5_logger(self):\n super(Inertial_Logger,self).create_hdf5_logger()\n self.logger.add_attribute(self.trial_info_path, 'mode', 'inertial trajectory')", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def report():\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _log_trial(self, is_add: bool):\n try:\n with open(str(self.info.trials_log_file), \"r\") as file:\n trials = util.yaml_load(file.read())\n except FileNotFoundError:\n trials = []\n\n if is_add:\n trials.append(self.trial.to_dict())\n else:\n trials[-1] = self.trial.to_dict()\n\n with open(str(self.info.trials_log_file), \"w\") as file:\n file.write(util.yaml_dump(trials))", "def do_rrt(self, arg):\n self.do_timesheet('report extend track today')", "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def on_start(self):\r\n self.log()", "def report_trial(self):\n pass", "def write_terraform_init_log(self, response):\n head_msg = \"Terraform Init is done\"\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(self._write_header(head_msg))\n logfile.write(response[1])\n\n self.write_debug_log(K.TERRAFORM_INIT_COMPLETED)", "def start_confluence_log_file(S,cfg,bands):\n # RS: making this file store & confluence-markdown-format your data \n if bands is None:\n bands = S.config.get(\"init\").get(\"bands\")\n confluence_fp = os.path.join(S.output_dir,f\"{S.name}_optimization_summary.txt\")\n with open(confluence_fp,'a') as cfile:\n dev_name, crate_and_slot, start_date = get_config_vals(S,cfg)\n cfile.write(f\"h4. *{dev_name} {crate_and_slot}*\\n\")\n #cfile.write(f\"optimization with `{__file__}\")\n cfile.write(\"* Ran {{\" + f\"{' '.join(sys.argv)}\" +\"}}\\n\")\n band_str = ','.join([str(band) for band in bands])\n cfile.write(f\"* Plots of bands {band_str} taken {start_date} in \" +\\\n \"{{\" +f\"{S.plot_dir}\" +\"}}\\n\")\n cfile.write(\"* resultant tunefile: **TODO**\\n\\n\")\n cfile.write(\"|| ||Indiv.||-||-||-||-||togeth||-||\\n\")\n table_top=\"||SMuRF band||uc att (.5dBs)||tone power (3dB steps)||\"+\\\n \"dc att (.5dBs)||Num. Channels||Med. White Noise (pA/rtHz)||\"+\\\n \"Num. Channels||Med. 
White Noise (pA/rtHz)||\\n\"\n cfile.write(table_top)\n logger.info(f\"made new confluence summary at:\\n{confluence_fp}\")\n return confluence_fp", "def pytest_runtest_logreport(report):\n # ignore setup and teardown reporting of tests that are run.\n # keep skipped items...\n outcome = report.outcome\n if outcome != 'skipped' and report.when != \"call\":\n return\n # print(\"LOGREPORT {} --> {}\".format(report.sco_bla, outcome))\n tst_lst.append(report.sco_bla + (outcome, ))", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def report(self, short=True):\n self.logger.finish()\n print(json.dumps(to_json(self.trial, short), indent=2))\n return self", "def InsertLog():", "def _log_results(self, first_time=False):\n\n if not first_time:\n print(self.READINGS_PRINT_TEMPLATE % self.get_sensors_data())\n\n self._log_timer = self._start_timer(Config.LOG_INTERVAL, self._log_results)", "def write_terraform_plan_log(self, response):\n head_msg = \"Terraform Plan is done\"\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(self._write_header(head_msg))\n logfile.write(response[1])\n\n self.write_debug_log(K.TERRAFORM_PLAN_COMPLETED)", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def project_report(request, **kwargs):\n\n #Creating the command for the logs \n print(\"in the project_report ...........................................\")\n outputStr = \"Updating the logs...\"\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += f\"\\r\\n[{time.process_time()}] \"\n self.parent.log += string + \"\\r\\n\"", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n 
if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def homepage_log():\n\n return render_template('home_log.html')", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def __init__(self, log_dir):\n self.writer = SummaryWriter(log_dir)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def render_entry_log(self):\n self.render_log(self.selenium_testcase_entry_template)", "def appendLog(self):\n if self.logBuffer == None :\n self.logBuffer = \"Some header\\nhere\\n\\n\"\n self.logBuffer += \"\\tx\\ty\\ttheta : ul\\tur\\tt-neur\\n\";\n \n self.logBuffer += '%2.1f: %2.6f\\t %2.6f\\t %2.6f : ' % \\\n\t ( self.t, self.env.state[0], self.env.state[2], self.env.state[4] )\n self.logBuffer += '%1.3f\\t %1.3f \\t%1.2f \\t' % \\\n ( self.env.action[0], self.env.action[1], self.env.action[2] )\n self.logBuffer += 'Dst/Theta/Speed: \\t%f\\t%f\\t%f \\tF: %.2f \\n' % \\\n ( self.env.getDistance(), self.env.getOrientation(), self.env.getDistance(), self.getReward() )", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += \"\\r\\n[%s] \" % time.process_time()\n self.parent.log += string + \"\\r\\n\"", "def format_start(self):\n logging.info(\" itr h => cost set troom droom tout dout = t rwd\")\n logging.info(\" %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f\" % (\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n ))", "def create_report_logging():\n print(\"Creating report\")\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(filename=os.path.join(log_path, \"client_stream_report.log\"),\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.INFO)\n logging.info(\"Performance of client-streaming: Average_size: %s bytes, average ingest time: %s seconds, ingestion_rate: %s byte/sec\" \"number _of_messages: %s\"\n %(performance[\"avg_size\"], performance[\"avg_ingest_time\"], performance[\"rate\"], performance[\"number_of_messages\"]))\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)", "def add_daily_summary(self):\n auth_date = self.report_date.strftime(\"%b %-d, %Y\")\n now = datetime.now().strftime(\"%x %X\")\n report_title = ' '.join([\n f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',\n f'(generated on {now} Central)'\n ])\n\n report_notes = [\n ['Notes:'],\n [f'Validation details on this sheet for {self.origin_value} participants only'],\n ['Checkbox validation currently only performed on GROR consents'],\n ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']\n ]\n\n self._add_text_rows(text_rows=[[report_title]], 
format_spec=self.format_specs.get('bold_text'))\n # Add any explanatory text / details about the report that have been included in the layout\n self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),\n row_pos=self.row_pos + 1)\n\n if not self._has_needs_correcting(self.consent_df):\n self._add_text_rows(text_rows=[['No consent validation errors detected']],\n format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)\n\n # Daily summary counts for all the recently authored consents that were processed (regardless of errors)\n self._add_text_rows([['Total Consent Validation Counts']],\n format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)\n self._add_consent_issue_count_header_section(hpo='All Entities')\n self._add_consent_issue_counts(self.consent_df, show_all_counts=True)", "def write_report(self):\r\n self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')\r\n server_log.info('')\r\n server_log.info('=========================================================')\r\n server_log.info('All test clients completed!')\r\n server_log.info(' Start time: {}'.format(self.start_time))\r\n server_log.info(' End time: {}'.format(self.end_time))\r\n server_log.info('')\r\n server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))\r\n for client in self.client_list.values():\r\n server_log.info('---------------------------------------------------------')\r\n server_log.info(' Client {}'.format(client.client_id))\r\n server_log.info(' Test status: {}'.format(client.status))\r\n server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran)) \r\n server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))\r\n server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))\r\n server_log.info(' Files written: {}'.format(client.files_written))\r\n server_log.info(' File size: {}'.format(client.file_size))\r\n server_log.info(' Chunk size: {}'.format(client.chunk_size))\r\n server_log.info('=========================================================')\r\n server_log.info('')", "def logFile(self):\n\n event = 'stim'\n mStr = '{:013}'.format(self.mouse.tag) + '\\t'\n outPutStr = mStr + \\\n datetime.fromtimestamp(int(time())).isoformat(' ') + '\\t' + event\n print (outPutStr)\n if self.textfp != None:\n outPutStr = mStr + '{:.2f}'.format(time()) + '\\t' + event\n self.textfp.write(outPutStr + '\\n')\n self.textfp.flush()", "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def gReport(self, event):\n \n reports.createReports()", "def on_L1(self):\r\n self.log()", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: 
{self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def attach_request_log(response):\n allure.attach(\n dump.dump_all(response).decode(\"utf-8\"),\n name=\"Full request log\",\n extension=\"txt\",\n )", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def print_summary(options, warnings_errors, last_entry_time):\n # Too many statements - pylint: disable=R0915\n \n last_entry_ago = None\n require_force = False\n warnings_generated = False\n \n # calculate the timedelta of last log message\n if last_entry_time:\n last_entry_ago = datetime.now() - datetime.strptime(last_entry_time, '%Y-%m-%d %H:%M:%S.%f %Z')\n \n # check the syncmaster\n gpsyncmaster_running = check_gpsync_running(options)\n if not gpsyncmaster_running:\n require_force = True\n \n logger.info('-----------------------------------------------------')\n logger.info('Master data directory = %s' % options.master_data_dir)\n if options.logfile:\n logger.info('Log directory = %s' % options.logfile)\n logger.info('gpsyncmaster running = %s' % ('yes' if gpsyncmaster_running else 'no'))\n logger.info('Last log entry time = %s' % last_entry_time)\n if last_entry_ago:\n logger.info(' %s ago' % last_entry_ago)\n logger.info('Create new standby master = %s' % ('yes' if options.new_standby else 'no'))\n if options.new_standby:\n logger.info('New standby master host = %s' % options.new_standby)\n logger.info('Force standby activation = %s' % ('yes' if options.force else 'no'))\n logger.info('-----------------------------------------------------')\n if last_entry_ago > timedelta(minutes=LOG_TIME_THRESHOLD_MINS):\n logger.warning('The last log entry timestamp was over %d minutes ago.' % LOG_TIME_THRESHOLD_MINS)\n logger.warning('This indicates that the standby master is likely out of date.')\n require_force = True\n warnings_generated = True\n if len(warnings_errors) > 0:\n logger.warning('The following warnings/errors were found in the most recent log file:')\n for log_msg in warnings_errors:\n logger.warning(' %s' % log_msg)\n\n logger.warning('Greenplum has detected errors and/or warnings in your standby')\n logger.warning('master log file that indicate a problem with the synchronization process')\n logger.warning('between your primary and standby master hosts. Before activating your')\n logger.warning('standby master, it is critical to ensure that it is up to date with all')\n logger.warning('of the transactions currently committed to Greenplum Database. If you')\n logger.warning('activate a standby master that is not in sync with the transactional')\n logger.warning('state of the segments, you may introduce catalog and data')\n logger.warning('inconsistencies that will render your Greenplum Database instance')\n logger.warning('unusable. If your primary master is no longer available and you suspect')\n logger.warning('that you do not have an up-to-date standby master, contact Greenplum')\n logger.warning('Customer Support for further assistance.')\n logger.warning('It is also recommended that you make a backup of the standby master')\n logger.warning('data directory (%s) before continuing.' 
% options.master_data_dir)\n \n require_force = True\n warnings_generated = True\n # Check if we require a force\n if require_force and not options.force:\n logger.warning('If you wish to continue you must use the -f option to force')\n logger.warning('the activation process.')\n warnings_generated = True\n raise GpActivateStandbyException('Force activation required')\n if options.confirm:\n yn = ask_yesno(None, 'Do you want to continue with standby master activation?', 'N')\n if not yn:\n raise GpActivateStandbyException('User canceled') \n\n return warnings_generated", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def pytest_runtest_logreport(report):\n\n report_test_status(logger, report)", "def event_log(self):\n pass", "def logentry(self, string=None):\n if (self._OIFlogging):\n oiflogfile = open(self._commslogfilename, \"a\")\n oiflogfile.write(\"# \" + \"%04.6fs: \" % (self._gettime() - self._logstarttime) + string + \"\\n\")\n oiflogfile.flush()\n else:\n# if self._print_once:\n# self._print_once = 0\n# print self.hilite(\"Warning: Not logging OIF transactions. Use\\n it.logfile(<filename>) to set log filename and\\n it.logging(True) to enable logging\", False, True)\n print 'Unable to write log entry', string\n return", "def initialize_summary(self):\n if self.need_logs:\n self.summary_writer = tf.summary.create_file_writer(self.log_dir)\n if self.verbose > 0:\n full_log_path = os.path.abspath(self.log_dir)\n print('Initialize logs, use: \\ntensorboard --logdir={}'.format(full_log_path))", "def add_param_logs(self, logger):\n if self.config.log_fine_probs:\n plate = ET.SubElement(logger, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"log\", {\n \"idref\":\"featureLikelihood:%s:$(feature)\" % self.name})\n if self.rate_variation:\n ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRatePrior.s:%s\" % self.name})\n ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRateGammaScalePrior.s:%s\" % self.name})\n\n if self.rate_variation:\n plate = ET.SubElement(logger, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"log\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n # Log the scale, but not the shape, as it is always 1 / scale\n # We prefer the scale because it is positively correlated with extent of variation\n ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRateGammaScale:%s\" % self.name})", "def main():\n config_file = get_conf(get_config_name())\n if not config_file:\n sys.exit(1)\n log = get_last_file(config_file[\"LOG_DIR\"])\n MAIN_LOGGER.info(\"we've got log file named %s\", log.path)\n file_name = os.path.join(os.path.dirname(__file__), config_file['REPORT_DIR'],\n \"report-{}.html\".format(log.date))\n if os.path.exists(file_name):\n MAIN_LOGGER.info(\"%s already exists\", file_name)\n sys.exit()\n res = gen_parse_log(log, config_file['PERCENT_FAILS'])\n if not res:\n sys.exit(1)\n MAIN_LOGGER.info(\"log parsed\")\n report = []\n for _ in range(int(config_file[\"REPORT_SIZE\"])):\n try:\n report.append(next(res))\n except StopIteration:\n pass\n MAIN_LOGGER.info(\"report file name %s\", file_name)\n\n if report:\n save_report(report, config_file['TEMPLATE_FILE'], file_name)", "def __init__(self):\n s = \"{0}\\n{1:^150}\\n{0}\\n\".format(\"=\"*150, \"N E B I L A N D\")\n self.log(s)\n self.table_log(\"Iteration\", \"Datetime\",\n 
\"Event\", \"Entity Affected\", \"Extra Info\")\n self.log(\"-\"*150)", "def write_terraform_destroy_log_header(self):\n with open(self.terraform_destroy_log, 'a+') as logfile:\n logfile.write(\"*\" * 100)\n logfile.write(\"\\n*** Terraform Destroy Started ***\")\n logfile.write(\"\\nDateTime: %s\\n\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n logfile.write(\"*\" * 100)\n self.write_debug_log(K.TERRAFORM_DESTROY_STARTED)", "def log_lesson(request):\n user = User.objects.get(email=request.user.email)\n user_info = UserInformation.objects.get(user=user)\n lesson_set = user_info.current_lesson_set\n lesson = Lesson.objects.get(lesson_index=user_info.current_lesson_index)\n main_set = user_info.current_main_set\n\n print(\"lesson_logged\")\n\n lesson_to_log = LessonLog.objects.create(user=user,\n time_stamp=timezone.now(),\n lesson_set_key=lesson_set,\n lesson_key=lesson,\n lesson_index=lesson.lesson_index,\n main_set_key=main_set)\n lesson_to_log.save()", "def main(output, yesterday_option):\n if yesterday_option:\n path = output_yesterday()\n else:\n path = journal_today()\n\n edit_or_output(output=output, path=path)\n\n LOG.info('Otter Pilot journal reporting for duty!')", "def log_all(self):\n self.save_raw()\n self.log()", "def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f = open(filenameF, \"a+\") # Log saved in directory named logs located in same directory as this file\r\n \r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages)):\r\n # f.write('%s\\t\\t\\t' % self._voltages[i][0])\r\n # else:\r\n for i in range(len(self._voltages)):\r\n f.write('%s\\t\\t' % self._voltages[i][0])\r\n f.write('\\n')\r\n\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages[0])):\r\n # line = \"\"\r\n # for j in range(len(self._voltages)):\r\n # line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][2*i]) + \\\r\n # str(self._intensity[j][2*i + 1]) + '\\t'\r\n # f.write(line)\r\n # f.write('\\n')\r\n # else: \r\n for i in range(len(self._voltages[0])):\r\n line = \"\"\r\n for j in range(len(self._voltages)):\r\n line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][i]) + '\\t' \r\n f.write(line)\r\n f.write('\\n')", "def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()", "def main_log(logfile, entry, print_tag=False):\n if logfile != None:\n with open(logfile, 'a') as lf:\n lf.write('{}\\n'.format(entry))\n\n if print_tag:\n print entry", "def report(self, output_dir):", "def setup_logbook(name, extension='.txt', level=logging.INFO, soloDir = True):\n formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d (%(name)s) - %(message)s', datefmt='%d-%m-%y %H:%M:%S')\n date = datetime.today().strftime('%Y-%m-%d')\n if soloDir:\n log_path = str(settings.DATA_DIR + name + '/' + name.replace('_', '') +'_' + date + extension)\n else:\n log_path = str(settings.DATA_DIR + name +'_' + date + extension)\n handler = RotatingFileHandler(log_path, maxBytes=settings.MAX_FILE_SIZE, backupCount=1)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def startLogger(self):\n #------------------------------------------\n # Initialize logger\n log_level = 
getattr(logging, str(self.loglevel).upper())\n logging.basicConfig(filename=self.logfile,level=log_level, format=DEFAULT_LOG_FORMAT)\n logging.info(START_STRING)", "def initialize_reporting(self):\n reporting_params = self.reporting_params\n reporting_params[\"heartbeat_path\"] = self.result_paths[\"current_heartbeat\"]\n reporting_handler = ReportingHandler(**reporting_params)\n\n #################### Make Unified Logging Globally Available ####################\n G.log = reporting_handler.log\n G.debug = reporting_handler.debug\n G.warn = reporting_handler.warn", "def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)", "def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"", "def __log_trial__(self, trial_data):\n from klibs.KLDatabase import EntryTemplate\n\n trial_template = EntryTemplate('trials')\n trial_template.log(P.id_field_name, P.participant_id)\n for attr in trial_data:\n trial_template.log(attr, trial_data[attr])\n\n return self.database.insert(trial_template)", "def write_initial_summary_text(self):\n self._write_text_to_tensorboard(\n tag=\"MLRun\", text=self._generate_run_start_text(), step=0\n )", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def beforeTest(self, test):\n self.setupLoghandler()", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 
'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def log_schedule(self):\n self.logger.log_schedule(self.params.schedule)", "def __init__(self):\n self._logger = logging.getLogger(__name__)\n self.step_name = \"OpenFDA\"", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' + exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def make_ts_report(self):\n self.ts_report = ''\n if self.chosen_ts_method is not None:\n self.ts_report += 'TS method summary for {0} in {1}\\n'.format(self.label, self.rxn_label)\n self.ts_report += 'Methods that successfully generated a TS guess:\\n'\n if self.successful_methods:\n for successful_method in self.successful_methods:\n self.ts_report += successful_method + ','\n if self.unsuccessful_methods:\n self.ts_report += '\\nMethods that were unsuccessfully in generating a TS guess:\\n'\n for unsuccessful_method in self.unsuccessful_methods:\n self.ts_report += unsuccessful_method + ','\n self.ts_report += '\\nThe method that generated the best TS guess and its output used for the' \\\n ' optimization: {0}'.format(self.chosen_ts_method)", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def appendReport(self,text:str) -> None:\n #TODO add colorize text\n now = datetime.now().strftime('%H:%M:%S')\n self.report.config(state=NORMAL)\n self.report.insert(END,now+' '+text+'\\n')\n self.report.see(END)\n self.report.config(state=DISABLED)", "def getInitialTracingReport(userid, sitename, dsname, eventType, analysisJob, jobId, jobDefId, dn):\n\n if analysisJob:\n eventType = eventType + \"_a\"\n\n try:\n # for python 2.6\n import hashlib\n hash_pilotid = hashlib.md5()\n hash_userid = hashlib.md5()\n except:\n # for python 2.4\n import md5\n hash_pilotid = md5.new()\n hash_userid = md5.new()\n\n # anonymise user and pilot id's\n hash_userid.update(userid)\n hash_pilotid.update('ppilot_%s' % jobDefId)\n\n report = {'eventType': eventType, # 
sitemover\n 'eventVersion': 'pilot3', # pilot version\n 'protocol': None, # set by specific sitemover\n 'clientState': 'INIT_REPORT',\n 'localSite': sitename, # localsite\n 'remoteSite': sitename, # equals remotesite (pilot does not do remote copy?)\n 'timeStart': time(), # time to start\n 'catStart': None,\n 'relativeStart': None,\n 'transferStart': None,\n 'validateStart': None, \n 'timeEnd': None,\n 'dataset': dsname,\n 'version': None,\n 'duid': None,\n 'filename': None,\n 'guid': None,\n 'filesize': None,\n 'usr': hash_userid.hexdigest(),\n 'appid': jobId,\n 'hostname': '',\n 'ip': '',\n 'suspicious': '0',\n 'usrdn': dn,\n 'url': None,\n 'stateReason': None,\n }\n\n if jobDefId == \"\":\n report['uuid'] = commands.getoutput('uuidgen -t 2> /dev/null').replace('-',''), # all LFNs of one request have the same uuid\n else:\n report['uuid'] = hash_pilotid.hexdigest()\n\n if jobDefId != \"\":\n tolog(\"Using job definition id: %s\" % (jobDefId))\n\n # add DN etc\n tolog(\"Trying to add additional info to tracing report\")\n try:\n import socket\n report['hostname'] = socket.gethostbyaddr(socket.gethostname())[0]\n report['ip'] = socket.gethostbyaddr(socket.gethostname())[2][0]\n except Exception, e:\n tolog(\"!!WARNING!!2999!! Tracing report could not add some info: %s\" % str(e))\n\n tolog(\"Tracing report initialised with: %s\" % str(report))\n return report", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def add_component(to_add_report, html_report_path, to_add_log=None,\n html_log_path=None):\n html_file_obj = open(html_report_path, 'a')\n html_file_obj.write(to_add_report)\n html_file_obj.close()\n\n if html_log_path is not None:\n html_file_obj = open(html_log_path, 'a')\n html_file_obj.write('<hr/>'+to_add_log)\n html_file_obj.close()", "def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')", "def on_test_begin(self, logs=None):", "def initialize():\n if not os.path.isfile(WORK_LOG_FILENAME):\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writeheader()", "def __init__(self, api_path=None, log_path=None, log_level=\"DEBUG\"):\n\n # Construct the log path. \n if log_path:\n self.log_path = log_path\n else:\n defaultlog_path = \"~/Spirent/CTA/Logs/\"\n\n now = datetime.datetime.now()\n defaultlog_path += now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n defaultlog_path += \"_PID\"\n defaultlog_path += str(os.getpid())\n defaultlog_path = os.path.expanduser(defaultlog_path)\n \n # The environment variable overwrites the default path. 
\n self.log_path = os.getenv(\"CTA_LOG_OUTPUT_DIRECTORY\", defaultlog_path) \n\n self.log_path = os.path.abspath(self.log_path)\n self.logfile = os.path.join(self.log_path, \"cta_python.log\") \n\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # NOTE: Consider limiting the number of log directories that are created.\n # It would mean deleting older directories.\n\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - stc::get automationoptions -suppressTclErrors\n #16/05/18 11:03:53.717 INFO 3078268608 - user.scripting - return false\n #2016-05-19 14:05:56,382 UserID =mjefferson\n #2016-05-19 14:05:56,382 Log Level=INFO\n\n if log_level == \"CRITICAL\":\n log_level = logging.CRITICAL\n elif log_level == \"ERROR\":\n log_level = logging.ERROR\n elif log_level == \"WARNING\":\n log_level = logging.WARNING\n elif log_level == \"INFO\": \n log_level = logging.INFO\n else:\n # DEBUG is the default log level.\n log_level = logging.DEBUG \n \n logging.basicConfig(filename=self.logfile, filemode=\"w\", level=log_level, format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.Formatter(fmt='%(asctime)s.%(msecs)03d',datefmt='%Y/%m/%d %H:%M:%S')\n # Add timestamps to each log message.\n #logging.basicConfig()\n # The logger is now ready. \n\n logging.info(\"Spirent TestCenter Conformance Application Python API is starting up...\")\n logging.info(\"OS Type = \" + os.name)\n logging.info(\"API Path = \" + api_path)\n logging.info(\"UserID = \" + getpass.getuser())\n logging.info(\"Log Level = \" + logging.getLevelName(log_level)) \n logging.info(\"Current Path = \" + os.path.abspath(os.getcwd())) \n logging.info(\"Log Path = \" + self.log_path)\n\n # Instantiate the Tcl interpreter.\n self.tcl = Tcl()\n\n self.tcl.eval(\"lappend ::auto_path {\" + api_path + \"}\")\n\n logging.info(\"Tcl Version = \" + self.tcl.eval(\"info patchlevel\"))\n logging.info(\"Tcl ::auto_path = \" + self.tcl.eval('set ::auto_path'))\n logging.info(\"Loading the Spirent TestCenter Conformance Application in the Tcl interpreter...\")\n self.Exec(\"package require SpirentTestCenterConformance\")\n\n return", "def __init__(self):\n super(ForceBalanceTestResult,self).__init__()\n self.logger = forcebalance.output.getLogger('forcebalance.test.results')", "def display_report(request, **kwargs):\n\n #Getting the report of the tests \n try:\n outputStr = sidecar.events.test_logs(project_id=kwargs['project_id'])\n outputStr = outputStr.results\n except Exception, e:\n outputStr = \"Updating the logs...\"\n \n #Making the output\n context = {\n \"page_title\": _(\"Test Report\"),\n \"test_report\": outputStr\n }\n return render(request, 'rally_dashboard/events/view_report.html', context)", "def configure(base_path):\n\n log_path = os.path.join(\n base_path,\n 'logs',\n )\n current_time = datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n log_fmt = '%(asctime)s [%(threadName)-12.12s] [%(levelname)-3.4s] %(message)s'\n\n logging.basicConfig(\n level=logging.INFO,\n format=log_fmt,\n handlers=[\n TimedRotatingFileHandler(\n filename=f\"{log_path}/analysis-service.({current_time}).log\",\n encoding='utf-8',\n when=\"d\"\n ),\n logging.StreamHandler()\n ]\n )", "def _PrependTimeStamp(log_string):\n global _log_time\n if _log_time:\n return \"# %s: %s\" % (datetime.datetime.now().strftime(\"%m/%d/%y %H:%M:%S\"),\n log_string)\n else:\n # timestamp logging disabled\n return log_string", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def on_R1(self):\r\n 
self.log()", "def on_up(self):\r\n self.log()", "def logtool(self, action, **options):\n pass", "def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def _log(self, action: types.NestedArray) -> None:\n if self._logger is None:\n return\n self._logger.info('{}, {}, {}, {}, {}, {}, {}'.format(\n self._last_timestep.observation['STAGE'],\n self._last_timestep.observation['CHIPS'],\n self._last_timestep.observation['PLAYER_TOTAL'],\n self._last_timestep.observation['PLAYER_ACES'],\n self._last_timestep.observation['DEALER_TOTAL'],\n action,\n self._deck_distribution))", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger" ]
[ "0.6069437", "0.5960698", "0.59574604", "0.5930421", "0.5885275", "0.5810344", "0.57965165", "0.5733731", "0.5677679", "0.567086", "0.56483895", "0.5602861", "0.5598323", "0.5581187", "0.5545258", "0.5526509", "0.55119", "0.5511117", "0.5473258", "0.5438435", "0.54359597", "0.5406004", "0.54059684", "0.5400562", "0.53897744", "0.53851074", "0.5377885", "0.5372496", "0.53715295", "0.5361603", "0.53577167", "0.5335452", "0.5331451", "0.5311146", "0.5307584", "0.5298697", "0.52960354", "0.52786505", "0.5239366", "0.52381575", "0.52315915", "0.5225695", "0.5220764", "0.521383", "0.5190329", "0.5188071", "0.51763105", "0.5154318", "0.5153545", "0.515227", "0.51462305", "0.51435757", "0.5140165", "0.5133178", "0.51328295", "0.5129633", "0.5128549", "0.51230854", "0.5113613", "0.5111369", "0.5107872", "0.51058275", "0.51055163", "0.5105071", "0.50976247", "0.50945884", "0.50903046", "0.5081358", "0.5071599", "0.5065758", "0.5064538", "0.5064532", "0.50508994", "0.50501066", "0.50455767", "0.5039325", "0.50386405", "0.50308335", "0.50292003", "0.5027372", "0.50268483", "0.50262684", "0.50260544", "0.5025487", "0.50153935", "0.50075495", "0.50040406", "0.49967536", "0.499206", "0.49912676", "0.49872372", "0.49857572", "0.49777967", "0.49773124", "0.4976824", "0.49755526", "0.49731064", "0.4971881", "0.49706355", "0.49692982" ]
0.6250697
0
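The record ending here documents a logging-initialisation helper: it resolves a textual level with `getattr(logging, ...)` and hands the result to `logging.basicConfig`. A minimal, runnable sketch of that idiom follows; the function name `setup_logging`, the defaults, and the format string are illustrative assumptions, not values taken from the dataset.

```python
import logging

# Assumed format string; the dataset only shows it referenced as DEFAULT_LOG_FORMAT.
DEFAULT_LOG_FORMAT = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"

def setup_logging(loglevel: str = "info", logfile: str = "starter.log") -> None:
    # getattr maps the string "info" to logging.INFO; fall back to INFO
    # if the name does not resolve to a known level constant.
    level = getattr(logging, str(loglevel).upper(), logging.INFO)
    logging.basicConfig(filename=logfile, level=level, format=DEFAULT_LOG_FORMAT)
    logging.info("logging initialised at level %s", logging.getLevelName(level))
```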
get HTTP protocol for this starter (http/https)
def get_http_protocol(self): if self.cfg.ssl: return "https" else: return "http"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol", "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'", "def getProtocol(self) -> str:\n ...", "def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'", "def protocol(self, code: str) -> str:\n return 'https'", "def scheme(self):\n return self.use_ssl and \"https\" or \"http\"", "def protocol(self):\n return helpers.get_protocol()", "def protocol(self) -> str:\n return pulumi.get(self, \"protocol\")", "def protocol(request):\n return request.param", "def protocol(self, code):\n return self.url.scheme", "def protocol(self):\n return self._host[CONF_PROTOCOL]", "def protocol(self):\n return self._config[\"security.protocol\"]", "def protocol(self):\n\n if '://' in self.host:\n scheme, host = self.host.split('://', 1)\n return scheme\n elif self.port == 21:\n return 'ftp'\n elif self.port == 22:\n return 'sftp'\n elif self.port == 990:\n return 'ftps'\n else:\n # Uncertain, assume FTP.\n return 'ftp'", "def get_protocol(url):\n result = re.search(r\"^https?://\", url)\n return result.group(0) if result else None", "def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")", "def getProtocol(self, _):\r\n return self._protocol", "def protocol(self):\n return self._protocol", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def get_protocol(binding_id):\n binding_to_protocol = {VID_TAXII_HTTP_10: \"http\", VID_TAXII_HTTPS_10: \"https\"}\n try:\n return binding_to_protocol[binding_id]\n except:\n raise ValueError(\"Unknown Protocol Binding ID %s\" % binding_id)", "def protocol(self) -> str:\n return self.__parameters.protocol", "def query_scheme(self):\n\n return 'https'", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def protocol(self) -> Optional[pulumi.Input[Union[str, 'Protocol']]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n self.protocol = self.protocol or \"http\"", "def protocol(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")", "def getHttp(self):\n return self._http", "def proxy_protocol(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def default_protocol(self):\n return \"dropbox://\"", "def protocol(self):\n self._recv_protocol()\n return self._protocol", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def 
protocol(self) -> str:\n return __name__", "def scheme(self) -> Optional[pulumi.Input[Union[str, 'HTTPSchemeType']]]:\n return pulumi.get(self, \"scheme\")", "def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError", "def in_protocol(self) -> str:\n return pulumi.get(self, \"in_protocol\")", "def get_server_url(self):\r\n default_port={'http': '80', 'https': '443'}\r\n environ = self.cgi_environment()\r\n if (environ.get('HTTPS') in ('on', 'ON') or\r\n environ.get('SERVER_PORT_SECURE') == \"1\"):\r\n # XXX this will currently never be true\r\n protocol = 'https'\r\n else:\r\n protocol = 'http'\r\n\r\n if 'HTTP_HOST' in environ:\r\n host = environ['HTTP_HOST'].strip()\r\n hostname, port = urllib.splitport(host)\r\n else:\r\n hostname = environ['SERVER_NAME'].strip()\r\n port = environ['SERVER_PORT']\r\n\r\n if port is None or default_port[protocol] == port:\r\n host = hostname\r\n else:\r\n host = hostname + ':' + port\r\n server_url = '%s://%s' % (protocol, host)\r\n if server_url[-1:]=='/':\r\n server_url=server_url[:-1]\r\n return server_url", "def default_protocol(self):\n return \"sftp://\"", "def test_http_get_kind(self):\n assert_equal(self.test_http.get_kind(), 'mphttp')", "def protocol(self):\n return self._info.next # pylint: disable=E1101", "def get_protocols(self):\r\n\r\n return None", "def test_get_protocol_with_more_than_one_value():\n request = Mock(\n headers={\"X-Forwarded-Proto\": \"https,http,http\"},\n protocol=\"http\",\n )\n expected = \"https\"\n protocol = get_browser_protocol(request)\n\n assert expected == protocol", "def _get_protocol_type(self):\n return self.__protocol_type", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def url(self):\n return 'http://%s:%d' % (self._host, self._port)", "def protocol(self) -> Optional[pulumi.Input[Union[str, 'GatewayRouteConfigProtocol']]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self):\n ...", "def http(self):\n return str(self._http_method)", "def ip_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_protocol\")", "def protocol(self) -> NetworkProtocol:\n if hasattr(self, \"_protocol\"):\n return self._protocol\n _args: list[Arg] = []\n _ctx = self._select(\"protocol\", _args)\n return _ctx.execute_sync(NetworkProtocol)", "def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)", "def trafficProtocol(self):\n #\n # TODO: Reimplement this if possible.\n #\n return client.trafficProtocol(self)", "def https(url):\n if url[:8] == 'https://':\n return url\n if url[:7] != 'http://':\n return False\n return 'https://' + url[7:]", "def _base_url(self):\n # URL Protocol\n proto = 'https' if self._ssl else 'http'\n\n # Device port number\n if self._port is None:\n port = 8080 if self._ssl else 8008\n else:\n port = self._port\n \n return f'{proto}://{self._address}:{port}/api/v1'", "def __find_protocol(self, url):\n match = self.__REGEX_SCHEMA.search(url)\n if match:\n protocol = match.group(0).split(':')[0]\n return protocol\n return None", "def getHost():", "def getHost():", "def _http(self):\n raise NotImplementedError(\"HTTP transport is not supported.\")", "def transportprotocol(self) :\n\t\ttry :\n\t\t\treturn self._transportprotocol\n\t\texcept Exception as e:\n\t\t\traise e", "def link(self):\n return 'http://{}:{}'.format(self.basic_url, self.port)", "def url_base(self):\n return 'http://%s:%d/' % (self.host, self.port)", "def 
base(self):\n\n if self.discovery_address:\n return ('http://%s:%s' % self.discovery_address_tuple)\n\n elif self.multicast_address:\n return ('http://%s:%s' % self.multicast_address_tuple)\n\n elif self.address == \"0.0.0.0\":\n return ('http://%s:%s' % self.address_tuple)\n\n else:\n return ('http://%s:%s' % self.address_tuple)", "def get_http():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n return http", "def authentication_protocol(transport_type):\n return \"ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking no'\"", "def get_highest_http(uri, https, upgrade=True):\n highest_http = '1.0'\n response_status = \"\"\n redirect = False\n location = \"\"\n port = 443 if https else 80\n use_https = https\n use_upgrade = upgrade\n host, path = get_host(uri)\n i_p = check_host_name(host)\n request_line = \"GET \"+ path +\" HTTP/1.1\\r\\n\"\n headers_line = \"Host: \"+ host+ \"\\r\\n\"\n\n upgrade_line = \"Connection: close\\r\\nUpgrade: h2c\\r\\n\\r\\n\" if not https \\\n else \"Connection: Close\\r\\nuser-agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US)\"+ \\\n \"AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.86 Safari/533.4\\r\\n\\r\\n\" #[3]\n\n h11_request = (request_line+headers_line+upgrade_line).encode()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n if https:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n ctx.set_alpn_protocols(['h2', 'http/1.1', 'http/1.0'])\n ssl_sock = ctx.wrap_socket(sock, server_hostname=host)\n sock = ssl_sock\n try:\n sock.settimeout(5)\n sock.connect((i_p, port))\n sock.settimeout(None)\n except socket.error:\n print(\"The socket can't seem to connect,\"+\n \"even though host name was resolved for the provided URI\")\n sys.exit()\n except socket.timeout:\n print(\"A timeout occured because the host failed to connect for 5 seconds\")\n if https:\n proto = sock.selected_alpn_protocol()\n if proto == 'h2':\n highest_http = '2.0'\n sock.close()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ctx = ssl.create_default_context()\n ctx.set_alpn_protocols(['http/1.1', 'http/1.0'])\n ssl_sock = ctx.wrap_socket(sock, server_hostname=host)\n sock = ssl_sock\n sock.connect((i_p, port))\n\n rec = send_and_recieve(sock, h11_request)\n sock.close()\n status_line = rec[0]\n response_headers = rec[1:]\n\n if highest_http != '2.0':\n highest_http = \"1.0\" if 'HTTP/1.0' in status_line else \"1.1\"\n if not https and '101' in status_line:\n highest_http = \"2.0\"\n\n\n if '200' not in status_line and '204' not in status_line and '205' not in status_line:\n if '302' in status_line or '301' in status_line:\n\n redirect = True\n\n for header in response_headers:\n if 'Location' in header:\n if 'https' in header:\n use_https = True\n redirect = True\n location = (header.split(\" \")[1])\n if location == uri:\n print(\"This site keeps redirecting to itself and returning 302's Something is wrong\")\n redirect = False\n break\n elif '101' in status_line:\n use_upgrade = False\n location = uri\n redirect = True\n elif '500' in status_line or '505' in status_line:\n print(\"Recieved a 5xx response from the server at location: \" + uri +\" exiting now...\")\n sys.exit()\n elif '404' in status_line:\n print(\"The specified host exists but the path \" + path + \" was not found\")\n sys.exit()\n else:\n print('An unexpected response status of ' +status_line.split(\" \")[1] +' was received from site \"' + uri +'\"')\n sys.exit()\n\n response_status 
= status_line.split(\" \")[1]\n tup = (\n response_status,\n response_headers,\n highest_http,\n redirect,\n location, use_https,\n use_upgrade\n )\n return tup", "def protocol_name(self):\n self._protocol_name = 'kerberos'\n return self._protocol_name", "def _get_local_endpoint():\n return \"https://%s:8446\" % socket.getfqdn()", "def extract_scheme(url):\n return urlsplit(url, \"http\").scheme", "def ip_protocol(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"ip_protocol\")", "def buildProtocol(self, addr):\n return _SSLServerProtocol(self)", "def v_protocol(self):\n return self._protocol", "def v_protocol(self):\n return self._protocol", "def supports_http_1_1():", "def tfcProtocol(contactString):\n args = urlsplit(contactString)[3]\n value = args.replace(\"protocol=\", '')\n return value", "def ip_protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_protocol\")", "def GetSchemeHostPort(environ):\n url = \"{0}://\".format(environ[\"wsgi.url_scheme\"])\n\n if environ.get(\"HTTP_HOST\"):\n url += environ[\"HTTP_HOST\"]\n else:\n url += environ[\"SERVER_NAME\"]\n\n if environ[\"wsgi.url_scheme\"] == \"https\":\n if environ[\"SERVER_PORT\"] != \"443\":\n url += \":{0}\".format(environ[\"SERVER_PORT\"])\n else:\n if environ[\"SERVER_PORT\"] != \"80\":\n url += \":{0}\".format(environ[\"SERVER_PORT\"])\n\n return url", "def switch_protocol(self):\n with self._lock:\n if self.protocol == 'rtmp':\n self._protocol = 'hls'\n else:\n self._protocol = 'rtmp'", "def provides_protocol(type_, protocol):\n return AdaptationManager.provides_protocol(type_, protocol)", "def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False", "def ip_protocol(self) -> str:\n protocol = f\"ipv{self.ip_address.version}\"\n\n log.debug(\"Host %s: IP protocol for paramiko is %s.\", self.host)\n return protocol", "def buildProtocol(addr):", "def protocol(self) -> typing.Optional[\"RedirectProtocol\"]:\n return self._values.get('protocol')", "def protocol(self) -> typing.Optional[\"RedirectProtocol\"]:\n return self._values.get('protocol')", "def test_url_add_missing_protocol(self):\n assert ct.url_add_missing_protocol(\"https://www.bad-actor.services/\") == \"https://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"http://www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\n \"www.bad-actor.services/\",\n default=\"https\") == \"https://www.bad-actor.services/\"", "def protocol_details(self) -> Optional[pulumi.Input['ServerProtocolDetailsArgs']]:\n return pulumi.get(self, \"protocol_details\")", "def protocol_details(self) -> Optional[pulumi.Input['ServerProtocolDetailsArgs']]:\n return pulumi.get(self, \"protocol_details\")", "def available_protocols(self):\n return [\"ssh://\", \"sftp://\"]", "def GetURL(self, rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)", "def http_proxy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"http_proxy\")", "def http_proxy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"http_proxy\")", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def get_v3io_api_host():\n api = None\n if config.v3io_api:\n api = config.v3io_api\n\n # strip protocol\n if \"//\" in api:\n api = 
api[api.find(\"//\") + 2 :]\n\n # strip port\n if \":\" in api:\n api = api[: api.find(\":\")]\n\n # ensure webapi prefix\n if not api.startswith(\"webapi.\"):\n api = f\"webapi.{api}\"\n return api", "def http(self) -> Optional[pulumi.Input['HttpScaleRuleArgs']]:\n return pulumi.get(self, \"http\")" ]
[ "0.85699964", "0.8152591", "0.7916452", "0.74209046", "0.7311242", "0.7305693", "0.7218571", "0.7194539", "0.70896435", "0.69592017", "0.68593144", "0.68068993", "0.6778132", "0.6735765", "0.67253834", "0.65918314", "0.6591383", "0.6480884", "0.6470097", "0.64394146", "0.64071524", "0.6391711", "0.63792455", "0.63792455", "0.63594115", "0.6356992", "0.6356992", "0.6356992", "0.6356992", "0.6356992", "0.6347469", "0.6310512", "0.6281721", "0.6281721", "0.62649167", "0.6215847", "0.6161131", "0.6140467", "0.61353385", "0.6116976", "0.610478", "0.6094973", "0.6049937", "0.6048728", "0.6040472", "0.59896237", "0.5968323", "0.59664375", "0.5955863", "0.5924774", "0.5889206", "0.5877006", "0.5866716", "0.5847708", "0.5847696", "0.58461237", "0.5822375", "0.5820883", "0.5812515", "0.5809244", "0.5796524", "0.57925516", "0.579115", "0.579115", "0.57867515", "0.5745017", "0.57322705", "0.57321763", "0.57265943", "0.5704068", "0.5702585", "0.56913066", "0.5688481", "0.56878066", "0.5687605", "0.5687406", "0.56708074", "0.56605214", "0.56605214", "0.5653566", "0.5643089", "0.5634159", "0.5632778", "0.5604776", "0.560346", "0.5596735", "0.55883044", "0.5554609", "0.5540412", "0.5540412", "0.5530379", "0.5523982", "0.5523982", "0.55015194", "0.54986995", "0.5487175", "0.5487175", "0.5479135", "0.54791236", "0.5467665" ]
0.8308429
1
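The record above pairs the query "get HTTP protocol for this starter (http/https)" with a scheme-selection helper keyed off a TLS flag. A self-contained sketch of the same pattern; the `Config` dataclass is a stand-in for the starter's `cfg` object, which the dataset does not show:

```python
from dataclasses import dataclass

@dataclass
class Config:
    ssl: bool = False  # hypothetical flag mirroring self.cfg.ssl in the record

class Starter:
    def __init__(self, cfg: Config) -> None:
        self.cfg = cfg

    def get_http_protocol(self) -> str:
        # Same branch as the record's document: TLS on -> "https", off -> "http".
        return "https" if self.cfg.ssl else "http"

assert Starter(Config(ssl=True)).get_http_protocol() == "https"
assert Starter(Config()).get_http_protocol() == "http"
```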
Check that starter instance is alive
def check_that_instance_is_alive(self): if not self.instance.is_running(): raise Exception(f"Starter instance is not running. Base directory: {str(self.basedir)}") if self.instance.status() == psutil.STATUS_ZOMBIE: raise Exception(f"Starter instance is a zombie. Base directory: {str(self.basedir)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_alive(self):\n pass", "def is_alive(self):\n return True", "def is_alive(self):", "def alive(self):\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self, site):\n try:\n return requests.get(site).status_code == 200\n except Exception:\n pass", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def is_alive(self):\n if self.health > 0:\n return True\n return False", "def is_alive(self):\n return self.alive", "def is_alive(self):\n return self.alive", "def KeepAlive(self) -> bool:", "def IsAlive(self, *args, **kwargs):\n pass", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True", "def isAlive(self):\n return self.is_alive()", "def is_alive(self):\n return self._is_alive", "def is_alive(self) -> bool:\n return self._main_thread.is_alive()", "def service( self ):\n\n self.alive = time.time()", "def alive(self):\n return self._thread is not None", "def isAlive(self):\n raise NotImplementedError", "def is_running(self) -> bool:\n return False", "def alive(p):\n return p.is_alive()", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def isalive():\n return 'alive'", "def is_alive(self):\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))", "def is_available_while_running(cls) -> bool:\n\n return True", "def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def ServerIsReady( self ):\n return self.ServerIsHealthy()", "def is_running(self):\n\t\treturn self in _running", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def isstarted():", "def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def is_ready() -> bool:\n return True", "def is_alive(self):\r\n return self._health_points > 0", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def running(self) -> bool:", "def test_alive():\n pass", "def test_alive():\n pass", "def test_alive():\n pass", "def is_alive(self):\n try:\n _ = self.CORE.title\n return True\n except SeleniumExceptions.WebDriverException:\n return False", "def is_working(self):\n if not self.__th:\n return False\n return self.__th.is_alive()", "def is_alive(addr, user):\n return _ssh_master_cmd(addr, user, 'check') == 0", "def is_alive(self):\n return (self.read_name() != '')", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def is_ready(self) -> bool:\n pass", "def should_keep_running(self):\n return len(self.party.active_users())", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "def healthy_service(self):\n return not 
self.service_currently_down and not self.service_recently_down", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def isAlive(self):\r\n # Just use connectionInit, that is our internal variable\r\n return self.connectionInit", "def IsStarted(self) :\n\t\t...", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def is_running(self):\n\t\treturn self._running", "def is_alive(self) -> bool:\n if self._thread is None:\n return False\n return self._thread.is_alive()", "def is_alive(self):\n ret = subprocess.call(\n shlex.split(\"ping -c 1 -W 2 %s\" % self.ip_address),\n stdout=open('/dev/null', 'w'),\n stderr=subprocess.STDOUT,\n )\n \n if ret == 0:\n return True\n else:\n return False", "def is_alive(self):\n\n return not self._stop.is_set()", "def check_heartbeat(self):\n return True", "def is_ready(cls):\n\n return False", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def alive(self):\n return self._thread.is_alive()", "def check_health(self):\n return defer.succeed(True)", "def running(self):\n\t\treturn self._start is not None", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def isAlive(self):\n return self.state", "def isAlive(self):\n return self.state", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def check_is_alive(self) -> bool:\n crew_alive = False\n for operator in self.__operators:\n if operator.is_alive:\n crew_alive = True\n break\n if crew_alive and self.health > self.MIN_HEALTH:\n self.__is_alive = True\n return True\n else:\n self.__is_alive = False\n return False", "def start_check(self):\n pass", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)", "def alive(self):\n return self._process.is_alive()", "def step_impl(context):\n log.info(\"====> Checking if the CME server is alive via SSH\")\n assert context.cme_session.is_server_alive(), \" ****> The server is not available via SSH\"", "def is_alive(self) -> bool:\n 
if self._loop_handler:\n return self._loop_handler.is_alive()\n else:\n return False", "def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def IsWiredUp(self):\n return self.wired.IsUp()", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def isAlive(self):\n return self._state.isAlive()", "def ensure_running_service_daemon(context):\n svc = Management()\n if svc._check_if_its_up(usutil.UBERSERVER):\n context.temp_service_daemon = None\n else:\n svc.start()\n context.temp_service_daemon = svc", "def online(self):\n return False", "def running(self):\n\n return can_connect_to(APIConsumer.host, APIConsumer.port)", "def keepalive(self) -> None:", "def _is_running(self):\n return self._run_state.is_running()" ]
[ "0.77166504", "0.7615171", "0.74846673", "0.7292565", "0.7082333", "0.69093466", "0.69083273", "0.687419", "0.6822564", "0.66718936", "0.66718936", "0.66681343", "0.6643307", "0.6634902", "0.6621218", "0.6621218", "0.66178095", "0.66134834", "0.6612024", "0.6602468", "0.6561023", "0.6549695", "0.6541095", "0.65271956", "0.65264034", "0.65168935", "0.6505408", "0.6487338", "0.6476037", "0.6471301", "0.64606595", "0.64424014", "0.6423399", "0.63939476", "0.6380195", "0.6356962", "0.6352248", "0.6350006", "0.63353467", "0.6332506", "0.6327823", "0.6318195", "0.63178116", "0.63132244", "0.63029873", "0.62952304", "0.62904507", "0.62904507", "0.62904507", "0.6283415", "0.6280577", "0.6278473", "0.62767553", "0.6266161", "0.62399817", "0.62282956", "0.62204146", "0.62102497", "0.62058073", "0.6199949", "0.61971945", "0.6193812", "0.6193792", "0.6189573", "0.6187789", "0.6179953", "0.617462", "0.6172146", "0.61684036", "0.61547357", "0.6135471", "0.6132213", "0.61319774", "0.6126149", "0.6117794", "0.60999334", "0.608224", "0.608224", "0.608224", "0.60670835", "0.60670835", "0.60658956", "0.6065311", "0.60633546", "0.60583895", "0.60540056", "0.6051667", "0.60514206", "0.60508144", "0.6049183", "0.6048486", "0.60475534", "0.6040684", "0.6039373", "0.6028873", "0.60167545", "0.600826", "0.5994151", "0.5994072", "0.59788555" ]
0.820519
0
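The record above shows a liveness check built on `psutil`: `is_running()` alone is not enough, because psutil reports zombie processes as running, so the document checks the process status separately. A runnable sketch under that reading; the standalone function signature is an assumption (the dataset wraps this logic in a starter class):

```python
import psutil

def check_process_is_alive(proc: psutil.Process, basedir: str) -> None:
    # psutil.Process.is_running() returns True for zombies as well,
    # hence the explicit STATUS_ZOMBIE check, exactly as in the record.
    if not proc.is_running():
        raise Exception(f"Starter instance is not running. Base directory: {basedir}")
    if proc.status() == psutil.STATUS_ZOMBIE:
        raise Exception(f"Starter instance is a zombie. Base directory: {basedir}")
```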
check whether substring is present in the starter log
def check_that_starter_log_contains(self, substring: str): if self.count_occurances_in_starter_log(substring) > 0: return else: raise Exception( f"Expected to find the following string: {substring}\n in this log file:\n{str(self.log_file)}" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_contains(self, s: str) -> bool:\n return len(list(filter(lambda str: s in str, self.logs))) > 0", "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def _is_substring(s1, s2):\n\treturn s1.find(s2) != -1", "def match_substring(self, str):\n if self.repo_relative_path.find(str) >= 0:\n return True\n\n if self.uuid:\n if (\"uuid://%s%s\" % (self.uuid, self.repo_relative_path)).find(str) >= 0:\n return True\n\n if self.url:\n if (self.url + self.repo_relative_path).find(str) >= 0:\n return True\n\n return False", "def issubstring(substring, string):\n return substring in string", "def search(self):\n if self.substring in [None, \"\"]:\n print(\"Invalid Value For Substring\")\n elif self.string in [None, \"\"]:\n print(\"Invalid Value For String\")\n elif len(self.substring) > len(self.string):\n print(\"Length of Substring Less Than String\")\n else:\n posn = self.comparison()\n if posn == -1:\n print(\" Substring Not Found :: Search Failed\")\n else:\n print(\" Substring Found at Position --> \", posn+1)", "def substring_match(recipe, word):\n if names_only:\n line = recipe.name\n else:\n line = str(recipe)\n\n if not case:\n word = word.lower()\n line = line.lower()\n\n return line.find(word) != -1", "def isSubString(string1, string2, minMatchLength = 0):\n return (True)", "def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring", "def isstringIs_substring(str1, str2):\r\n if str1 in str2:\r\n return True\r\n else:\r\n False", "def starts_with(text, substring):\n assert text.startswith(substring), \"%r doesn't start with %r\" % (text,\n substring)", "def dz_is_in(dz_string, substring):\n if substring not in dz_string:\n return 0\n else:\n return 1", "def count_occurances_in_starter_log(self, substring: str):\n number_of_occurances = self.get_log_file().count(substring)\n return number_of_occurances", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def is_in_log(self, regex, start=0):\n\n ex = re.compile(regex)\n for l in self.logs[start:]:\n if ex.search(l):\n logging.debug(\"Found '%s' in logs\", regex)\n return l\n\n logging.debug(\"Did not find '%s' in logs\", regex)\n return None", "def is_junk(substring):\n return len(substring.strip(' \\t\\0')) == 0 and len(substring) > 10", "def contains(strn, substr):\n try:\n strn.index(substr)\n return True\n except ValueError:\n return False", "def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False", "def val_starts_with(base_string, strings):\n for the_string in strings:\n if base_string.startswith(the_string):\n return True", "def dzs_are_in(dz_string, substring1, substring2):\n if substring1 not in dz_string:\n return 0\n elif substring2 not in dz_string:\n return 0\n else:\n return 1", "def isSubStringNoCase(string1, string2, minMatchLength = 0):\n return (True)", "def check_string(str_one, str_two):\n str_one = str_one.lower()\n str_two = str_two.lower()\n # print(str_one,str_two)\n if len(str_two) < len(str_one):\n return bool(re.search(str_two+'$',str_one))\n else:\n return bool(re.search(str_one+'$',str_two))", "def startswith(value, s):\n\n if not value: return False\n return value.find(s) == 0", "def has_prefix(cls, string1, string2):\n return len(cls.get_prefix(string1, string2)) > 0", "def test_get_substrings_standard(self):\n ans = self.sf.get_substrings()\n\n for substr in 
ans:\n self.assertTrue(substr.freq >= 2)\n self.assertTrue(substr.subr_saving() > 0)", "def has_substring(pattern, text):\n M = len(pattern)\n N = len(text)\n\n # create the LPS\n lps = [0] * M\n j = 0\n\n compute_lsp(pattern, M, lps)\n\n i = 0\n final_index = 0\n\n while (N - i) >= (M - j):\n if pattern[j] == text[i]:\n i += 1\n j += 1\n if j == M:\n # on Last index\n final_index = i - j\n j = lps[j - 1]\n\n elif i < N and pattern[j] != text[i]:\n\n if j != 0:\n j = lps[j - 1]\n else:\n i += 1\n\n return final_index", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def _check_logic_syntax(string):\n return logExp.matches(string)", "def match(self, head_str):\n\t\tif \"masscan\" in head_str.lower():\n\t\t\treturn True\n\t\treturn False", "def find_str_in_file(f: Path, s: str) -> bool:\n return f.read_text(encoding='utf-8').find(s) != -1", "def filter_line(line:str) -> bool:\n fails = is_short_sentence(line, MIN_LINE_LENGTH)\n\n return not fails", "def _recordIsWGS(recordStr) :\n lines = recordStr.split(\"\\n\")\n WGS_lines = [x for x in lines if x.startswith(\"WGS \")]\n if len(WGS_lines) == 1 :\n return WGS_lines[0]\n elif len(WGS_lines) == 0 :\n return False\n else :\n raise Exception(\"Several lines starting with \\\"WGS \\\" in a GenBank record\")", "def test_string(self, s):\n\n data = s.split(' ')\n\n origin = ' '.join(data[0:-1])\n if not origin:\n return False\n \n origin_hashed = self.hash_with_salt(origin)\n\n return origin_hashed == s", "def assertSubstringIn(self, substring, container, msg=None):\n result = any(substring in item for item in container)\n if not result:\n msg = self._formatMessage(\n msg, f'{substring} is not substring in {safe_repr(container)}'\n )\n self.fail(msg)", "def search(self, string):\n fid = open(os.path.join(self.output_path, \"%s.html\" % TEST_FILE_STEM), \"r\")\n found = False\n for line in fid.readlines():\n if re.search(string, line):\n found = True\n break\n fid.close()\n return found", "def stringcheck(self, rule, string):\n if not \"*\" in rule:\n return rule in string\n elif rule[0] == \"*\":\n return string.endswith(rule[1:])\n elif rule[-1] == \"*\":\n return string.startswith(rule[:-1])\n else:\n start, end = rule.split(\"*\")\n return string.startswith(start) and string.endswith(end)", "def check_suffix(custom_str: str) -> bool:\r\n\r\n if custom_str.startswith(\"-\"):\r\n return True\r\n if len(custom_str) < 4:\r\n custom_str = custom_str.lower()\r\n for c in ASCII_LOWER:\r\n if c in custom_str:\r\n return True\r\n return False", "def is_valid(self, qstr):\r\n pass", "def substring_in_list(s, varlist):\n if varlist is None:\n return False\n is_sub = False\n for v in varlist:\n if v in s:\n is_sub = True\n break\n return is_sub", "def test_evaluate_substring_of_expression(self):\n value = self.evaluate_common(\"substringof('tart','startswith')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"substringof('start','startswith')\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"substringof('t','startswith')\")\n self.assertTrue(value.value is True)\n # not case insensitive\n value = self.evaluate_common(\"substringof('W','startswith')\")\n self.assertTrue(value.value is False)\n try:\n value = self.evaluate_common(\"substringof(1,'3.14')\")\n 
self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"substringof('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def starts(str_, val_to_check):\n \n return (str_.startswith(val_to_check))", "def is_resolution_and_offset_str(x):\n if x.count('x') == 1 and x.count('+') == 2:\n return True\n return False", "def is_substring(text: str, elements: set) -> bool:\n for element in elements:\n if text in element:\n return True\n\n return False", "def __find_string_in_response(self, fullResponse, searchFor):\n check = True\n rawResponse = fullResponse;\n if \"result\" not in rawResponse.text:\n check = False\n else:\n responseJSON = rawResponse.json()\n length_responseJSON = len(responseJSON[\"result\"])\n for i in range(0,length_responseJSON,1):\n check = searchFor in responseJSON[\"result\"][i][\"first_name\"]\n if check == False:\n return check\n return check", "def _find_specie_in_str(specie, equation_str):\n check_length = len(specie) + 1\n found_specie = ' %s ' % specie in equation_str \\\n or equation_str[:check_length] == '%s ' % specie \\\n or equation_str[-check_length:] == ' %s' % specie \\\n\n return found_specie", "def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])", "def is_sd(sequence, stringency=\"medium\"):\n if \"T\" in sequence:\n sequence = sequence.replace(\"T\", \"U\")\n if stringency == \"broad\":\n variants = sd_variants_broad[4]\n else:\n variants = sd_variants_medium[4]\n for variant in variants:\n if variant in sequence:\n return True\n return False", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def sees(context, some_string):\n response_string = context.response.get_data(as_text=True)\n fail_msg = \"{} not in {}\".format(some_string, response_string)\n assert some_string in response_string, fail_msg", "def hasSuffix(self, s):\n node, off = self.followPath(s)\n if node is None:\n return False # fell off the tree\n if off is None:\n # finished on top of a node\n return '$' in node.out\n else:\n # finished at offset 'off' within an edge leading to 'node'\n return node.lab[off] == '$'", "def _ProcessSubstring(self, substring):\n if not substring:\n return\n stripped_substring = StripStartParens(substring)\n stripped_remaining = StripStartParens(self.remaining_string)\n if not stripped_remaining.startswith(stripped_substring):\n raise BadlySpecifiedTemplateError(\n 'string \"{}\" should be in string \"{}\"'\n .format(stripped_substring, stripped_remaining))\n self.remaining_string = self.remaining_string.split(\n stripped_substring, 1)[1]", "def is_item_substring(msg, i):\n if msg[i] == \"(\" and i + 4 <= len(msg):\n return msg[i:i + 4] == \"({})\"\n else:\n return False", "def _is_simple_match(topic: str) -> bool:\n return not (\"+\" in topic or \"#\" in topic)", "def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())", "def new_property_1(string):\n length = len(string)\n for offset in range(length-3):\n sub_string = string[offset:offset+2]\n if string.count(sub_string) > 1:\n return True\n return False", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def has_prefix_some(s, prefix_set):\n\tfor 
prefix in prefix_set:\n\t\tif s.find(prefix, 0) != -1:\n\t\t\treturn True\n\treturn False", "def topic_pattern_match(pattern):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics()\n topics = topic_metadata.topics\n filtered_topics = {key: value for key, value in topics.items() if contains_substring(key, pattern)}\n return len(filtered_topics) > 0", "def find_substring(pattern, target):\n # Eliminate trivial cases.\n n = len(target)\n m = len(pattern)\n if (not n or not m or m > n):\n return False\n #\n # Search by comparing hashes.\n pattern_hash = hash(pattern)\n for string_start in range(n - m + 1):\n string_end = string_start + m\n if pattern_hash == hash(target[string_start:string_end]):\n return True\n return False", "def title_contains(title_substring):\n title_substring = title_substring.encode('ascii')\n def f(win):\n t = conv(win.title)\n return title_substring in t\n return f", "def valid_start(start, lines):\r\n if start.isalpha(): # start word must be alphabetic\r\n if len(start) > 1: # start word must be larger than 1 character\r\n if start in lines: # start word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Start word not in list of words....please reenter\"\r\n else:\r\n return \"Start word must contain more than one letter....please reenter\"\r\n else:\r\n return \"Start word must contain only letters....please reenter\"", "def has_string(filepath, string):\n with open(filepath) as yaml_file:\n for line in yaml_file:\n if string.search(line):\n return True\n return False", "def timestamp_line(content):\n return re.match(r\"((\\d\\d:){2}\\d\\d),(\\d{3}) --> ((\\d\\d:){2}\\d\\d),(\\d{3})\", content) is not None", "def search_text_in_log_file(self, text) :\n try:\n with open(self.file_path_name, 'r') as searchfile:\n for line in searchfile:\n if text in line:\n return True \n return False \n except: \n print 'The log : ' + self.file_path_name + 'cannot be opened'", "def compare(self, substring, needle):\n if len(substring) < len(needle):\n return False\n for i in range(len(needle)): \n if substring[i] != needle[i]:\n return False \n return True", "def test_search_must_not_start_at_the_beginning(self):\n pattern = \"cde\"\n s = \"abcdefabcdef\"\n self.assertEqual(__, re.search(pattern, s).group())", "def is_resent(self):\n return self.unixtext.find(\"...RESENT\") > 0", "def test_extract_strings_with_rollout(should_index_tag_values, expected):\n outer_message = _construct_outer_message(\n [\n (counter_payload, []),\n (distribution_payload, []),\n (set_payload, []),\n ]\n )\n batch = IndexerBatch(UseCaseKey.PERFORMANCE, outer_message, should_index_tag_values, False)\n\n assert batch.extract_strings() == expected", "def find_log_lines_with_content(self, prefixes, needle_substring, required_count, return_first_line):\n required_count = int(required_count)\n\n def check_function():\n all_data = []\n for prefix in prefixes:\n print('Analyzing prefix {}'.format(prefix))\n all_files = self.get_files_in_bucket(prefix=prefix,\n limit=self.S3_LISTING_REQUEST_LIMIT)\n for file in all_files:\n print('Analyzing file {}'.format(file))\n content = self.get_file_content_from_bucket(file)\n for line in content.splitlines():\n if needle_substring in line:\n data = json.loads(line)\n all_data.append(data)\n if len(all_data) >= required_count and required_count != 0:\n return all_data\n\n if len(all_data) > 0 and required_count == 0:\n return all_data\n\n result = legion_test.utils.wait_until(check_function,\n 
self.WAIT_FILE_TIME, self.WAIT_FILE_ITERATIONS)\n if not result:\n raise Exception('{} log line(s) with {!r} has not been found'.format(required_count, needle_substring))\n if return_first_line:\n return result[0]\n else:\n return result", "def hasConstantForm(self, sentence):", "def test_evaluate_substring_expression(self):\n value = self.evaluate_common(\"substring('startswith',1,4)\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"tart\")\n value = self.evaluate_common(\"substring('startswith',1)\")\n self.assertTrue(value.value == \"tartswith\")\n try:\n value = self.evaluate_common(\"substring('startswith',1.0D,4)\")\n self.fail(\"double as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"substring('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def check_string(s, strings):\n for string in strings:\n if string not in s:\n return False\n return True", "def check_if_string_in_file(string_to_search):\n # Open the file in read only mode\n with open('MyFile.txt', 'r') as CityNames:\n # Read all lines in the file one by one\n for line in CityNames:\n # For each line, check if line contains the string\n if re.match(string_to_search,line):\n if len(string_to_search)==len(line.strip()):\n print(line,string_to_search)\n return True\n CityNames.close() \n return False", "def get_str_between_s_and_e(start_str, end_str, line):\n\tstart = line.find(start_str)\n\tif start >= 0:\n\t\tstart = start + len(start_str)\n\t\tend = line.find(end_str, start)\n\t\tif end >= 0:\n\t\t\treturn line[start:end].strip()\n\telse:\n\t\treturn None", "def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass", "def test_match_must_start_at_the_beginning(self):\n third_letter = \"c\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(third_letter, s))", "def isPrefix(string, dictionary):\n strLen = len(string)\n for word in dictionary:\n if word[:strLen] == string:\n return True\n return False", "def is_string_in_file(file_name, string_to_search):\n with open(file_name, 'r') as f:\n for line in f:\n if string_to_search in line:\n return True\n return False", "def is_interesting(x):\n if any(x.startswith(y) for y in (\"0000\", \"0004\", \"0400\", \"0404\")):\n return False\n # The TM can't leave a state once it gets to it.\n # Either it doesn't use that state, or it loops\n # Neither is TM we are interested in\n if \"0\" == x[0] == x[2] == x[4] or \"1\" == x[6] == x[8] == x[10]:\n return False\n return True", "def match_example():\n global example\n pattern = r'^[a-z]+$'\n return len(re.findall(pattern, example)) > 0", "def check(word):\n for i in range(1, target_length):\n if word[0:i] in fragments and word[i:] in fragments:\n print(\"%s + %s => %s\" % (word[0:i], word[i:], word))", "def check(self, line):\n if not isinstance(line, str):\n raise TypeError(\"Parameter 'line' not a 'string', is {0}\".format(type(line)))\n if line in self.contents:\n return line\n return False", "def test_match_can_find_longer_sequences_starting_at_beginning_of_string(self):\n first_three_letters = \"abc\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(first_three_letters, s).group())", "def version_is_full_release(version_string):\n match = VERSION_REGEX.match(version_string)\n\n if match and match.groupdict()[\"modifier\"] == \"\":\n return True\n else:\n return False", "def search_for_string(lst_str, stringy):\n if stringy in lst_str:\n return 
\"Found string\"\n\n else:\n return \"string not found\"", "def count_request_contains_str(sting_input):\n request_list = var_cache['local'].get_request_list()\n match_count = 0\n for url in request_list:\n if url.find(sting_input) > -1:\n match_count += 1\n return match_count", "def check_full(self, tax_str):\n\n taxa = [x.strip() for x in tax_str.split(';')]\n if len(taxa) < len(Taxonomy.rank_prefixes):\n self.logger.error('Taxonomy string contains too few ranks:')\n self.logger.error('%s' % str(taxa))\n return False\n elif len(taxa) > len(Taxonomy.rank_prefixes):\n self.logger.error('Taxonomy string contains too many ranks:')\n self.logger.error('%s' % str(taxa))\n return False\n\n for r, taxon in enumerate(taxa):\n if taxon[0:3] != Taxonomy.rank_prefixes[r]:\n self.logger.error('Taxon is not prefixed with the expected rank, %s.:' % Taxonomy.rank_prefixes[r])\n self.logger.error('%s' % str(taxa))\n return False\n\n return True", "def contains(self, searchstr: str):\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index < 0:\n return False\n return True", "def find_str(self, find_exp, where):\n found = False\n for item in where:\n if find_exp in str(item):\n self.assertTrue(True)\n found = True\n break\n if not found:\n self.assertTrue(False)", "def is_section(line: str) -> bool:\n return len(line) > 0 and (line[0] == '[' and line[len(line) - 1] == ']')", "def has_text(logfile: str, text: str) -> bool:\n time.sleep(0.10) # allow time for log to flush\n with open(logfile, encoding='utf-8') as fh:\n for line in fh.read().strip().split('\\n'):\n if text in line:\n break\n else:\n return False\n return True", "def _contains_synset(cq: str) -> bool:\n return re.search(r'\\[.*\\]', cq) is not None", "def check_event_occurrence(\n self, file_path, pass_string_regex, silent_discard=False\n ):\n if not check_if_string_in_file(file_path, pass_string_regex, re.DOTALL):\n if not silent_discard:\n assert False, \"Triggered Event was not found in the log\"\n else:\n return False\n else:\n return True", "def find_in_cddout(expected_string):\n for pos in range(0,len(cddout)):\n l = cddout[pos].strip();\n if l==expected_string: \n # must not assign to cddout in nested function\n for i in range(0,pos+1): \n cddout.pop(0)\n return True\n return False", "def check_strings(aword, anotherword):\n if aword == anotherword:\n return True\n else:\n return False", "def match_start_string(list_to_search, substring):\n # Whitespace is stripped before and after the substring,\n # but not within (e.g. \" New York City \" -> \"New York City\").\n clean_substring = substring.lstrip().rstrip().lower()\n items_found = []\n ([items_found.append(item) for item in list_to_search\n if clean_substring == item[:len(clean_substring)].lower()])\n return items_found", "def search(self, w: str) -> bool:\n if not w:\n return self.end\n return w[0] in self.d and self.d[w[0]].search((len(w) > 1 and w[1:]) or '')", "def is_dz_free(dz_string, substring=\"none\"):\n if substring not in dz_string:\n return 1\n else:\n return 0", "def __match_begin(string):\n pattern = '<h2 id=\"books-last30\">Top 100 EBooks last 30 days</h2>'\n r = re.compile(pattern)\n return r.search(string)", "def _is_start(self, line):\n if re.match(\".*\\:\\s*\\(groupid\", line):\n return True" ]
[ "0.68737906", "0.6771097", "0.6496623", "0.6410618", "0.63869303", "0.63129365", "0.61710656", "0.61576027", "0.6072304", "0.6054336", "0.6003412", "0.5982007", "0.596408", "0.576148", "0.5692559", "0.5674016", "0.5639905", "0.563524", "0.56121904", "0.56079257", "0.55621016", "0.5531923", "0.5531761", "0.5511909", "0.5508653", "0.5506425", "0.5497619", "0.54950774", "0.54804045", "0.5453835", "0.5451836", "0.54417497", "0.54353446", "0.542388", "0.54162645", "0.5406406", "0.5403949", "0.5403013", "0.53861624", "0.5385119", "0.5357979", "0.5355595", "0.5337102", "0.5327562", "0.53107804", "0.53074974", "0.53041065", "0.5301405", "0.5300156", "0.52989984", "0.52915823", "0.52835065", "0.52791125", "0.5278946", "0.5278017", "0.5269306", "0.5268749", "0.5266399", "0.5258091", "0.5257303", "0.5252629", "0.5247971", "0.52434075", "0.5231756", "0.52305627", "0.522866", "0.522481", "0.5201886", "0.5183006", "0.5175597", "0.5172223", "0.5160507", "0.5158431", "0.51429826", "0.51341873", "0.51276195", "0.5123874", "0.5117881", "0.511523", "0.5111891", "0.5111185", "0.51094943", "0.51069444", "0.51010144", "0.5099089", "0.5093792", "0.5093759", "0.50936526", "0.5092936", "0.50769764", "0.50754195", "0.50743973", "0.5072072", "0.5069626", "0.50672644", "0.5066599", "0.506167", "0.5057397", "0.5056707", "0.50487345" ]
0.82817847
0
count occurrences of a substring in the starter log
def count_occurances_in_starter_log(self, substring: str):
        number_of_occurances = self.get_log_file().count(substring)
        return number_of_occurances
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_substring(string, sub_string):\n return string.count(sub_string)", "def count_sub(sub, s):\n count = 0\n for i in range(len(s) - len(sub) + 1):\n if s[i:i + len(sub)] == sub:\n count += 1\n return count", "def recCountString():\r\n target = raw_input(\"Enter target string: \")\r\n key = raw_input(\"Enter key string: \")\r\n matches = subStringMatchExact(target,key)\r\n print \"match(es) =\",matches", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def string_freq(self, query_str):\n found = self.search_prefix(query_str)\n # if query is found, go to that node\n if found:\n node = self.saved_node\n # extract relevant count that had been performed during insertion of words and traversal of nodes\n count = node.same_prefix_count\n else:\n return 0\n return count", "def countSubStringMatch(target,key):\n count = 0\n for i in range(0,len(target)-len(key)):\n if target[i:i+len(key)] == key:\n count += 1\n return count", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def check_that_starter_log_contains(self, substring: str):\n if self.count_occurances_in_starter_log(substring) > 0:\n return\n else:\n raise Exception(\n f\"Expected to find the following string: {substring}\\n in this log file:\\n{str(self.log_file)}\"\n )", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def countSubStringMatchRecursive(target,key,count):\r\n print target\r\n index = find(target,key)\r\n if index < 0 :\r\n return 0\r\n else :\r\n count += countSubStringMatchRecursive(target[index+len(key):len(target)+1],key,count)\r\n count += 1\r\n print count\r\n return count", "def numberOfSubstrings(self, s: str) -> int:\n i = 0\n res = 0\n d = {c:0 for c in 'abc'}\n \n for j, val in enumerate(s):\n d[val] += 1\n while all(d.values()):\n d[s[i]] -= 1\n i += 1\n res += i\n \n return res", "def occurrences(substring, string, sensitive=True):\n pos = -1\n o = []\n if not sensitive:\n substring = substring.lower()\n string = string.lower()\n while True:\n pos = string.find(substring, pos + 1)\n if pos == -1:\n return o\n else:\n o.append([pos, pos + len(substring)])", "def count_request_contains_str(sting_input):\n request_list = var_cache['local'].get_request_list()\n match_count = 0\n for url in request_list:\n if url.find(sting_input) > -1:\n match_count += 1\n return match_count", "def count(self):\n string_count = 0\n string = ['abc', 'xyz', 'aba', '1221']\n for elements in string:\n length = len(elements) \n if length >= 2:\n if elements[0] == elements[-1]: \n string_count +=1\n print(\"String count :\", string_count)", "def 
find_substring(string):\n sub_s = \"\"\n if len(string) == 1:\n print(1)\n for k in range(0, len(string) // 2):\n sub_s = sub_s[:k] + string[k]\n pos = 0\n next_pos = string.find(sub_s, pos + k + 1)\n count = 1\n while next_pos != -1 and next_pos == pos + k + 1:\n count += 1\n pos += k + 1\n next_pos = string.find(sub_s, pos + k + 1)\n if next_pos == -1 and pos == len(string) - k - 1:\n return count\n return 0", "def count_sub(dna, sub):\n sub_len = len(sub)\n dna_len = len(dna)\n count = 0\n\n # iterate over each char of the dna string\n for start in range(dna_len):\n # if we find a match for our substring - reset the current counter\n if dna[start:start+sub_len] == sub:\n tmp_count = 0\n # count how many consecutive occurrences we find\n while dna[start:start+sub_len] == sub:\n tmp_count += 1\n start += sub_len\n # update counter if we find a bigger number of consecutive occurrences\n if tmp_count > count:\n count = tmp_count\n # return the max number of consecutive occurrences\n return count", "def count_hi(str):\n return str.count(\"hi\")", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def prefix_freq(self, query_str):\n # if query input is empty, return all strings\n if query_str == '':\n return len(self.text)\n found = self.search_prefix(query_str)\n # if query is found, go to that node\n if found:\n node = self.saved_node\n # extract relevant count that had been performed during insertion of words and traversal of nodes\n count = node.prefix_count\n else:\n return 0\n return count", "def count(self, word):\n pass", "def kncount(self, string, prefixes=None): ###\n if prefixes == None:\n prefixes = list(self.dist(\"\").keys())\n return sum([self.count(p + string) >= 1 for p in prefixes])", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def _substring_occurrences(\n cls, in_str: str, substrings: Iterable[str]\n ) -> Dict[str, List[int]]:\n occurrences = {}\n for substring in substrings:\n occurrences[substring] = list(findall(substring, in_str))\n return occurrences", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def match_specific_string(input_data: list, keyword: str) -> int:\n number_of_words = 0\n for element in input_data:\n number_of_words += len(re.findall(keyword, element, re.IGNORECASE))\n return 
number_of_words", "def count_common_prefix(str_seq, prefix):\r\n\r\n count = 0\r\n for element in str_seq:\r\n if element.startswith(prefix):\r\n count += 1\r\n return count", "def count_abbas(str):\r\n i = 0\r\n count = 0\r\n for i in range(0, len(str)):\r\n if str.startswith(\"abba\", i):\r\n count += 1\r\n return count", "def count(words: list, string: str) -> int:\n count = 0\n for word in words:\n if word in string:\n count += 1\n return count", "def count(text):\n return len(text)", "def subtrace_count(self, trace, subtrace):\n if len(subtrace) == 0:\n return 0\n\n count = 0\n\n tr = list(map(lambda ac: self.activity_concept_name(ac), trace))\n\n for index in range(len(tr) - len(subtrace) + 1):\n slice = tr[index:index + len(subtrace)]\n\n if subtrace == slice:\n count += 1\n\n return count", "def __count_of_keyword_matches__(self, citation_string, key_string):\n matches = 0\n for keyword in self.dict_of_keywords.get(key_string):\n pattern = re.compile(keyword)\n if pattern.search(citation_string):\n matches += 1\n return matches", "def count(self, sub) -> int:\n pass", "def keyword_count(searches, doc):\n for search in searches:\n print \"\\\"{0}\\\": {1}\".format(search, len(re.findall(searches[search], doc)))", "def string_substring_count(\n word,\n letter,\n):\n count = 0\n index = 0\n while index < len(word):\n result = word.find(letter, index)\n if result != -1:\n count += 1\n index = result + 1\n else:\n index += 1\n\n return count", "def log(line):\n try:\n terms = line[:-1].split(' ')\n size[0] += int(terms[-1])\n code = int(terms[-2])\n if code in codes:\n codes[code] += 1\n except:\n pass", "def test_number_start_word():\n assert syllapy.count(\"4dog\") == 0", "def count(a, sub, start=0, end=None):\n return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))", "def get_matches_count():\n\n return ''\n \"\"\"\n TODO: count matches\n dtr5app_flag.sender\n dtr5app_flag.receiver\n dtr5app_flag.flag\n \"\"\"", "def count_words_and_dublicates(novel):", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def total_occurrences(word1, word2, flag):\n result = 0\n word1_length = len(word1)\n for i in range(word1_length):\n if word1[i] == flag:\n result += 1\n\n word2_length = len(word2)\n for i in range(word2_length):\n if word2[i] == flag:\n result += 1\n\n return result", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def freq(word, document):\n return document.split(None).count(word)", "def CountAt(url):\r\n return url.count('@')", "def ingsuffix(self):\n file = self.read1()\n count = 0\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i.endswith(\"ing\"):\n count = count + 1\n self.print(count)\n logging.debug(\"Starting with to\")\n return count", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count 
locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def test_ababab():\n assert part_01.count_for('ababab', 2) == 0\n assert part_01.count_for('ababab', 3) == 1", "def word_count(phrase):\n return collections.Counter(phrase.split())", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def test_aabcdd():\n assert part_01.count_for('abbcdd', 2) == 1\n assert part_01.count_for('aabcdd', 3) == 0", "def string_num_matches(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.matches()", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def requests_count(regexp, data):\n requests_list = re.findall(regexp, data)\n return int(list(Counter(requests_list).values())[0])", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def vc_counter(str_sentence):\n\n # Filtering the vowels from param string\n vowels = \"\".join([str(c) for c in str_sentence if c in VOWELS])\n\n # Filtering the consonants from param string\n consonants = \"\".join([str(c) for c in str_sentence if c in CONSONANTS])\n\n # Returning dictionary with total values of filters\n return {\"total_vowels\": len(vowels), \"total_consonants\": len(consonants)}", "def part1(fname: str) -> int:\n return sum(len(set(''.join(group))) for group in get_data(fname))", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def count():", "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def count(s, value):\n total, index = 0, 0\n while index < len(s):\n element = s[index]\n if element == value:\n total += 1\n index += 1\n return total", "def longest_common_substring_backtrack(s1, s2, i, j, count):\n if i == len(s1) or j == len(s2):\n return 0\n\n if s1[i-1] == s2[j-1]:\n count = 1 + longest_common_substring_backtrack(s1, s2, i+1, j+1, count)\n\n else:\n count = max(count,\n max(longest_common_substring_backtrack(s1, s2, i+1, j, count),\n longest_common_substring_backtrack(s1, s2, i, j+1, count)))\n\n return count", "def count(word):\n\n return len(word)", "def test_suite():\n test(count(\"is\", \"Mississippi\") == 2)\n test(count(\"an\", \"banana\") == 2)\n test(count(\"ana\", \"banana\") == 2)\n test(count(\"nana\", \"banana\") == 1)\n test(count(\"nanan\", \"banana\") == 0)\n test(count(\"aaa\", \"aaaaaa\") == 4)", "def countdots(url): \r\n return url.count('.')", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def word_count(string):\n counts = dict()\n words = string.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return len(counts)", "def track_count(search_string):\n track_results = sp.search(search_string, 
type='track', limit=1, offset=0)\n return track_results['tracks']['total']", "def countOccurrences(self, wordsToCheck):\n count = 0\n for token in self.importantTokenList():\n w = token.text\n for wtc in wordsToCheck:\n if wtc == w:\n count = count + 1\n return count", "def occurences(words):\n\n\t# Add your code here\n\treturn", "def Counting(seq):\n\n #Scan the sequence, looking for motifs\n\n counting = {k: 0 for k in MOT} # Initialize the counting dictionary.\n # Scan all the motifs and find them in the sequence\n for motif in MOT:\n if len(seq) > len(motif): # Check if the sequence is longer than the motif itself.\n for i in range(len(seq)-len(motif)+1):\n if i == 0: # In case the motif is in the beginning of the sequence\n # print(\"start: \" + seq[i:i+len(motif)] + \" next nuc: \" + seq[i+len(motif)])\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0]: # Check if the next nucleotide is in not part of the motif.\n counting[motif] += 1\n elif i == len(seq)-len(motif): # In case the motif is in the end of the sequence\n \n if seq[i:i+len(motif)] == motif and seq[i-1] != motif[0]: # Check if the previuos nucleotide is in not part of the motif.\n counting[motif] += 1\n elif len(seq) > len(motif)+1: # In case the motif is in the middle of the sequence.\n # Check if the motif is not part of another motif (e.g. TT is in TTT).\n\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0] and seq[i-1] != motif[0]:\n counting[motif] += 1\n for nuc_nr in NUC_NR:\n counting[nuc_nr+\"_NR\"] = seq.count(nuc_nr)\n\n return counting", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices", "def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def main():\n string = input()\n count_ = 0\n for i in range(len(string)-2):\n if (string[i] == \"b\" and string[i+1] == \"o\" and string[i+2] == \"b\"):\n count_ = count_+1\n print(count_)", "def count_words(contain_first, first_letter, letter_set, trie_node, matched_words):\n for letter in trie_node.keys():\n if letter == \"$\" and contain_first:\n matched_words.append(trie_node['$'])\n else:\n if letter in letter_set:\n is_first = (letter == first_letter)\n count_words(contain_first or is_first, first_letter, letter_set, trie_node[letter], matched_words)", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def main ():\n fio = FileIo(\"../input2.txt\")\n text = fio.getInput()\n p = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n out = filter(None, p.split(text))\n #print out[2]\n #print len(out)\n wc = 0\n\n for s in out:\n text = nltk.word_tokenize(s)\n wc += wordCount( text )\n print wc", "def indapproxpattern(pattern, string, nummismatch):\n\n indarr = []\n# substringarr = []\n numchars = len(pattern)\n\n for i in xrange(0, len(string) - numchars + 1):\n \n substring = patterncount.subtext(string, i, numchars)\n \n if hammingdist(pattern, substring) <= nummismatch:\n \n indarr.append(i)\n# substringarr.append(substring)\n \n 
return indarr", "def score_seq(self, seq, verbose=False):\n score = 0.0\n count = 0\n # Start at third word, since we need a full context.\n for i in range(2, len(seq)):\n if (seq[i] == \"<s>\" or seq[i] == \"</s>\"):\n continue # Don't count special tokens in score.\n s = np.log2(self.next_word_proba(seq[i], seq[i-2:i]))\n score += s\n count += 1\n # DEBUG\n if verbose:\n print(\"log P({:s} | {:s}) = {.03f}\".format(seq[i], \" \".join(seq[i-2:i]), s))\n return score, count", "def count_exclamations(txt):\n count = 0\n for c in txt:\n if c == '!':\n count += 1\n return count", "def test_number_end_word():\n assert syllapy.count(\"dog123\") == 0", "def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result", "def count_phrase_in_text(phrase: str, text: str):\n count = 0\n # Remove leading and trailing white spaces\n phrase = phrase.strip()\n # Substitute multiple whitespace with single whitespace\n phrase = ' '.join(phrase.split())\n if text.startswith(phrase + \" \"):\n count += 1\n if text.endswith(\" \" + phrase + \"\\n\") or text.endswith(\" \" + phrase) or \\\n text.endswith(\" \" + phrase + \"\\r\\n\") or text.endswith(phrase):\n count += 1\n count += len(text.split(\" \" + phrase + \" \")) - 1\n return count", "def word_count(s):\n # Your code here\n\n stop_char = r\"\"\":;\",.-+=/|[]{|}()*^\\&\"\"\"\n\n # Make sure special characters arent in string\n s_clean = \"\".join([x for x in s if x not in stop_char])\n\n # Lower case and remove trailing space\n word_list = s_clean.lower().split()\n\n # use cache to hold memory\n word_count = {}\n\n for x in word_list:\n\n if x not in word_count:\n # if not there, start it at 0\n word_count[x] = 0\n\n # if seen again, increase count\n word_count[x] += 1\n\n return word_count", "def _word_counter(input_string: str) -> Counter:\n return Counter(re.findall(pattern='\\w+', string=input_string.lower()))", "def count_word(word, titles):\n word = word.lower()\n count = 0\n for title in titles:\n if word in title.lower():\n count += 1\n return count", "def test_getcount(inputstr, result):\n from vowel_count import getcount\n assert getcount(inputstr) == result", "def count_until(match_value):", "def beautifulBinaryString(binary_string) -> int:\n sub_str = \"010\"\n count = 0\n start_index = 0\n\n while start_index <= len(binary_string):\n end_index = start_index + 3\n slice = binary_string[start_index:end_index]\n\n if sub_str == slice:\n count += 1\n start_index = end_index\n else:\n start_index += 1\n\n return count", "def analyze(filename):\n\n start = datetime.datetime.now()\n found = 0\n new_ones = []\n\n # read file into a generator\n lines_generator = (line for line in open(filename, encoding=\"ISO-8859-1\"))\n\n # read generator into a list comprehension\n lists_generator = (l.split(\",\") for l in lines_generator)\n\n for line in lists_generator:\n if 'ao' in line[6]:\n found += 1\n lrow: List[str] = list(line)\n if lrow[5] > '00/00/2012':\n new_ones.append((lrow[5], lrow[0]))\n print(f\"'ao' was found {found}, times\")\n end = datetime.datetime.now()\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n # create yyyy from tuple, start at char(6) and grab to end of string\n # for each yyyy, add 1 yyyy if yyyy 2013-2017\n for new in new_ones:\n if new[0][6:] == '2013':\n year_count[\"2013\"] += 1\n if new[0][6:] == '2014':\n year_count[\"2014\"] += 1\n if new[0][6:] == '2015':\n year_count[\"2015\"] += 1\n if new[0][6:] == '2016':\n 
year_count[\"2016\"] += 1\n if new[0][6:] == '2017':\n year_count[\"2017\"] += 1\n if new[0][6:] == '2018':\n year_count[\"2017\"] += 1\n print(year_count)\n return start, end, year_count, found", "def count_occurrences(article_json, selected_word):\n selected_word = selected_word.lower()\n total_titles = 0 # some rows miss the title field, so not using len()\n selected_word_counter = 0\n for row in article_json:\n if 'title' in row:\n title = row['title']\n total_titles += 1\n for word_in_title in title.lower().split():\n if word_in_title == selected_word:\n selected_word_counter += 1\n return total_titles, selected_word_counter" ]
[ "0.69256353", "0.6731468", "0.6658297", "0.6571659", "0.6564363", "0.6540324", "0.6493924", "0.6485271", "0.64711386", "0.6334691", "0.63335073", "0.6273863", "0.626735", "0.6240192", "0.62162554", "0.6199468", "0.6196827", "0.61422545", "0.6131402", "0.6095801", "0.6036622", "0.6029487", "0.59793204", "0.5976236", "0.5971932", "0.5971555", "0.59228486", "0.5888577", "0.58613247", "0.58471304", "0.5845242", "0.58417225", "0.5808614", "0.5802501", "0.5774717", "0.57727957", "0.5763927", "0.57317036", "0.5717495", "0.5704525", "0.5697883", "0.56520504", "0.5636825", "0.5635224", "0.5622646", "0.5620614", "0.55908173", "0.5578399", "0.55719423", "0.55701", "0.5560399", "0.5559002", "0.5540094", "0.5534947", "0.5530215", "0.5529137", "0.5523375", "0.55128276", "0.5497573", "0.54946417", "0.54904425", "0.5475413", "0.5468348", "0.546529", "0.5460872", "0.5444928", "0.544183", "0.5425083", "0.54160756", "0.54110104", "0.5404877", "0.54047656", "0.5401771", "0.5399946", "0.5393775", "0.53814393", "0.5378436", "0.53716743", "0.5368505", "0.53599", "0.5359303", "0.53566843", "0.5351613", "0.5351488", "0.5351212", "0.53478366", "0.53470564", "0.53418374", "0.53336316", "0.53290415", "0.5325399", "0.5324575", "0.5320737", "0.5317953", "0.5316359", "0.53087837", "0.53020054", "0.53019756", "0.5297546", "0.5294821" ]
0.8001489
0
fake run starter method
def run_starter(self, expect_to_fail=False):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startTestRun(self):", "def test_get_run(self):\n pass", "def run(_):\n pass", "def Run():\r\n pass", "def runtest(self):", "def run_experiment():\n pass", "def run():\n main()", "def runTests(self):\n \n pass", "def test_run_started(self):", "def run_test(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def main():\n run_test_all()", "def custom():\n run(\"example\")", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def run(self, test, env):\n\n raise NotImplementedError", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def RUN(self):", "def run(self):\n \n pass", "def test_script(self) -> None:\n main()", "def run(self, **kwargs):", "def run(self):\r\n pass", "def run_tests(self):\n raise NotImplementedError", "def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass", "def startTestHook(self):", "def init_run(self):\n raise NotImplementedError", "def main():\n tng.api.runner()", "def Run(self):\n pass", "def _initialise_run(self) -> None:", "def run(self): \r\n return", "def setup(self, run, run_id):\n\n raise NotImplementedError", "def complete_run():\n pass", "def _run(self):\n # We usually override this in __init__\n # pylint: disable=method-hidden\n return", "def run(ctx):\n pass", "def run_one_step(self):\n pass", "def main():\n pass", "def run(self, **kwargs):\n pass", "def test_run(self):\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5", "def run(self, *args, **kwargs):\n pass", "def run(self) -> None:\n log.critical('Not implemented')", "def test_create_run(self):\n pass", "def _run(self, *args, **kwargs):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run():\n # main(sys.argv[1:])\n main()", "def __main() :\n launchTests()", "def Run(self, args):\n pass", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n 
main(sys.argv[1:])", "def run(self, args):\n pass", "def main(self):\r\n pass", "def main(self) -> None:\n pass", "def TestOneStep(self):\n pass", "def run(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError()", "def run(self):\n\t\t\n\t\tpass", "def run(self):\n self.run()", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def _run_scenario(self, cls, method_name, context, args, config):", "def test():\n pass", "def main(self, **kwargs) -> None:\n ...", "def test_run(self):\n with patch.object(ConfigedParser, 'pass_arg', new=self.mock_method):\n with patch.object(GyazoUploader, 'valid_credentials', new=self.mock_method2):\n with patch.object(GyazoUploader, 'set_required_args', new=self.mock_method):\n with patch('builtins.print') as mock_print:\n uploader = GyazoUploader(ConfigedParser())\n uploader.run('damian', 'notadirectory')\n mock_print.assert_called_with(\"Indicated directory can't be find!\")", "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):" ]
[ "0.74126184", "0.73602825", "0.7261935", "0.72602695", "0.72600466", "0.7242399", "0.72393954", "0.7110345", "0.71020657", "0.7089417", "0.70837325", "0.7037372", "0.7013169", "0.70076424", "0.6999183", "0.6999183", "0.6967566", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.69292164", "0.6920369", "0.688767", "0.68874156", "0.6872487", "0.68263054", "0.6823699", "0.68229914", "0.6818388", "0.68168193", "0.6791083", "0.677251", "0.676475", "0.67392194", "0.67291427", "0.6684374", "0.66724116", "0.666987", "0.66685957", "0.6664043", "0.66620404", "0.66591716", "0.66444314", "0.662221", "0.6613307", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.6610667", "0.66025525", "0.6592553", "0.6574488", "0.6570408", "0.6570408", "0.6570408", "0.6570408", "0.6570408", "0.6570408", "0.6570408", "0.6570408", "0.656208", "0.65472955", "0.6544529", "0.65391773", "0.6527885", "0.6527885", "0.6519248", "0.6500578", "0.64957464", "0.64894134", "0.6487737", "0.64863336", "0.6424018", "0.6423032", "0.6422818", "0.6422818", "0.6422818", "0.6422818", "0.6422818", "0.6422818", "0.6422818", "0.6422818", "0.6422818" ]
0.8099833
0
Test case for basketballteams_get
def test_basketballteams_get(self):
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_id_get(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def test_teams_get_team_v1(self):\n pass", "def get_teams():", "def test_teams_list(self):\n pass", "def test_teams_read(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_teams_get_users_teams_v2(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_get_team_history(self):\n pass", "def test_gridironfootballplayers_get(self):\n pass", "def test_get_teams(self):\n owner2 = AnotherUserFactory(email_confirmed=True)\n owner3 = AnotherUserFactory(username='team owner 3', email='teamowner3@gmail.com', email_confirmed=True,)\n TeamFactory(owner=owner2, name='second team')\n TeamFactory(owner=owner3, name='third team')\n\n usual_user = UserFactory(\n username='usualuser',\n email='default@email.com',\n email_confirmed=True,\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n response = self.client.get(reverse('api:teams-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 3)", "def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_teams_get_users_teams_v1(self):\n pass", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def test_get_open_requests_by_team(self):\n pass", "def test_teams_create(self):\n pass", "def test_create_team(self):\n pass", "def test_update_team(self):\n pass", "def determine_basketball_outcome_from_api(market, params, enp_id):\n\n n_bet = 1\n outcome = None\n if market == BasketballMarkets.FULL_TIME_POINT_SPREAD:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n\n if selection == BasketballSelections.HOME_TEAM:\n hc_score = score_home + handicap\n if hc_score == score_away:\n outcome = 0\n elif hc_score > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n hc_score = score_away + handicap\n if hc_score == score_home:\n outcome = 0\n elif hc_score > 
score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTPS bet should be ONE or TWO')\n\n elif market == BasketballMarkets.FULL_TIME_MONEYLINE:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n if selection == BasketballSelections.HOME_TEAM:\n if score_home == score_away:\n outcome = 0\n elif score_home > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n if score_away == score_home:\n outcome = 0\n elif score_away > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('selection should be ONE or TWO')\n elif market == BasketballMarkets.FULL_TIME_TOTAL_POINTS:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n score_total = score_home + score_away\n\n if selection == BasketballSelections.OVER:\n if score_total == handicap:\n outcome = 0\n elif score_total > handicap:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.UNDER:\n if score_total == handicap:\n outcome = 0\n elif score_total < handicap:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTTP bet should be OVER or UNDER')\n else:\n raise ValueError('implement more markets')\n\n return outcome, n_bet", "def get_people(team):", "def test_assign_managing_team(self):\n pass", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def test_get_player_battles(self):\n pass", "def test_teams_get_workgroups_v2(self):\n pass", "def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self.assertTrue(survey_dict['uid'].startswith('Survey'))", "def test_user_get_teams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/teams')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_get_waivers(league):\n pass", "def test_complete_teams_not_returned(self):\n args1 = {\n 'name': 'incomplete team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team1 = Team(args1)\n db.session.add(team1)\n db.session.commit()\n args2 = {\n 'name': 'finished team',\n 'capacity': '11',\n 'number_players': '11',\n 'pitch_postcode': 'E2 6LT',\n 'time': '2019-01-01 13:00'\n }\n team2 = Team(args2)\n db.session.add(team2)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'incomplete team', response.data)\n self.assertNotIn(b'finished team', response.data)", "def test_gridironfootballplayers_id_get(self):\n pass", "def test_get_request_only_from_one_team(self):\n another_user3 = AnotherUserFactory(username='anotheruser3', email='anotheruser3@gmail.com')\n another_user4 = AnotherUserFactory(username='anotheruser4', email='anotheruser4@gmail.com')\n another_user5 = 
AnotherUserFactory(username='anotheruser5', email='anotheruser5@gmail.com')\n another_user6 = AnotherUserFactory(username='anotheruser6', email='anotheruser6@gmail.com')\n another_team = TeamFactory(owner=another_user3,\n name='Soul Eaters',\n description='We`ll destroy all the souls. And the age of darkness will come')\n UserTeamRequestFactory(\n user=another_user4,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user5,\n team=another_team,\n )\n UserTeamRequestFactory(\n user=another_user6,\n team=another_team,\n )\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)", "def test_get_game(self):\n pass", "def get_baseball_data():\n\n baseball_data = [\n {\n 'team_name': 'Boston', 'wins': 103, 'losses': 47,\n 'wins_in_last_ten': 7, 'streak': 'W2'\n },\n {\n 'team_name': 'NY Yankees', 'wins': 91, 'losses': 58,\n 'wins_in_last_ten': 4, 'streak': 'L2'\n },\n {\n 'team_name': 'Tampa Bay', 'wins': 83, 'losses': 66,\n 'wins_in_last_ten': 8, 'streak': 'W3'\n },\n {\n 'team_name': 'Toronto', 'wins': 68, 'losses': 82,\n 'wins_in_last_ten': 5, 'streak': 'W3'\n },\n {\n 'team_name': 'Baltimore', 'wins': 43, 'losses': 107,\n 'wins_in_last_ten': 2, 'streak': 'L1'\n },\n {\n 'team_name': 'Cleveland', 'wins': 83, 'losses': 66,\n 'wins_in_last_ten': 4, 'streak': 'L1'\n },\n {\n 'team_name': 'Minnesota', 'wins': 69, 'losses': 81,\n 'wins_in_last_ten': 5, 'streak': 'W2'\n },\n {\n 'team_name': 'Detroit', 'wins': 61, 'losses': 89,\n 'wins_in_last_ten': 4, 'streak': 'L1'\n },\n {\n 'team_name': 'Chi White Sox', 'wins': 59, 'losses': 90,\n 'wins_in_last_ten': 3, 'streak': 'L1'\n },\n {\n 'team_name': 'Kansas City', 'wins': 52, 'losses': 98,\n 'wins_in_last_ten': 6, 'streak': 'L2'\n },\n {\n 'team_name': 'Houston', 'wins': 94, 'losses': 56,\n 'wins_in_last_ten': 7, 'streak': 'L1'\n },\n {\n 'team_name': 'Oakland', 'wins': 90, 'losses': 60,\n 'wins_in_last_ten': 7, 'streak': 'L2'\n },\n {\n 'team_name': 'Seattle', 'wins': 83, 'losses': 67,\n 'wins_in_last_ten': 5, 'streak': 'W1'\n },\n {\n 'team_name': 'LA Angels', 'wins': 74, 'losses': 76,\n 'wins_in_last_ten': 6, 'streak': 'W1'\n },\n {\n 'team_name': 'Texas', 'wins': 64, 'losses': 86,\n 'wins_in_last_ten': 3, 'streak': 'L2'\n }\n ]\n\n return baseball_data", "def test_get_team_strength(self):\n pass", "def test_cyclingleagues_get(self):\n pass", "def get_matches_with_teams():\n\tf = open(\"raw_tba.json\")\n\tjsonvar = json.loads(f.read())\n\n\treturn_val = []\n\tfor i in jsonvar:\n\t\t# print i\n\t\tif \"score_breakdown\" in i and i[\"score_breakdown\"] != None:\n\t\t\treturn_val.append(FullTBAMatch(i))\n\n\treturn return_val", "def test_get_boat(self):\n pass", "def get_available_companies(team):", "def test_get_requests_for_team_by_owner(self):\n\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)", "def getTeam(self):\n return [\"The A-Team\", \"some other bloke\"]", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n 
season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def test_response_for_getting_all_users(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.status_code, 200)", "def test_length_of_teammates_list(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.data, {'status': 200, 'data': []})", "def test_success(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n\n assert response.status_code == status.HTTP_200_OK\n\n response_team = response.json()['results'][0]\n team = Team.objects.get(id=response_team['id'])\n\n assert response_team == get_expected_data_from_team(team)", "def get_info_from_api(team_name):\n if \"-\" in team_name:\n team_name = team_name.replace(\"-\", \"+\")\n if \"brighton\" in team_name: # some teams has different names than in sofa-score\n team_name = \"brighton\"\n if \"leicester\" in team_name:\n team_name = \"leicester\"\n if \"norwich\" in team_name:\n team_name = \"norwich\"\n if \"mallorca\" in team_name:\n team_name = \"mallorca\"\n if \"parma\" in team_name:\n team_name = \"parma+calcio\"\n if \"bayern\" in team_name:\n team_name = \"bayern\"\n if \"koln\" in team_name:\n team_name = \"fc+koln\"\n if \"union+berlin\" in team_name:\n team_name = \"union+berlin\"\n if \"fsv+mainz\" in team_name:\n team_name = \"mainz\"\n if \"hoffenheim\" in team_name:\n team_name = \"hoffenheim\"\n if \"mgladbach\" in team_name:\n team_name = \"borussia+monchengladbach\"\n if \"schalke\" in team_name:\n team_name = \"schalke\"\n if \"leverkusen\" in team_name:\n team_name = \"leverkusen\"\n if \"paderborn\" in team_name:\n team_name = \"paderborn\"\n print(team_name)\n response = requests.get(cfg.API_URL + team_name)\n team_data = json.loads(response.text)\n return team_data['teams'][0]", "def test_get_urls():\n\n year = \"2018\"\n week = \"1\"\n\n assert ff_team.get_game_urls(year, week).get(\n 'Atlanta Falcons') == \"https://www.pro-football-reference.com/boxscores/201809060phi.htm\"", "def test_get_teams_in_year_names():\n assert sorted(gtiy(2008)) == sorted(team_2008)\n assert sorted(gtiy(2009)) == sorted(team_2009)\n assert sorted(gtiy(2010)) == sorted(team_2010)\n assert sorted(gtiy(2011)) == sorted(team_2011)\n assert sorted(gtiy(2012)) == sorted(team_2012)\n assert sorted(gtiy(2013)) == sorted(team_2013)\n assert sorted(gtiy(2014)) == sorted(team_2014)\n assert sorted(gtiy(2015)) == sorted(team_2015)\n assert sorted(gtiy(2016)) == sorted(team_2016)\n assert sorted(gtiy(2017)) == sorted(team_2017)", "def test_teams_partial_update(self):\n pass", "def test_teams_get_workgroups_v1(self):\n pass", "def 
test_user_get_team_page():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/team/2')\n assert r.status_code == 200\n destroy_ctfd(app)", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def test_get_league_leaders___goaltending(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___goaltending(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)", "def test_add_team_member(self):\n pass", "def get_fb_team_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball team rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name FROM team\")\n teams = cursor.fetchall()\n\n for team_id, team_name in teams:\n cursor.execute(\"SELECT fb_team_rating FROM \\\nteam WHERE team_id = {0}\".format(team_id))\n team_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(team_rating[0]))\n mu, sigma = cursor.fetchall()[0]\n\n team_rank = float(mu) - (3 * float(sigma))\n\n # get player_ids\n cursor.execute(\"SELECT player from player_team_xref \\\nWHERE team = {0}\".format(team_id))\n players = cursor.fetchall()\n player_one = players[0]\n player_two = players[1]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_one[0]))\n player_one_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_two[0]))\n player_two_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_winner = {0} AND defense_winner = {1}) OR (offense_winner = {1} \\\nAND defense_winner = {0})\".format(player_one[0], player_two[0]))\n team_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_loser = {0} AND defense_loser = {1}) OR (offense_loser = {1} \\\nAND defense_loser = {0})\".format(player_one[0], player_two[0]))\n team_loss_count = cursor.fetchone()[0]\n\n intermediate_rank = (team_name, round(team_rank, 4),\n team_win_count, team_loss_count, player_one_name,\n player_two_name)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n 
traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def test_get_player_stats_from_game():\n team = \"Titans\"\n year = \"2018\"\n week = \"1\"\n expected_team_stats = {}\n\n assert ff_team.get_player_stats_from_game(\n team, year, week) == expected_team_stats", "def test_team_template_folders_id_team_get(self):\n pass", "def get_companies_and_people(team):", "def test_teams_invite_member(self):\n pass", "def getMergedTeam(self):\n response_obj = dict(\n data=\"\",\n err=\"\",\n code=200\n )\n git_repos_data = []\n bit_repos_data = []\n GIT_TEAM_URI = config.GITHUB_API_ENDPOINT + \"/orgs/{team_name}\".format(team_name=self.team_name)\n git_team_info = self._json_request(GIT_TEAM_URI)\n GIT_REPOS_URI = git_team_info.get('repos_url')\n if GIT_REPOS_URI:\n self.repo = GIT_REPOS_URI\n git_repos_data = self._json_request(GIT_REPOS_URI)\n else:\n print('Cant find any information for this team on github') # you can log some thing here \n \"\"\" or just throw an json error = Not Found\n response_obj.update({\n 'code': 404,\n 'err': 'Cant find any information for this team on github'\n })\n return response_obj\n \"\"\"\n\n # There is a pagination in API. lets do some fix\n BIT_REPOS_URI = config.BITBUCKET_API_ENDPOINT + \"/repositories/{team_name}\".format(team_name=self.team_name)\n if BIT_REPOS_URI:\n bit_repos_data = self._recersive_request(BIT_REPOS_URI)\n else:\n print('Cant find any information for this team on bitbucket') # you can log some thing here\n \"\"\" or just throw an json error = Not Found\n response_obj.update({\n 'code': 404,\n 'err': 'Cant find any information for this team on bitbucket'\n })\n return response_obj\n \"\"\"\n merged_data = self.getMergedObj(git_team_info, git_repos_data, bit_repos_data)\n response_obj.update (merged_data)\n\n return response_obj", "def test_findbymembers(self):\n p1, p2, p3 = self.create3persons()\n model.Team(name='Tigers', persons=[p1, p2, p3]).store()\n model.Team(name='Lions', persons=[p1,p2]).store()\n model.Team(name='Snakes', persons=[p2, p3]).store()\n\n teams = model.Team.find_teams_by_members(p1) # find all teams with p1 (2)\n self.assertEqual(len(teams), 2)", "def test_get_requests_for_team_by_user(self):\n\n token = Token.objects.get(user=self.another_user1)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n params = {'teamID': self.team.id}\n response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def yield_team(self) -> str: # pragma: no cover", "def test_teams_save_team_v1(self):\n pass", "def test_update_team_state(self):\n pass", "def getAllTeams(self):\n return []", "def test_get_game_boxscore(self):\n pass", "def get_available_companies_and_people(team):", "def test_user_list_get_success(client, user1, team1):\n\n team1.members.append(user1)\n DB.session.add(team1)\n DB.session.commit()\n\n response = client.get(\n \"/users\",\n headers={\n \"Accept\": \"application/vnd.api+json\",\n \"Authorization\": f\"Bearer {create_access_token(identity=user1.id)}\",\n },\n )\n team_membership = user1.team_memberships[0]\n assert response.status_code == 200\n assert get_content_type(response) == \"application/vnd.api+json\"\n assert json.loads(response.data.decode()) == {\n 
\"links\": {\"self\": \"http://localhost/users\"},\n \"data\": [\n {\n \"type\": \"users\",\n \"id\": user1.id,\n \"attributes\": {\n \"created_at\": user1.created_at.isoformat(),\n \"updated_at\": user1.updated_at.isoformat(),\n \"is_active\": True,\n \"first_name\": user1.first_name,\n \"last_name\": user1.last_name,\n \"username\": user1.username,\n \"email\": user1.email,\n \"visibility\": user1.visibility,\n },\n \"relationships\": {\n \"team_memberships\": {\n \"data\": [{\"type\": \"team_memberships\", \"id\": team_membership.id}]\n },\n \"teams\": {\"data\": [{\"type\": \"teams\", \"id\": team1.id}]},\n },\n \"links\": {\"self\": f\"http://localhost/users/{user1.id}\"},\n }\n ],\n \"included\": [\n {\n \"type\": \"team_memberships\",\n \"id\": team_membership.id,\n \"attributes\": {\"user_id\": user1.id, \"team_id\": team1.id},\n \"links\": {\n \"self\": f\"http://localhost/team_memberships/{team_membership.id}\"\n },\n },\n {\n \"type\": \"teams\",\n \"id\": team1.id,\n \"attributes\": {\"name\": team1.name},\n \"links\": {\"self\": f\"http://localhost/teams/{team1.id}\"},\n },\n ],\n }", "def test_team_view(self):\n with self.app.app_context():\n u = user(save=True)\n t = team(users=[u], save=True)\n\n response = self.client.get('/team/%s' % t.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/team/not-a-real-team')\n eq_(response.status_code, 404)", "def test_get(self):\n john_gamer = Gamer(self.john)\n john_gamer.gamer.set_new_location(50, 50)\n john_gamer.tasks.start(1)\n\n # in game field, in game, 2 tasks started:\n user_1 = ActiveUser.create()\n gamer_1 = Gamer(user_1)\n gamer_1.gamer.set_new_location(45, 40)\n gamer_1.tasks.start(1)\n gamer_1.tasks.start(2)\n\n # in game field, in game, 1 task started:\n user_2 = ActiveUser.create()\n gamer_2 = Gamer(user_2)\n gamer_2.gamer.set_new_location(55, 60)\n gamer_2.tasks.start(3)\n\n self.client.force_login(self.john)\n\n resp = self.client.get(self.URL)\n\n with self.subTest(\"Test status is correct\"):\n self.assertEqual(\n resp.status_code, status.HTTP_200_OK,\n )\n\n with self.subTest(\"Test response is not empty\"):\n # as we tested 'show_game_field' method in details, here we run smoke test only:\n self.assertNotEqual(\n resp.json(),\n []\n )", "def teams():\n print 'Getting Teams'\n\n substring = \"%\" + request.args.get('t') + \"%\"\n\n team_list = datastore.get_teams_typeahead(engine, substring, max_teams=10)\n\n print 'Teams:', team_list\n return jsonify(team_list)", "def test_brains_get(self):\n pass", "def test_get_teams_in_year_len():\n assert len(gtiy(2008)) == 8\n assert len(gtiy(2009)) == 8\n assert len(gtiy(2010)) == 8\n assert len(gtiy(2011)) == 10\n assert len(gtiy(2012)) == 9\n assert len(gtiy(2013)) == 9\n assert len(gtiy(2014)) == 8\n assert len(gtiy(2015)) == 8\n assert len(gtiy(2016)) == 8\n assert len(gtiy(2017)) == 8", "def test_get_goals(self):\n pass", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def get_all_teams(group):\n base_url = 'http://worldcup.kimonolabs.com/api/teams'\n url = (base_url + '?apikey={key}&group={group}&sort={sort}'\n .format(group=group,\n key='KERbxAUfDYovbQnn9pR3pbLWEMRp47AQ',\n sort='groupRank'))\n r = requests.get(url)\n return r.json()", "def test_handle_list_no_teams(self):\n self.db.query.return_value = []\n 
self.assertTupleEqual(self.testcommand.handle(\"team list\", user),\n (\"No Teams Exist!\", 200))", "def test_old_teams_not_returned(self):\n args1 = {\n 'name': 'new team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team1 = Team(args1)\n db.session.add(team1)\n db.session.commit()\n args2 = {\n 'name': 'old team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E2 6LT',\n 'time': '2010-01-01 13:00'\n }\n team2 = Team(args2)\n db.session.add(team2)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'new team', response.data)\n self.assertNotIn(b'old team', response.data)", "def test_time_league(self):\n result = self.test_client.league\n\n assert isinstance(result, dict)", "def _validteams(self):\n db_filename = self.registryValue('dbLocation')\n with sqlite3.connect(db_filename) as conn:\n cursor = conn.cursor()\n query = \"select team from mlb\"\n cursor.execute(query)\n teamlist = []\n for row in cursor.fetchall():\n teamlist.append(str(row[0]))\n\n return teamlist", "def test_returns_200_if_user_team_member(self):\n # Arrange\n # Create a team and add user to it\n test_team = create_canned_team()\n add_user_to_team(\n test_team, self.test_user, TeamMemberFunctions.MEMBER.value, True\n )\n # Assign team to project\n assign_team_to_project(\n self.test_project, test_team, TeamRoles.PROJECT_MANAGER.value\n )\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n TestGetProjectsRestAPI.assert_project_response(\n response.json, self.test_project, assert_type=\"notasks\"\n )", "def test_get_league_leaders___skaters(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___skaters(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)", "def test_get_one_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='teammate@bar.com',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], teammate.uid)", "def test_get2(self):\n pass", "def get_team(self):\n try:\n team_id = self.request.GET.get('team')\n if team_id is not None:\n team_id = int(team_id)\n return self.get_available_teams().get(pk=team_id)\n return self.get_available_teams().latest()\n except (Team.DoesNotExist, ValueError):\n return None", "def test_get_all_boats(self):\n pass", "def teams(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'teams')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def test_create_new_team(self):\n default_user = AnotherUserFactory(email_confirmed=True)\n token = Token.objects.get(user=default_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Team.objects.filter(name=data['name']).exists())", "def 
test_get(self):\n        pass", "def test_get_sprint_dates_success(self):\n        team = \"swen90013-2020-sp\"\n        response = self.client.get('/api/v1/jira/' + team + '/issues_per_sprint')\n        #print(response.json()[\"data\"])\n        self.assertEqual(response.json()[\"code\"], RespCode.success.value.key, \"response is not success\")", "def test_team_access(self):\n        self.client.login(username='bob@example.org', password=\"password\")\n\n        #a url to make a request to alice's stuff using Bob's credential who is a team member\n        url = \"/accounts/switch_team/alice\"\n        response = self.client.get(url)\n        self.assertNotEqual(response.status_code, 403)\n        self.assertEqual(response.status_code, 301)", "def test_poets_get(self):\n        pass", "def mlbteamleaders(self, irc, msg, args, optteam, optcategory):\n\n        optteam = optteam.upper().strip()\n        optcategory = optcategory.lower().strip()\n        \n        if optteam not in self._validteams():\n            irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n            return\n        \n        category = {'avg':'avg', 'hr':'homeRuns', 'rbi':'RBIs', 'r':'runs', 'ab':'atBats', 'obp':'onBasePct', \n                    'slug':'slugAvg', 'ops':'OPS', 'sb':'stolenBases', 'runscreated':'runsCreated',\n                    'w': 'wins', 'l': 'losses', 'win%': 'winPct', 'era': 'ERA', 'k': 'strikeouts', \n                    'k/9ip': 'strikeoutsPerNineInnings', 'holds': 'holds', 's': 'saves',\n                    'gp': 'gamesPlayed', 'cg': 'completeGames', 'qs': 'qualityStarts', 'whip': 'WHIP' }\n\n        if optcategory not in category:\n            irc.reply(\"Error. Category must be one of: %s\" % category.keys())\n            return\n\n        lookupteam = self._translateTeam('eid', 'team', optteam)\n\n        url = self._b64decode('aHR0cDovL20uZXNwbi5nby5jb20vbWxiL3RlYW1zdGF0cw==') + '?teamId=%s&lang=EN&category=%s&y=1&wjb=' % (lookupteam, category[optcategory]) \n        # &season=2012\n\n        try:\n            req = urllib2.Request(url)\n            html = (urllib2.urlopen(req)).read()\n        except:\n            irc.reply(\"Failed to fetch: %s\" % url)\n            return\n\n        html = html.replace('<b >', '<b>')\n        html = html.replace('class=\"ind alt', 'class=\"ind')\n        html = html.replace('class=\"ind tL', 'class=\"ind')\n\n        soup = BeautifulSoup(html)\n        table = soup.find('table', attrs={'class':'table'})\n        rows = table.findAll('tr')\n\n        object_list = []\n\n        for row in rows[1:6]: # grab the first through ten.\n            rank = row.find('td', attrs={'class':'ind', 'width': '10%'}).renderContents().strip()\n            player = row.find('td', attrs={'class':'ind', 'width': '65%'}).find('a').renderContents().strip()\n            stat = row.find('td', attrs={'class':'ind', 'width': '25%'}).renderContents().strip()\n            object_list.append(rank + \". \" + player + \" \" + stat)\n\n        thelist = string.join([item for item in object_list], \" | \")\n        irc.reply(\"Leaders in %s for %s: %s\" % (ircutils.bold(optteam.upper()), ircutils.bold(optcategory.upper()), thelist))", "def test_boxscore_player_stats(self):\n        test_v_player_stat = self.BS.vTeam_player_stats[0]['firstName']\n        test_answer_v = 'Isaac'\n        test_h_player_stat = self.BS.hTeam_player_stats[0]['firstName']\n        test_answer_h = 'Pascal'\n        self.assertEqual(test_v_player_stat, test_answer_v)\n        self.assertEqual(test_h_player_stat, test_answer_h)", "def test_add_team_manager_to_team(self):\n        pass" ]
[ "0.8449335", "0.84178495", "0.84178495", "0.81079006", "0.81024987", "0.7863653", "0.7826981", "0.7781774", "0.7720299", "0.7501402", "0.7489745", "0.7379568", "0.7356525", "0.72275877", "0.7195928", "0.71437454", "0.71002096", "0.70476884", "0.69918925", "0.6990166", "0.6847316", "0.66865164", "0.6636804", "0.66096234", "0.6602237", "0.6601782", "0.6584438", "0.65309304", "0.64946145", "0.6462027", "0.6452098", "0.6411903", "0.6379778", "0.6360569", "0.63571423", "0.6335653", "0.631735", "0.63104945", "0.63104796", "0.6289133", "0.6279147", "0.62726057", "0.625427", "0.6235564", "0.62248516", "0.6217335", "0.62149477", "0.62143064", "0.6203841", "0.61967033", "0.61858696", "0.6173106", "0.6168954", "0.6156826", "0.61365443", "0.61348534", "0.61297387", "0.6126084", "0.6116095", "0.6106438", "0.60975176", "0.6081306", "0.6070367", "0.60692406", "0.60574824", "0.6046751", "0.6042005", "0.60414827", "0.60358715", "0.6019038", "0.5984755", "0.5979126", "0.5979056", "0.597594", "0.5962505", "0.59592444", "0.5944842", "0.59284425", "0.59150267", "0.59095305", "0.5906731", "0.5905358", "0.58924913", "0.58728814", "0.5869967", "0.5867738", "0.585815", "0.58544296", "0.5846917", "0.5844834", "0.58424306", "0.58421314", "0.583978", "0.58183056", "0.5817849", "0.58128357", "0.5804866", "0.5781852", "0.57811296", "0.57747126" ]
0.93158627
0
Test case for basketballteams_id_get
def test_basketballteams_id_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_get(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_gridironfootballplayers_id_get(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_cyclingleagues_id_get(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_team_v1(self):\n pass", "def test_plays_id_get(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_workflows_id_get(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def test_racetracks_id_get(self):\n pass", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_sport_id(self):\n result = self.test_client.sport_id\n\n assert result == \"1\"", "def handballteams_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=HANDBALLTEAM_TYPE_URI,\n rdf_type_name=HANDBALLTEAM_TYPE_NAME, \n kls=HandballTeam)", "def get_teams():", "def test_poets_id_get(self):\n pass", "def get_offense_team_id(self):\n pass", "def test_workflows_id_exists_get(self):\n pass", "async def getch_team(self, id: str):\n return self.get_team(id) or await self.fetch_team(id)", "def test_get_one_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='teammate@bar.com',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], teammate.uid)", "def test_solareclipses_id_get(self):\n pass", "def test_teams_get_users_teams_v2(self):\n pass", "def test_teams_list(self):\n pass", "def test_groups_group_id_get(self):\n pass", "def test_user_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_prefectures_id_get(self):\n pass", "def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass", "def test_abbeys_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/abbeys/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_team_id(team_name):\n\n team_name = team_name.lower()\n endpoint = \"/teams\"\n response = api.nhl_api(endpoint)\n\n if not response:\n raise ConnectionError(\"An invalid response was returned from the NHL Teams API.\")\n\n teams_json = response.json()\n teams = teams_json[\"teams\"]\n\n team_id = None\n for team in teams:\n if team[\"name\"].lower() == team_name:\n team_id = team[\"id\"]\n break\n\n if not team_id:\n raise ValueError(\"{} is not a valid NHL team. 
Check your configuration file!\".format(team_name))\n\n    return team_id", "def test_get_for_team(self):\n        user, team_dict = self.test_create_team_creates_survey()\n        response = self.testapp.get(\n            '/api/teams/{}/survey'.format(team_dict['uid']),\n            headers=self.login_headers(user),\n        )\n        survey_dict = json.loads(response.body)\n        self.assertTrue(survey_dict['uid'].startswith('Survey'))", "def club_id(self, club_name):\r\n        # UTF-8 comparison\r\n        headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n                   'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n        req = self.session.get('http://' + self.domain, headers=headers).content\r\n        soup = BeautifulSoup(req, \"html.parser\")\r\n        for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n            # Get teamid from the bets\r\n            team1 = i.find('a')['title']\r\n            team2 = i.find_all('a')[1]['title']\r\n            if club_name == team1:\r\n                return i.find('a')['href'].split('cid=')[1]\r\n            elif club_name == team2:\r\n                return i.find_all('a')[1]['href'].split('cid=')[1]\r\n        return None", "def test_teams_read(self):\n        pass", "def test_gridironfootballplayers_get(self):\n        pass", "def test_coupledmodels_id_get(self):\n        pass", "def test_variablepresentations_id_get(self):\n        pass", "def test_teams_get_users_teams_v1(self):\n        pass", "def find_by_id(self, team, params={}, **options):\n        path = \"/teams/%s\" % (team)\n        return self.client.get(path, params, **options)", "def assert_team_exists(self, team_id):\n        result = self.con.execute(\n            'SELECT id FROM team WHERE id = ?', (team_id,)\n        ).fetchone()\n        if result is None:\n            raise err.UnknownTeamError(team_id)", "def test_team_template_folders_id_get(self):\n        pass", "def test_get_list_teams(self):\n        args = {\n            'name': 'test team',\n            'capacity': '11',\n            'number_players': '6',\n            'pitch_postcode': 'E1 6LT',\n            'time': '2019-01-01 13:00'\n        }\n        team = Team(args)\n        db.session.add(team)\n        db.session.commit()\n        response = self.client.get('/teams')\n        self.assertEqual(response.status_code, 200)\n        self.assertIn(b'test team', response.data)", "def test_lacrosseplayers_id_get(self):\n        headers = { \n            'Accept': 'application/json',\n        }\n        response = self.client.open(\n            '/v0.0.1/lacrosseplayers/{id}'.format(id='id_example'),\n            method='GET',\n            headers=headers)\n        self.assert200(response,\n                       'Response body is : ' + response.data.decode('utf-8'))", "def determine_basketball_outcome_from_api(market, params, enp_id):\n\n    n_bet = 1\n    outcome = None\n    if market == BasketballMarkets.FULL_TIME_POINT_SPREAD:\n        enp_id_int = int(enp_id[3:])\n        selection = params[0]\n        handicap = params[1]\n        response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n        score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n        score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n\n        if selection == BasketballSelections.HOME_TEAM:\n            hc_score = score_home + handicap\n            if hc_score == score_away:\n                outcome = 0\n            elif hc_score > score_away:\n                outcome = 1\n            else:\n                outcome = -1\n\n        elif selection == BasketballSelections.AWAY_TEAM:\n            hc_score = score_away + handicap\n            if hc_score == score_home:\n                outcome = 0\n            elif hc_score > score_home:\n                outcome = 1\n            else:\n                outcome = -1\n\n        else:\n            raise ValueError('FTPS bet should be ONE or TWO')\n\n    elif market == BasketballMarkets.FULL_TIME_MONEYLINE:\n        enp_id_int = int(enp_id[3:])\n        selection = params[0]\n        response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n        score_home = response.get(enp_id, {}).get('details', 
{}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n if selection == BasketballSelections.HOME_TEAM:\n if score_home == score_away:\n outcome = 0\n elif score_home > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n if score_away == score_home:\n outcome = 0\n elif score_away > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('selection should be ONE or TWO')\n elif market == BasketballMarkets.FULL_TIME_TOTAL_POINTS:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n score_total = score_home + score_away\n\n if selection == BasketballSelections.OVER:\n if score_total == handicap:\n outcome = 0\n elif score_total > handicap:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.UNDER:\n if score_total == handicap:\n outcome = 0\n elif score_total < handicap:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTTP bet should be OVER or UNDER')\n else:\n raise ValueError('implement more markets')\n\n return outcome, n_bet", "def get_team_bid(self, team_id):\n\t\tplayer_ids = [(self.reference_id + team_id) % 4, (self.reference_id + team_id + 2) % 4]\n\t\tres = []\n\t\tsummable = True\n\t\tfor player_id in player_ids:\n\t\t\tif player_id in self.bids:\n\t\t\t\tif self.bids[player_id] == \"N\" or self.bids[player_id] == \"B\":\n\t\t\t\t\tsummable = False\n\t\t\t\tres.append(self.bids[player_id])\n\t\t\telse:\n\t\t\t\tsummable = False\n\t\t\t\tres.append('?')\n\t\tif summable:\n\t\t\tres = [res[0] + res[1]]\n\t\treturn res", "def test_get_case_by_id(self):\n pass", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def test_get_teams(self):\n owner2 = AnotherUserFactory(email_confirmed=True)\n owner3 = AnotherUserFactory(username='team owner 3', email='teamowner3@gmail.com', email_confirmed=True,)\n TeamFactory(owner=owner2, name='second team')\n TeamFactory(owner=owner3, name='third team')\n\n usual_user = UserFactory(\n username='usualuser',\n email='default@email.com',\n email_confirmed=True,\n )\n token = 
Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n response = self.client.get(reverse('api:teams-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 3)", "def test_drugs_id_get(self):\n pass", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def team_id(self):\n return self._team_id", "def test_intercommunalitys_id_get(self):\n pass", "def test_medicians_id_get(self):\n pass", "def test_api_random_id(api_client, single_brew_number):\n response = api_client.get(path='/breweries' + '/' + str(single_brew_number))\n assert response.json()['id'] == single_brew_number", "def test_variables_id_get(self):\n pass", "def test_get_team_history(self):\n pass", "def validate_team_id(self, team_id: int) -> Team:\n try:\n return Team.objects.get(id=team_id, organization=self.organization)\n except Team.DoesNotExist:\n raise serializers.ValidationError(\"This team does not exist.\")", "def test_metrostations_id_get(self):\n pass", "def get_team(team_id):\n team = TeamController.get(filters={\"Team\": {\"id\": team_id}})\n return jsonify(format_team(team)), 200", "def test_success(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n\n assert response.status_code == status.HTTP_200_OK\n\n response_team = response.json()['results'][0]\n team = Team.objects.get(id=response_team['id'])\n\n assert response_team == get_expected_data_from_team(team)", "def test_id_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_team_swimmer_id(input_val)\n self.assertEqual(output_val, self.line.team_swimmer_id)", "def find_team(self):\n if self.team_id is not None:\n return ItopapiPrototype.get_itop_class('Team').find(self.team_id)\n return None", "def test_get_open_requests_by_team(self):\n pass", "def get_team_id(self, team_name):\n\n teams = self.get_teams()\n for team in teams:\n if team['name'] == team_name:\n return team['id']\n\n return None", "def test_comicscreators_id_get(self):\n pass", "def test_get_request_only_from_one_team(self):\n another_user3 = AnotherUserFactory(username='anotheruser3', email='anotheruser3@gmail.com')\n another_user4 = AnotherUserFactory(username='anotheruser4', email='anotheruser4@gmail.com')\n another_user5 = AnotherUserFactory(username='anotheruser5', email='anotheruser5@gmail.com')\n another_user6 = AnotherUserFactory(username='anotheruser6', email='anotheruser6@gmail.com')\n another_team = TeamFactory(owner=another_user3,\n name='Soul Eaters',\n description='We`ll destroy all the souls. 
And the age of darkness will come')\n        UserTeamRequestFactory(\n            user=another_user4,\n            team=another_team,\n        )\n        UserTeamRequestFactory(\n            user=another_user5,\n            team=another_team,\n        )\n        UserTeamRequestFactory(\n            user=another_user6,\n            team=another_team,\n        )\n        params = {'teamID': self.team.id}\n        response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(response.data.get('results')), 2)", "def get_team(self, team_id):\n        try:\n            return CourseTeam.objects.get(team_id=team_id)\n        except CourseTeam.DoesNotExist:\n            raise Http404  # lint-amnesty, pylint: disable=raise-missing-from", "def test_data_source_soaps_id_get(self):\n        pass", "def get_people(team):", "def test_get_sentence_by_id(self):\n        response = self.client.open(\n            '/api/v1/sentence/{sentenceID}'.format(sentenceID=56),\n            method='GET')\n        self.assert200(response,\n                       'Response body is : ' + response.data.decode('utf-8'))", "def test_austriansettlements_id_get(self):\n        pass", "def test_get_sdb_id(self, mock_get):\n        sdb_data = [\n            {\n                \"id\": \"5f0-99-414-bc-e5909c\",\n                \"name\": \"Disco Events\",\n                \"path\": \"app/disco-events/\",\n                \"category_id\": \"b07-42d0-e6-9-0a47c03\"\n            },\n            {\n                \"id\": \"a7192aa7-83f0-45b7-91fb-f6b0eb\",\n                \"name\": \"snowflake\",\n                \"path\": \"app/snowflake/\",\n                \"category_id\": \"b042d0-e6-90-0aec03\"\n            }\n        ]\n\n        mock_get.return_value = self._mock_response(content=json.dumps(sdb_data))\n        sdb_id = self.client.get_sdb_id(\"snowflake\")\n\n        # confirm the id matches\n        assert_equals(sdb_id, sdb_data[1]['id'])\n        assert_in('X-Cerberus-Client', self.client.HEADERS)\n        mock_get.assert_called_with(\n            self.cerberus_url + '/v2/safe-deposit-box',\n            headers=self.client.HEADERS\n        )", "def test_api_predictors_predictor_id_get(self):\n        pass", "def test_presenters_id_get(self):\n        headers = { \n            'Accept': 'application/json',\n        }\n        response = self.client.open(\n            '/v0.0.1/presenters/{id}'.format(id='id_example'),\n            method='GET',\n            headers=headers)\n        self.assert200(response,\n                       'Response body is : ' + response.data.decode('utf-8'))", "def test_installments_id_get(self):\n        pass", "def test_teams_create(self):\n        pass", "def get_team_info(self, id):\n        params = {'key': self.key, 'start_at_team_id': id,\n                  'teams_requested': 1}\n        r = requests.get(self.TEAM_URL, params=params)\n        return TeamResponse(r.json()['result']['teams'][0])", "def test_get_requests_for_team_by_user(self):\n\n        token = Token.objects.get(user=self.another_user1)\n        self.client.credentials(\n            HTTP_AUTHORIZATION=f'Token {token.key}')\n        params = {'teamID': self.team.id}\n        response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_chain_by_id(self):\n        pass", "def get_team(self):\n        try:\n            team_id = self.request.GET.get('team')\n            if team_id is not None:\n                team_id = int(team_id)\n                return self.get_available_teams().get(pk=team_id)\n            return self.get_available_teams().latest()\n        except (Team.DoesNotExist, ValueError):\n            return None", "def test_liechtensteinsettlements_id_get(self):\n        pass", "def get_one_team(id):\n    # Try to get team from database\n    query = Team.query.filter_by(id=id)\n\n    try:\n        team = query.one()\n\n    # If no result found, return error\n    except NoResultFound:\n        return jsonify({'error': 'No result found!'}), 404\n\n    # If some other sqlalchemy error is thrown, return error\n    except SQLAlchemyError:\n        return jsonify({'error': 'Some problem 
occurred!'}), 400\n\n    # Serialize the team object and return json response\n    team_schema = TeamSchema()\n    output = team_schema.dump(team).data\n\n    return jsonify({\n        'success': 'Successfully retrieved team.',\n        'team': output\n    }), 200", "def test_groups_group_id_state_get(self):\n        pass", "def test_create_team(self):\n        pass", "async def read_team(team_id: str = Path(..., description=\"ID value of the desired team\"), db_handler: DBHandler = Depends(database_dependency)):\n    team_record = await db_handler.select_team(team_id=team_id)\n    team_record = init_BaseTeam(team_record)\n\n    return team_record", "def test_assign_managing_team(self):\n        pass", "def test_meme_meme_id_get(self):\n        pass", "def test_deaths_id_get(self):\n        headers = { \n            'Accept': 'application/json',\n        }\n        response = self.client.open(\n            '/v0.0.1/deaths/{id}'.format(id='id_example'),\n            method='GET',\n            headers=headers)\n        self.assert200(response,\n                       'Response body is : ' + response.data.decode('utf-8'))", "def test_update_team(self):\n        pass", "def test_get_by_id(self):\n        actual = chef_role.get_by_id(self.role_id)\n        eq_(actual['chef_role_name'], self.role_name)", "def test_user_get_teams():\n    app = create_ctfd()\n    with app.app_context():\n        register_user(app)\n        client = login_as_user(app)\n        r = client.get('/teams')\n        assert r.status_code == 200\n    destroy_ctfd(app)", "def test_get_specific_by_id(self):\n        token = self.get_token()\n        self.client.post('/api/v2/party', data=self.add_party,\n                         headers=dict(Authorization=\"Bearer \" + token),\n                         content_type='application/json')\n        response = self.client.get('/api/v2/party/1',\n                                   headers=dict(Authorization=\"Bearer \" + token),\n                                   content_type='application/json',\n                                   )\n        self.assertEqual(response.status_code, 200)", "def getTeamByEspn(self, name):\n        # team, created = Teams.objects.get_or_create(name_espn=name, defaults={'country_id' : 1})\n        # return int(team.id)\n\n        try:\n            team = Teams.objects.get(name_espn=name)\n            return int(team.id)\n        except:\n            return 0", "def test_team_template_folders_id_exists_get(self):\n        pass", "def get_team_gk_ids(match_id):\n    homeid, awayid, all = get_match_info(match_id)\n    data = service_request(\"GetMatchSquad\", {\"matchId\": match_id})\n    gks = {k: {\"team_id\": data.get(k).get(\"data\")[3],\n               \"jersey_no\": data.get(k).get(\"data\")[1],\n               \"player_id\": k}\n           for k in data if data.get(k).get(\"data\")[4]==1 and data.get(k).get(\"data\")[2]==1}\n\n    teams = {\n        int(homeid): 0,\n        int(awayid): 1\n    }\n\n    return {teams.get(gks.get(k).get(\"team_id\")):gks.get(k) for k in gks}", "def test_get_requests_for_team_by_owner(self):\n\n        params = {'teamID': self.team.id}\n        response = self.client.get(reverse('api:user-team-requests-get-requests-for-team'), params)\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(response.data.get('results')), 2)", "def test_get_campaign_by_id_passes(self):\n        response = self.client.get(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n        response_body = response.get_json()\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(response_body, {\"id\": CAMPAIGN_ID, \"name\": CAMPAIGN_NAME})" ]
[ "0.7799063", "0.7685305", "0.7607886", "0.74427456", "0.72060305", "0.6990276", "0.6840412", "0.68341905", "0.68341905", "0.6772549", "0.6731343", "0.67035055", "0.6620671", "0.66105074", "0.6533537", "0.6475667", "0.645218", "0.6435485", "0.63834304", "0.6345525", "0.630249", "0.6249877", "0.6200476", "0.618257", "0.61821526", "0.6166438", "0.61655784", "0.6103684", "0.6068002", "0.60654026", "0.6043198", "0.6042444", "0.60415167", "0.60304207", "0.6028849", "0.600294", "0.5995734", "0.5982089", "0.5972361", "0.5957958", "0.5955142", "0.5945079", "0.59439415", "0.59339106", "0.59243745", "0.58913296", "0.5889888", "0.5870483", "0.58649874", "0.5858552", "0.5843266", "0.5827785", "0.582534", "0.5819448", "0.5819404", "0.58108103", "0.5799043", "0.57759047", "0.5742708", "0.5740475", "0.57333", "0.5722841", "0.56754917", "0.5674465", "0.56703585", "0.5668364", "0.56648886", "0.56568867", "0.565589", "0.5642825", "0.5633721", "0.5623866", "0.56122077", "0.56027746", "0.5598356", "0.5589357", "0.55842376", "0.5580439", "0.5575418", "0.5572228", "0.5559791", "0.5553889", "0.55507404", "0.5542619", "0.55323493", "0.551768", "0.55152714", "0.5513436", "0.5496357", "0.54874957", "0.54874146", "0.54868263", "0.5484395", "0.54819065", "0.5480957", "0.5475785", "0.54757035", "0.5475343", "0.5462505", "0.5459719" ]
0.939899
0
Initialize the matplotlib figure.
def initialize_plot(self, ranges=None): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_plot(x: float, y: float) -> plt.figure:\n LOG.debug(\"Initializing plot.\")\n plt.ion()\n fig = plt.figure(figsize=(x, y), num=\"GlacierFlowModel\")\n fig.patch.set_facecolor(\"black\")\n return fig", "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def _plot_setup(self, fig, ax):\n\n self._check_data_valid()\n\n if ax:\n self.fig = fig\n self.ax = ax\n else:\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection=self.wcs)\n\n # Set basic figure display options\n if self.options.get('grid', True):\n self.ax.coords.grid(color='white', alpha=0.5)\n\n if self.options.get('title', True):\n title = self.options.get('title', self.surveyname)\n self.ax.set_title(title, fontdict={'fontsize': 20, 'fontweight': 10})\n\n self.set_xlabel('RA (J2000)')\n self.set_ylabel('Dec (J2000)')\n\n # Set compact or extended label / tick configuration\n if self.options.get('compact', False):\n tickcolor = 'k' if np.nanmax(np.abs(self.data)) == np.nanmax(self.data) else 'gray'\n\n lon = self.ax.coords[0]\n lat = self.ax.coords[1]\n\n lon.display_minor_ticks(True)\n lat.display_minor_ticks(True)\n\n lon.set_ticks(number=5)\n lat.set_ticks(number=5)\n\n self.ax.tick_params(axis='both', direction='in', length=5, color=tickcolor)\n self.padlevel = self.options.get('ylabelpad', 5)\n\n # Set colourmap normalisation\n self.norm = self._get_cmap_normalisation()", "def create_figure(self):\n plt.rcParams.update(general_utils.returnGraphConfigs(\"anim\"))\n self.fig = plt.figure()\n self.axes = plt.axes()\n self.axes.set_xlabel(\"Cells In X (Columns)\")\n self.axes.set_ylabel(\"Cells In Y (Rows)\")\n self.axes.set_xlim(0, self.dimensions - 1)\n self.axes.set_ylim(0, self.dimensions - 1)", "def initialize(self) -> None:\n # Only do matplotlib import when necessary\n super().initialize()\n from matplotlib import pyplot as plt\n self.fig, self.ax = plt.subplots()\n if self.state_map is not None:\n self._add_state_map(self.state_map)\n else:\n self.categories = self.simulation.state_list", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])", "def _setup_figure(self):\n\n plt.figure(1)\n plt.clf()\n\n # Two main axes\n self._tsne_window = plt.axes([0.05, 0.05, 0.4, 0.4])\n self._main_window = plt.axes([0.05, 0.55, 0.4, 0.4])\n\n # Nine sub axes\n self._sub_windows = []\n for row in range(3):\n for col in range(3):\n tt = plt.axes([0.5+0.17*col, 0.75-0.25*row, 0.15, 0.15])\n tt.set_xticks([])\n tt.set_yticks([])\n self._sub_windows.append(tt)\n\n # Register the button click\n self._cid = plt.figure(1).canvas.mpl_connect('button_press_event', self._onclick)\n\n # Text\n plt.figure(1).text(0.6, 0.2, 'Click with 2nd or 3rd mouse button to select image...')\n 
plt.figure(1).text(0.05, 0.5, 'Click in main image or tSNE plot to find similar cutouts...')\n plt.figure(1).text(0.6, 0.05, 'The tSNE data reduction calculated from data run through {}'.format(self._model_name), fontsize=8)\n\n # Show\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def axInit():\n ax.init()", "def __init__(self):\n\n fig_width_pt = 800.0 \n pylab.rcParams.update(plot_params)", "def initialize_mwplot(self):\n if self.fig is None:\n self.fig, self.ax = plt.subplots(1, figsize=self.figsize, dpi=self.dpi)\n if self.title is not None:\n self.fig.suptitle(self.title, fontsize=self.fontsize)\n if self.__grid == 'galactic':\n self.ax.set_xlabel('Galactic Longitude (Degree)', fontsize=self.fontsize)\n self.ax.set_ylabel('Galactic Latitude (Degree)', fontsize=self.fontsize)\n self.__ext = [(self.__center[0] - self.__radius[0]).value, (self.__center[0] + self.__radius[0]).value,\n (self.__center[1] - self.__radius[1]).value, (self.__center[1] + self.__radius[1]).value]\n self.ax.set_facecolor('k') # have a black color background for image with <1.0 alpha\n self.ax.imshow(self.__img, zorder=2, extent=self.__ext, alpha=self.imalpha, rasterized=True)\n self.ax.tick_params(labelsize=self.fontsize * 0.8, width=self.fontsize / 10, length=self.fontsize / 2)", "def __init__(self, fig):\n # Initialize systems\n self.set_quantum_sys()\n\n #################################################################\n #\n # Initialize plotting facility\n #\n #################################################################\n\n self.fig = fig\n\n ax = fig.add_subplot(111)\n\n ax.set_title('Wavefunction density, $| \\\\Psi(x_1, x_2, t) |^2$')\n extent=[\n self.quant_sys.x2.min(),\n self.quant_sys.x2.max(),\n self.quant_sys.x1.min(),\n self.quant_sys.x1.max()\n ]\n self.img = ax.imshow([[]], extent=extent, origin='lower')\n\n self.fig.colorbar(self.img)\n\n ax.set_xlabel('$x_2$ (a.u.)')\n ax.set_ylabel('$x_1$ (a.u.)')", "def initialize_mwplot(self):\n if self.fig is None:\n self.fig, self.ax = plt.subplots(1, figsize=self.figsize, dpi=self.dpi)\n if self.title is not None:\n self.fig.suptitle(self.title, fontsize=self.fontsize)\n self.ax.set_xlabel(f'{self._coord_english} ({self._unit_english})', fontsize=self.fontsize)\n self.ax.set_ylabel(f'{self._coord_english} ({self._unit_english})', fontsize=self.fontsize)\n self.ax.set_aspect(self.__aspect)\n self.ax.set_facecolor('k') # have a black color background for image with <1.0 alpha\n self.ax.imshow(self.__img, zorder=2, extent=self.__ext, 
alpha=self.imalpha, rasterized=True)\n self.ax.tick_params(labelsize=self.fontsize * 0.8, width=self.fontsize / 10, length=self.fontsize / 2)", "def figure(self):\n if self._figure is None:\n\n self._figure, ax = plt.subplots(nrows=1, dpi=self._dpi)\n if self._verbose:\n print(f\" Figure dpi set to {self._dpi}\")\n\n self._figure.set_size_inches(self._size)\n if self._verbose:\n print(\" Figure size set to \" + str(self._size) + \" inches.\")\n\n for model in self._models:\n xs, ys, _ = zip(*model._nodes)\n\n for face in model._elements:\n xf = tuple(xs[k - 1] for k in face) # 1-base index to 0-base index\n yf = tuple(ys[k - 1] for k in face)\n # plt.fill(\n # xf,\n # yf,\n # linestyle=\"dotted\",\n # edgecolor=\"magenta\",\n # alpha=0.5,\n # facecolor=\"gray\",\n # )\n plt.fill(\n xf,\n yf,\n alpha=model._alpha,\n edgecolor=model._edgecolor,\n facecolor=model._facecolor,\n linestyle=model._linestyle,\n linewidth=model._linewidth,\n )\n\n if self._xticks:\n ax.set_xticks(self._xticks)\n\n if self._yticks:\n ax.set_yticks(self._yticks)\n\n if self._xlim:\n ax.set_xlim(self._xlim)\n\n if self._ylim:\n ax.set_ylim(self._ylim)\n\n if self._xlabel:\n ax.set_xlabel(self._xlabel)\n\n if self._ylabel:\n ax.set_ylabel(self._ylabel)\n\n # set frame on or off based on the Bool \"frame\" in .json input\n ax.set_frame_on(b=self._frame)\n if len(self._tick_params) > 0:\n ax.tick_params(**self._tick_params)\n\n if self._display:\n plt.show()\n\n if self._serialize:\n self.serialize(self._folder, self._file)\n\n plt.close(\"all\")\n self._figure = None", "def __init__(self):\n self.figure = plt.figure()\n FigureCanvas.__init__(self, self.figure)\n self.figure.patch.set_facecolor('blue')\n self.figure.patch.set_alpha(0.0)\n self.pv_monitor = controls.PvMonitors.get_instance()", "def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text", "def __init__(self, fig):\n # Initialize the system\n self.set_sys()\n\n #################################################################\n #\n # Initialize plotting facility\n #\n #################################################################\n\n self.fig = fig\n\n # import utility to visualize the wigner function\n from wigner_normalize import WignerNormalize, WignerSymLogNorm\n\n img_params = dict(\n origin='lower',\n cmap='seismic',\n norm=WignerNormalize(vmin=-0.01, vmax=0.05),\n # norm=WignerSymLogNorm(linthresh=1e-5, vmin=-0.01, vmax=0.1),\n )\n\n ax = fig.add_subplot(121)\n ax.set_title('Classical density, $\\\\rho(x,p,t)$')\n\n # generate empty plots\n self.img_clasical_rho = ax.imshow(\n [[]],\n **img_params,\n extent=[self.quant_sys.X.min(), self.quant_sys.X.max(), 
self.quant_sys.P.min(), self.quant_sys.P.max()],\n )\n\n ax.set_xlabel('$x$ (a.u.)')\n ax.set_ylabel('$p$ (a.u.)')\n\n ax = fig.add_subplot(122)\n ax.set_title('Real part quantum density matrix, $\\Re\\hat{\\\\rho}$')\n\n # generate empty plots\n self.img_Upsilon2 = ax.imshow([[]], **img_params, extent=[1, 2, 1, 2])\n\n #ax.set_xlabel('$x$ (a.u.)')\n #ax.set_ylabel('$p$ (a.u.)')\n\n #self.fig.colorbar(self.img_clasical_rho)", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def init_fig(self, fig):\n # type: (Figure) -> None\n self.init_vars()\n\n self.xs, self.ys = np.meshgrid(np.arange(0., self.max_iter+.5)-.5, np.arange(0., self.n_vars+.5)-.5)\n self.cs = np.zeros((self.n_vars, self.max_iter))\n\n self.ax = fig.add_subplot(111)\n self.ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n self.ax.yaxis.set_ticks(np.arange(0, self.n_vars))\n self.ax.yaxis.set_ticklabels(self.var_names)\n\n self.ax.set_xlim([-.5, .5])\n self.ax.set_ylim([-.5, self.n_vars-.5])\n self.quad = self.ax.pcolormesh(self.xs, self.ys, self.cs,\n vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, norm=self.norm)\n\n fig.colorbar(self.quad)\n\n self.ax.set_xlabel('Evaluation #')", "def set_figure_variables(self):\n 
#self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def show_figure(self):\n pylab.show()", "def _setup_plot(self):\n\n # Get plot item and setup\n self.plt = self.getPlotItem()\n self.plt.setDownsampling(auto=True)\n self.plt.setLabel('left', text='Signal', units='V' if self.units is None else self.units['left'])\n\n # Title\n self.plt.setTitle('' if self.name is None else self.name)\n\n # Additional axis if specified\n if 'right' in self.units:\n self.plt.setLabel('right', text='Signal', units=self.units['right'])\n\n # X-axis is time\n self.plt.setLabel('bottom', text='Time', units='s')\n self.plt.showGrid(x=True, y=True, alpha=0.66)\n self.plt.setLimits(xMax=0)\n\n # Make OrderedDict of curves\n self.curves = OrderedDict([(ch, pg.PlotCurveItem(pen=_MPL_COLORS[i % len(_MPL_COLORS)])) for i, ch in enumerate(self.channels)])\n\n # Make legend entries for curves\n self.legend = pg.LegendItem(offset=(80, -50))\n self.legend.setParentItem(self.plt)\n\n # Show data and legend\n for ch in self.channels:\n self.show_data(ch)", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def buildPlot(self):\r\n style.use('fivethirtyeight')\r\n self.fig = plt.figure()\r\n self.ax1 = self.fig.add_subplot(1,1,1)\r\n self.ax1.clear()\r\n self.ax1.plot(self.inputValInt,self.inputValInt1)", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def _setFig(self):\n self.p.background_fill_color = grey['light']\n self.p.xgrid.grid_line_color = None\n self.p.ygrid.grid_line_color = None\n self.p.ygrid.grid_line_dash = 'dotted'\n self.p.ygrid.grid_line_dash = 'dotted'\n\n self.p.xgrid.minor_grid_line_color = grey['median']\n self.p.ygrid.minor_grid_line_color = grey['median']\n self.p.xgrid.minor_grid_line_dash = 'dotted'\n self.p.ygrid.minor_grid_line_dash = 'dotted'\n\n 
self.p.xaxis.axis_label = \"tsne_feature_0\"\n        self.p.yaxis.axis_label = \"tsne_feature_1\"", "def init_plot(self, master):\n        b = Figure(figsize=(8, 6), dpi=100)\n        ac = b.add_subplot(111)\n        ac.plot(10, 10)\n        ac.set_title('Current tour plot')\n        ac.set_xlabel('X axis coordinates')\n        ac.set_ylabel('Y axis coordinates')\n        ac.grid(True)\n        canvas = FigureCanvasTkAgg(b, master)\n        canvas.draw()\n        canvas.get_tk_widget().grid(row=1, column=1, sticky=W)", "def initialize_visualization(self) -> None:\n        pass", "def embed_matplotlib(self):", "def setup(self, flags):\n        self.figure = pylab.figure(1)\n        self.axes = {}\n        self.stream_data = {}\n        self.flags = flags", "def setup_figure(self):\n        # How many data plots are we dealing with in each dimension?\n        plots_x = self._dims[0]  # Number of columns\n        plots_y = self._dims[1] if len(self._dims) > 1 else 1  # Number of rows\n\n        # Set up our base row count\n        num_rows = plots_y + 1   # Add one more row for the update number\n        height_ratios = [1] * plots_y + [0.25]\n        num_cols = plots_x + 1   # Add one more column for the colorbar\n        width_ratios = [1] * plots_x + [0.10]\n\n        if self._is_multi:\n            # If we have multiple resources, add another row for the resource legend\n            num_rows += 1\n            height_ratios.append(0.1)\n\n        has_descr = True if len(self._env_str + self._event_str) > 0 else False\n        if has_descr:\n            # if we need to print some descriptive text, add another at the bottom\n            # change this height ratio to make it larger\n            num_rows += 1\n            height_ratios.append(0.35)\n\n        # Create our grid layout\n        gs = mpl.gridspec.GridSpec(num_rows, num_cols,\n                                   height_ratios=height_ratios,\n\n                                   width_ratios=width_ratios)\n\n        # Plot our category heatmaps\n        ndx = 0    # Index into our experiment\n        plots = []  # Plots from our experiment\n        for col in range(plots_x):\n            for row in range(plots_y):\n                ax = plt.subplot(gs[row,col])\n                base_cmap = self._cmap if not self._is_multi else ColorMaps.gray\n                plot = plt.imshow(np.zeros(self._grid_shape), cmap=base_cmap,\n                                  origin='upper', interpolation='nearest',\n                                  vmin=self._vmin, vmax=self._vmax)\n                ax.tick_params(axis='both', bottom='off', labelbottom='off',\n                               left='off', labelleft='off')\n                if self._is_left_edge(ndx):\n                    ax.set_ylabel(self._fact2label(ndx,1))\n                if self._is_bottom_edge(ndx):\n                    ax.set_xlabel(self._fact2label(ndx,0))\n                plots.append(plot)\n                pa = []\n                for pp in self._post_plot:\n                    pa.append(pp.blit_build(ax, ax_ndx=ndx))\n                ndx = ndx+1\n\n        # Plot the colorbar\n        norm = mpl.colors.Normalize(self._vmin, self._vmax)\n        cax = plt.subplot( gs[0:plots_y,-1] )  # Across data rows, last column\n        if not self._is_multi:\n            cbar = mpl.colorbar.ColorbarBase(cax, cmap=self._cmap, norm=norm, orientation='vertical')\n        else:\n            cbar = mpl.colorbar.ColorbarBase(cax, cmap=ColorMaps.gray, norm=norm, orientation='vertical')\n        cbar.set_label('Abundance')\n\n        # Plot the update\n        ax = plt.subplot(gs[plots_y,0:plots_x])  # The row after the data plots, across all data plot columns\n        ax.tick_params(axis='both', bottom='off', labelbottom='off',\n                       left='off', labelleft='off')\n        ax.set_frame_on(False)\n        ax.set_ylim(0,1)\n        ax.set_xlim(0,1)\n        update = ax.text(0.5,0.25,'Update n/a', ha='center', va='bottom')\n\n        # Plot the category legend if needed\n        if self._is_multi:\n            ax = plt.subplot(gs[plots_y+1,:-1])  # The row after the update axis, across all data plot columns\n            legend_handles = []\n            for ndx,cat_name in enumerate(self._categories):\n                legend_handles.append(mpl.patches.Patch(color=self._colors[ndx], label=cat_name))\n            plt.legend(handles=legend_handles, loc='center', frameon=False, 
ncol=len(legend_handles))\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # If we have an environment and event strings, plot them in the final row across all columns\n if has_descr:\n ax = plt.subplot(gs[-1,:])\n desc = self._env_str + '\\n\\n' + self._event_str + '\\n\\n' + f'World: {self._world_size[0]} x {self._world_size[1]}'\n env = ax.text(0.05, 1, desc, ha='left', va='top', fontsize=7)\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # Title the figure\n plt.suptitle(self._title)\n\n # Store what we need to redraw each frame for blitting.\n # The values in this dictionary may be either a single element\n # or an iterable.\n self._to_draw = {'plots':plots, 'update':update, 'post_plot':pa}", "def __init__(self):\n super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "def ready(self):\n plt.ion()\n self.figure = plt.figure()\n axes = self.figure.add_subplot(111)\n self.line, = axes.plot(self.xs, self._get_y_data(), self.colour)\n\n if self.y_range is not None:\n plt.ylim(*self.y_range)\n plt.xlim(self.x.lower, self.x.upper)\n\n plt.xlabel(self.x.tex_name if self.use_tex else self.x.name)\n plt.ylabel(self.y.tex_name if self.use_tex else self.y.name)\n\n self.figure.canvas.draw()", "def init_plot():\n fig = plt.figure(constrained_layout=True, figsize=(7,9), dpi=130)\n gs = fig.add_gridspec(5, 1)\n ax2 = fig.add_subplot(gs[:1, :])\n ax1 = fig.add_subplot(gs[1:, :], projection='3d')\n\n tick_color = (0.2, 0.2, 0.2, 1.0)\n pane_color = (0.12, 0.12, 0.12, 1.0)\n ax1.w_xaxis.set_pane_color(pane_color)\n ax1.w_yaxis.set_pane_color(pane_color)\n ax1.w_zaxis.set_pane_color(pane_color)\n\n ax1.tick_params(axis='x', colors=tick_color)\n ax1.tick_params(axis='y', colors=tick_color)\n ax1.tick_params(axis='z', colors=tick_color)\n ax1.view_init(elev=90, azim=180)\n\n ax1.set_xlim3d(0, 80)\n ax1.set_zlim3d(-2, 5)\n \n return (ax1, ax2)", "def initialize_ripple_detection_fig(self):\n self._rd_fig = plt.figure()\n self._rd_ax = plt.axes()\n self._rd_ax.set_xlabel(\"Time (s)\")\n self._rd_ax.set_ylabel(\"EEG (uV)\")\n self._rd_ax.set_xlim((0.0, RiD.LFP_BUFFER_TIME))\n self._rd_ax.set_ylim((-1.0, 1.0))\n self._rd_ax.grid(True)\n\n lfp_frame, = plt.plot([], [], animated=True)\n ripple_power_frame, = plt.plot([], [], animated=True)\n self._rd_frame.append(lfp_frame)\n self._rd_frame.append(ripple_power_frame)\n\n # Create animation object for showing the EEG\n anim_obj = animation.FuncAnimation(self._rd_fig, self.update_ripple_detection_frame, frames=self.__N_ANIMATION_FRAMES, interval=5, blit=True)\n self._anim_objs.append(anim_obj)", "def create_figure_new(self):\n kw = {}\n self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)\n self.p.circle(x=[0],y=[0])", "def __init__(self, parent_frame, plt_props=None):\n tk.Frame.__init__(self, master=parent_frame)\n if self.matplotlib_ready():\n \"\"\" the import statements are scoped so make new ones\"\"\"\n import matplotlib\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\n\n self.figure_bed = plt.figure(figsize=(7, 3.5))\n self.axis = self.figure_bed.add_subplot(111)\n\n if plt_props:\n for key, value in plt_props.iteritems():\n 
eval(\"plt.\" + key + \"(\" + value + \")\")\n # self.axis.set_axis_bgcolor('red')\n self.figure_bed.set_facecolor('white')\n self.canvas = FigureCanvasTkAgg(self.figure_bed, master=self)\n self.canvas._tkcanvas.config(highlightthickness=0)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side='top')\n\n # self.make_matplotlib_area(parent, plt_props)\n self.embed_matplotlib()\n self.type = 'matplotlib'\n # TODO ADD TO THIS\n else:\n graph = tk.Canvas(master=self)\n graph.pack(side='left', expand=True, fill=tk.BOTH)\n self.type = 'canvas'", "def show():\n setup()\n plt.show()", "def draw_figure(self, **kwargs):\n\n fig = figure(**kwargs)\n fig.xaxis.axis_label = self.x_label\n fig.yaxis.axis_label = self.y_label\n\n return fig", "def display(self):\n self.figure, self.axes = self.createFigure()\n\n self.setupLayout()\n self.quitFlag = False\n self.animation = animation.FuncAnimation(self.figure, self.animate, interval=100)\n plt.show()", "def createFigure(self):\n\n SMALL_SIZE = 14\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 36\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n fig, axes = plt.subplots()\n fig.set_size_inches(10, 6, forward=True)\n serialNumber = self.spectrometer.getSerialNumber()\n model = self.spectrometer.model\n fig.canvas.manager.set_window_title('Spectrometer [serial # {0}, model {1}]'.format(serialNumber, model))\n axes.set_xlabel(\"Wavelength [nm]\")\n axes.set_ylabel(\"Intensity [arb.u]\")\n return fig, axes", "def create_plot():\n\n fig, ax = plt.subplots()\n return fig, ax", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class", "def init_render(self):\n plt.ion() # interactive plot mode, panning, zooming enabled\n self.fig = plt.figure(figsize=(9,7)) # create figure object\n self.ax = self.fig.add_subplot(111, projection=\"3d\") # attach z-axis to plot\n # set axe limits and labels\n self.ax.set_xlim([-self.l1max, self.l1max])\n self.ax.set_ylim([-self.l1max, self.l1max])\n self.ax.set_zlim([-self.l1max, self.l1max])\n self.ax.set_xlabel(\"X\")\n self.ax.set_ylabel(\"Y\")\n self.ax.set_zlabel(\"Z\")\n # add 3 arrows of coordinate base frame\n ax_base = Arrow3D([0.0, self.arrow_len], [0.0, 0.0], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n ay_base = Arrow3D([0.0, 0.0], [0.0, self.arrow_len], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n az_base = Arrow3D([0.0, 0.0], [0.0, 0.0], [0.0, self.arrow_len],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n self.ax.add_artist(ax_base)\n self.ax.add_artist(ay_base)\n self.ax.add_artist(az_base)\n plt.show(block=False) # display figure and bring focus (once) to 
plotting window\n self.fig.tight_layout() # fits the plot to window size", "def initializePlot( self ):\n\n self.mNTaxa = len(self.mTree.get_taxa())\n self.mNNodes = max( self.mTree.chain.keys() ) + 1\n\n self.calculateCoordinates()\n \n self.calculateCanvasSize( )", "def __init__(self, ax=None):\n\n if ax is None:\n f = plt.figure()\n self.ax = f.add_subplot(111)\n else:\n self.ax = ax\n\n self.e2 = [] # list to store RMS error results\n self.labels = []\n self.colors = []", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def createBlankPlot(self):\n\n fig = plt.figure(figsize=(8,6),dpi=80)\n fig.set_facecolor('#ededed')\n \n # Format plot\n ax = plt.subplot(111)\n \n fig.canvas.draw()\n \n return fig, ax", "def __init__(self, skin_directory):\n self.ax = None\n self.generate_axis()\n self.skin_directory = skin_directory\n self.figure = plt.gcf()", "def figure(self):\n if self._figure is None:\n\n # dpi versus fig size\n # https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size\n # fig, ax = plt.subplots(nrows=1, dpi=self._dpi)\n self._figure, ax = plt.subplots(nrows=1, dpi=self._dpi)\n if self._verbose:\n print(f\" Figure dpi set to {self._dpi}\")\n\n # ax.ticklabel_format(axis='y', style='scientific')\n # ax.ticklabel_format(axis='both', style='scientific', scilimits=(0,0))\n\n # https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.figure.Figure.html#matplotlib.figure.Figure.set_size_inches\n # fig.set_size_inches(self._size)\n self._figure.set_size_inches(self._size)\n if self._verbose:\n print(\" Figure size set to \" + str(self._size) + \" inches.\")\n\n if self._background_image:\n folder = self._background_image.get(\"folder\", \".\")\n file = self._background_image.get(\"file\", None)\n rel_path_and_file = os.path.join(\n folder, file\n ) # relative to current run location\n im = Image.open(rel_path_and_file)\n\n left = self._background_image.get(\"left\", 0.0)\n right = self._background_image.get(\"right\", 1.0)\n bottom = self._background_image.get(\"bottom\", 0.0)\n top = self._background_image.get(\"top\", 1.0)\n al = self._background_image.get(\"alpha\", 1.0)\n\n # https://github.com/matplotlib/matplotlib/issues/3173\n # https://matplotlib.org/3.1.1/tutorials/intermediate/imshow_extent.html\n bounds = [left, right, bottom, top]\n im = ax.imshow(im, zorder=0, extent=bounds, alpha=al, aspect=\"auto\")\n\n for model in self._models:\n # needs rearchitecting, a logview descends from a view\n if self._xaxislog and not self._yaxislog: # needs rearchitecting\n ax.semilogx(model.x, model.y, **model.plot_kwargs)\n elif not self._xaxislog and self._yaxislog:\n ax.semilogy(model.x, model.y, **model.plot_kwargs)\n elif self._xaxislog and self._yaxislog:\n ax.loglog(model.x, model.y, **model.plot_kwargs)\n else:\n ax.plot(model.x, model.y, **model.plot_kwargs)\n\n if self._xticks:\n ax.set_xticks(self._xticks)\n\n if self._yticks:\n ax.set_yticks(self._yticks)\n\n if self._xlim:\n ax.set_xlim(self._xlim)\n\n if self._ylim:\n ax.set_ylim(self._ylim)\n\n if self._yaxis_rhs:\n rhs_axis_scale = 
self._yaxis_rhs.get(\"scale\", 1)\n rhs_axis_label = self._yaxis_rhs.get(\"label\", None)\n # rhs_yticks_str = self._yaxis_rhs.get('yticks', None)\n rhs_yticks = self._yaxis_rhs.get(\"yticks\", None)\n\n # ax2 = fig.add_subplot(111, sharex=ax, frameon=False)\n ax2 = self._figure.add_subplot(111, sharex=ax, frameon=False)\n bottom, top = ax.get_ylim() # get from left-hand-side y-axis\n ax2.set_ylim(rhs_axis_scale * bottom, rhs_axis_scale * top)\n ax2.yaxis.tick_right()\n # ax2.ticklabel_format(axis='both', style='scientific', scilimits=(0,0))\n # ax.ticklabel_format(axis='both', style='scientific', scilimits=(0,0))\n # _ticklabel_format = self._yaxis_rhs.get('ticklabel_format', None)\n # _ticklabel_format = self._yaxis_rhs.get('ticklabel_format', None)\n # if _ticklabel_format:\n # scilimits_str = _ticklabel_format.get('scilimitsl', \"(0, 0)\")\n # ax2.ticklabel_format(**_ticklabel_format)\n # ax2.ticklabel_format(axis='both', style='scientific', scilimits=(0,0))\n # plt.ticklabel_format(axis='y', style='scientific', useOffset=False)\n # ax2.ticklabel_format(axis='y', style='scientific', useOffset=False)\n if rhs_yticks:\n ax2.set_yticks(rhs_yticks)\n ax2.yaxis.set_label_position(\"right\")\n ax2.set_ylabel(rhs_axis_label)\n\n # fig.suptitle(self._title)\n self._figure.suptitle(self._title)\n ax.set_xlabel(self._xlabel)\n ax.set_ylabel(self._ylabel)\n # set frame on or off based on the Bool \"frame\" in .json input\n ax.set_frame_on(b=self._frame)\n if len(self._tick_params) > 0:\n ax.tick_params(**self._tick_params)\n ax.grid()\n ax.legend()\n\n if self._details:\n now = datetime.now()\n now_str = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n user = str(os.getlogin())\n host = str(os.getenv(\"HOSTNAME\"))\n details_str = (\n self._file + \" created \" + now_str + \" by \" + user + \" on \" + host\n )\n ax.set_title(details_str, fontsize=10, ha=\"center\", color=\"dimgray\")\n\n if self._display:\n plt.show()\n\n if self._serialize:\n self.serialize(self._folder, self._file)\n\n plt.close(\"all\")\n self._figure = None", "def init(self, data_len):\n self._t = 0\n self._data_len = data_len\n self._data = np.empty((data_len, 0))\n self._plots = [self._ax.plot([], [], '.', markersize=4, color='black', \n alpha=self._alpha)[0] for _ in range(data_len)]\n\n self._init = True", "def plot(self):\n pass", "def __init__(self):\n self.pt = Plotter(2, width=self.width, height=self.height)\n self.pt.use_grid()\n self.pt.set_title(\n \"Exponentials plotted from {:.1f} to {:.1f}\", self.xMin, self.xMax)\n self.pt.set_xlabel(\"X\")\n self.pt.set_ylabel(\"a*exp(-b*X)\")", "def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)", "def setup_axes():\n fig, ax = plt.subplots(1)\n\n ax.set_xlabel(\"Density [$n_H$ cm$^{-3}$]\")\n ax.set_ylabel(\"Temperature [K]\")\n\n ax.loglog()\n\n return fig, ax", "def figure():\n global fig\n return fig", "def set_canvas(self):\n self.ui.figure = plt.figure(figsize=(10, 10))\n self.ui.figure.patch.set_facecolor('None')\n self.ui.canvas = FigureCanvas(self.ui.figure)\n self.ui.canvas.setStyleSheet('background-color:transparent;')\n # Matplotlib toolbar\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, self)\n self.ui.toolbar.setMaximumHeight(30)\n self.ui.figureLayout.addWidget(self.ui.toolbar)\n 
self.ui.figureLayout.addWidget(self.ui.canvas)\n self.ui.canvas.mpl_connect('button_press_event', self.onclick)\n self.ui.canvas.mpl_connect('pick_event', self.onclick_pick)", "def init_figure(self): \r\n # add the big circle to represent the container\r\n BigCirc = plt.Circle((0,0), self.__ContainerRad, ec = 'b', fill = False, ls = 'solid')\r\n ax.add_artist(BigCirc)\r\n # initialise the axis to be animated and add it to the plot\r\n self.__text0 = ax.text(-9.9,9,\"f={:4d}\".format(0,fontsize=12))\r\n patches = [self.__text0]\r\n # add the patches for the balls to the plot\r\n for b in self.__ballList:\r\n pch = b.get_patch()\r\n ax.add_patch(pch)\r\n patches.append(pch)\r\n return patches", "def init(self, info):\n info.object.mpl_setup()\n return True", "def init(self, info):\r\n# info.object.mpl_setup()\r\n return True", "def setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)", "def __init__(self, *args, **kwargs):\n figtitle = kwargs.pop('figtitle', 'hi mom')\n Figure.__init__(self, *args, **kwargs)\n self.text(0.5, 0.95, figtitle, ha='center')\n self.history_pwm = np.zeros(100, dtype=float)\n self.history_rps = np.zeros(100, dtype=float)\n self.history_rps_target = np.zeros(100, dtype=float)\n self.t = range(100) \n self.subplots_adjust(left=0.25, bottom=0.35)\n self.lock = threading.Lock()\n self.param = MyParam()", "def draw_plot(self):\n # X axis is auto follow.\n XLEN = 100\n xmax = max(len(self.daq.data0), XLEN)\n xmin = xmax - XLEN\n\n # The Y value will lie between 0.0 and 5.0 volts\n ymax = 5.0\n ymin = 0.0\n\n self.main_plot.set_xbound(lower=xmin, upper=xmax)\n self.main_plot.set_ybound(lower=ymin, upper=ymax)\n\n # Add the grid. 
Grid looks cool and is actually very helpful.\n self.main_plot.grid(True, color='gray')\n\n pylab.setp(self.main_plot.get_xticklabels(), \n visible=True)\n \n self.plot_data.set_xdata(arange(len(self.daq.data0)))\n self.plot_data.set_ydata(array(self.daq.data0))\n \n self.canvas.draw()", "def setupPlotVariables(self):\n\n ### Borrowed from Thomas' plot routines\n self.plotLabels = [r'$m_1$', r'$m_2$', r'eccentricity', \\\n r'period (days)', \\\n r'inclination (rad)',r'$\\omega$ (rad)',r'$t_0$',r'$\\alpha$ (rad)']\n\n ### Change these to update the plot ranges for each\n ### parameter. \n angOut = np.pi+0.3\n self.plotLimsLo = [1.0, -1.0, -0.2, -1.0, -angOut, -angOut, -10,0]\n self.plotLimsHi = [2.2, 10.0, 1.2, 35.0, angOut, angOut, 10,1.2]\n\n ### We specify the method for the uniformly-spaced grid. If we\n ### want to make one of these logspace (say) we just change\n ### the method identified in the appropriate place in the\n ### list.\n nMeth = len(self.plotLimsLo)\n self.plotSpacerMethods = [np.linspace for i in range(nMeth)]\n\n self.plotNfine = 1000 ### number of fine points to use\n self.plotNcols = 3 ### number of columns in the plot\n\n self.plotNrows = int(np.ceil(nMeth/float(self.plotNcols)) )", "def __init__(self, plot_function, interactive=True):\n global plt\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise DependencyError(\n 'matplotlib must be installed to use PlotFunctionMonitor')\n if interactive:\n plt.ion()\n self._fig = plt.figure()\n else:\n plt.ioff()\n self._fig = None\n self._plot_function = plot_function", "def show(self):\n plt.show()", "def _ps_init(self):\n\n self.ps_ax.set_xlim(-np.pi, np.pi)\n self.ps_ax.set_ylim(-10, 10)\n self.ps_ax.set_xlabel(\"degree [rad]\")\n self.ps_ax.set_ylabel(\"velocity [rad/s]\")\n for ap in self.ps_plots:\n ap.set_data([], [])\n return self.ps_plots", "def plot_init(bottom_left: Point, top_right: Point):\n global figure\n global axes\n\n plt.ion()\n figure, axes = plt.subplots(1, 1)\n axes.set_xlim(bottom_left[0], top_right[0])\n axes.set_ylim(bottom_left[1], top_right[1])\n axes.set_aspect(\"equal\", adjustable=\"box\")", "def __init__(self, width, height, dpi, hasCalma, parent=None):\n\n # Create Figure instance (which stores our plots)\n self.fig = Figure(figsize=(2, 2), dpi=dpi, edgecolor='blue')\n\n # Add an initial plot to our figure\n self.canvasGraph = self.fig.add_subplot(111)\n\n # Fetch colour map\n self.colourMap = self.get_colour_map()\n\n # Initialize figure canvas, which initializes an instance of QtWidget\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n\n # Store reference to axes\n self.ax = self.fig.gca()\n\n # Hide tick labels to create default style\n self.ax.set_yticklabels([])\n self.ax.set_xticklabels([])\n\n # Add placeholder text\n if hasCalma:\n self.placeHolderText = self.fig.text(0.5, 0.65,'Click on a performance track for CALMA data',horizontalalignment='center',\n verticalalignment='center', fontsize=16)\n else:\n self.placeHolderText = self.fig.text(0.5, 0.65,'No CALMA data available for this query',horizontalalignment='center',\n verticalalignment='center',\n fontsize=16)\n\n # Make background transparent\n self.fig.patch.set_alpha(1.0)\n\n # Resize with window\n FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n self.setMinimumSize(self.size())", "def initialize(self, stepEntities):\n if self.fig is not None:\n self.fig = None\n if self.ax is not None:\n self.ax = None\n 
self.xCoordinates = []\n self.sourceName = []\n\n self.destinations = self.options['how']['how'].lower().split(',')\n\n if 'figureProperties' in self.options:\n key = 'figureProperties'\n if 'figsize' not in self.options[key]:\n self.options[key]['figsize'] = None\n else:\n if self.options[key]['figsize'] is not None:\n if isinstance(self.options[key]['figsize'], str):\n self.options[key]['figsize'] = tuple([float(elm) for elm in ast.literal_eval(self.options[key]['figsize'])])\n if 'dpi' not in self.options[key]:\n self.options[key]['dpi'] = 'None'\n if 'facecolor' not in self.options[key]:\n self.options[key]['facecolor'] = 'None'\n if 'edgecolor' not in self.options[key]:\n self.options[key]['edgecolor'] = 'None'\n if 'frameon' not in self.options[key]:\n self.options[key]['frameon'] = 'True'\n elif utils.stringIsTrue(self.options[key]['frameon']):\n self.options[key]['frameon'] = 'True'\n elif utils.stringIsFalse(self.options[key]['frameon']):\n self.options[key]['frameon'] = 'False'\n self.fig, self.ax = plt.subplots(num=self.name,\n figsize=self.options[key]['figsize'],\n dpi=ast.literal_eval(self.options[key]['dpi']),\n facecolor=self.options[key]['facecolor'],\n edgecolor=self.options[key]['edgecolor'],\n frameon=ast.literal_eval(self.options[key]['frameon']),\n **self.options[key].get('attributes', {}))\n else:\n self.fig, self.ax = plt.subplots(num=self.name)\n if 'screen' in self.destinations and display:\n self.fig.show()\n\n if self.dim == 3:\n self.ax.remove() # remove axis since it was initialized for 2-d plots\n self.ax = self.fig.add_subplot(111, projection='3d') # replace with 3-d axis\n\n # initialize lists\n for pltIndex in range(len(self.options['plotSettings']['plot'])):\n self.colorMapCoordinates[pltIndex] = None\n if 'y' in self.options['plotSettings']['plot'][pltIndex]:\n self.yCoordinates = []\n if 'z' in self.options['plotSettings']['plot'][pltIndex]:\n self.zCoordinates = []\n if 'clusterLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.clusterLabels = []\n if 'mixtureLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.mixtureLabels = []\n if 'attributes' in self.options['plotSettings']['plot'][pltIndex]:\n if 'mixtureMeans' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureMeans = []\n if 'mixtureCovars' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureCovars = []\n\n for pltIndex in range(len(self.options['plotSettings']['plot'])):\n # fill lists\n self.xCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['x'].split(','))\n self.sourceName.append(self.xCoordinates [pltIndex][0].split('|')[0].strip())\n if 'y' in self.options['plotSettings']['plot'][pltIndex]:\n self.yCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['y'].split(','))\n if self.yCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. y_coord source is {self.yCoordinates[pltIndex][0].split(\"|\")[0]}')\n if 'z' in self.options['plotSettings']['plot'][pltIndex]:\n self.zCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['z'].split(','))\n if self.zCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. 
z_coord source is {self.zCoordinates [pltIndex][0].split(\"|\")[0]}')\n if 'clusterLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.clusterLabels.append(self.options['plotSettings']['plot'][pltIndex]['clusterLabels'].split(','))\n if self.clusterLabels[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. clusterLabels source is {self.clusterLabels [pltIndex][0].split(\"|\")[0]}')\n if 'mixtureLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.mixtureLabels.append(self.options['plotSettings']['plot'][pltIndex]['mixtureLabels'].split(','))\n if self.mixtureLabels[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. mixtureLabels source is {self.mixtureLabels [pltIndex][0].split(\"|\")[0]}')\n if 'colorMap' in self.options['plotSettings']['plot'][pltIndex]:\n self.colorMapCoordinates[pltIndex] = self.options['plotSettings']['plot'][pltIndex]['colorMap'].split(',')\n if self.colorMapCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. colorMap_coordinates source is {self.colorMapCoordinates[pltIndex][0].split(\"|\")[0]}')\n # update options\n if 'interpPointsY' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpPointsY'] = '20'\n if 'interpPointsX' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpPointsX'] = '20'\n if 'interpolationType' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpolationType'] = 'linear'\n elif self.options['plotSettings']['plot'][pltIndex]['interpolationType'] not in self.availableInterpolators:\n self.raiseAnError(IOError, f'surface interpolation unknown. Available are : {self.availableInterpolators}')\n if 'epsilon' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['epsilon'] = '2'\n if 'smooth' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['smooth'] = '0.0'\n if ('cmap' not in self.options['plotSettings']['plot'][pltIndex]) or (self.options['plotSettings']['plot'][pltIndex]['cmap'] is None):\n self.options['plotSettings']['plot'][pltIndex]['cmap'] = 'None'\n elif (self.options['plotSettings']['plot'][pltIndex]['cmap'] != 'None') and (self.options['plotSettings']['plot'][pltIndex]['cmap'] not in matplotlib.cm.datad):\n self.raiseAnError(IOError, f'The colorMap \"{self.options[\"plotSettings\"][\"plot\"][pltIndex][\"cmap\"]}\" does not exist... Available are {matplotlib.cm.datad.keys()}')\n if 'interpolationTypeBackUp' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpolationTypeBackUp'] = 'nearest'\n elif self.options['plotSettings']['plot'][pltIndex]['interpolationTypeBackUp'] not in self.availableInterpolators:\n self.raiseAnError(IOError, f'surface interpolation (BackUp) unknown. 
Available are : {self.availableInterpolators}')\n if 'attributes' in self.options['plotSettings']['plot'][pltIndex]:\n if 'mixtureMeans' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureMeans.append(self.options['plotSettings']['plot'][pltIndex]['attributes']['mixtureMeans'].split(','))\n if 'mixtureCovars' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureCovars.append(self.options['plotSettings']['plot'][pltIndex]['attributes']['mixtureCovars'].split(','))\n self.numberAggregatedOS = len(self.options['plotSettings']['plot'])\n # collect sources\n self.legacyCollectSources(stepEntities)\n # initialize here the base class\n super().initialize(stepEntities)\n # execute actions (we execute the actions here also because we can perform a check at runtime!!\n self.__executeActions()", "def __init__(self, **kwargs): \n self.kwargs = kwargs\n\n # pretty figure up\n prettyplot() \n pretty_colors = prettycolors() \n \n self.fig = plt.figure(1) \n self.sub = self.fig.add_subplot(111) \n\n self.hist_max = 0.0", "def __createCanvas(self):\r\n # create a canvas and pass a figure to it\r\n self.figure = plt.figure()\r\n self.canvas = FigureCanvas(self.figure)\r\n\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(1, 1, 1) # 1X1 grid, 1st subplot\r\n self.canvas.axes.set_title(\"Plot\")\r\n\r\n # create Navigation widget and pass a Canvas widget and the parent\r\n self.toolbar = NavigationToolbar(self.canvas, self)", "def __init__(self):\n #region trivial\n self.argumentParser = reqparse.RequestParser()\n\n self.argumentParser.add_argument('plotName', type=str, default=\"\", required=False,\n help=\"The name of the plot\")\n\n self.argumentParser.add_argument('xLabel', type=str, default=\"\", required=False,\n help=\"The label of the x axes\")\n\n self.argumentParser.add_argument('yLabel', type=str, default=\"\", required=False,\n help=\"The label of the y axes\")\n\n # color of plot\n self.argumentParser.add_argument('plotNameColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of the plot name\")\n self.argumentParser.add_argument('xAxisLabelColor', type=str, default=ColorCollection.black, required=False,\n help=\"The label color of the x axes\")\n self.argumentParser.add_argument('yAxisLabelColor', type=str, default=ColorCollection.black, required=False,\n help=\"The label color of the y axes\")\n\n self.argumentParser.add_argument('bottomSpineColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of the plot 's bottom spine\")\n self.argumentParser.add_argument('topSpineColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of the plot 's top spine\")\n self.argumentParser.add_argument('leftSpineColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of the plot 's left spine\")\n self.argumentParser.add_argument('rightSpineColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of the plot 's right spine\")\n\n self.argumentParser.add_argument('figureBackgroundColor', type=str, default=ColorCollection.white,\n required=False,\n help=\"\")\n self.argumentParser.add_argument('axesBackgroundColor', type=str, default=ColorCollection.white, required=False,\n help=\"\")\n\n\n #endregion\n\n #region ticks\n self.argumentParser.add_argument('xTickColor', type=str, default=ColorCollection.black, required=False,\n help=\"The color of tick in x\")\n 
self.argumentParser.add_argument('yTickColor', type=str, default=ColorCollection.black, required=False,\n help=\"he color of tick in y\")\n self.argumentParser.add_argument('xMinorLocatorValue', type=float, default=0, required=False,\n help=\"The amount of minor ticks of this value multiply\")\n self.argumentParser.add_argument('yMinorLocatorValue', type=float, default=0, required=False,\n help=\"The amount of minor ticks of this value multiply\")\n #endregion", "def __init__(self, nx, ny, nxsize=5.4, nysize=6.2):\n self.nx = nx\n self.ny = ny\n self.n = 1\n plt.figure(figsize=(nysize*ny, nxsize*nx))\n plt.subplot(nx, ny, self.n)", "def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return", "def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return", "def create_init_fig(wrapped_signal, freq_arr, xcm_arr):\n \n fig, ax = pyplot.subplots(figsize=(10.0, 5.0))\n pyplot.tight_layout()\n fig.suptitle('Frequency = {:.2f}'.format(freq_arr[0]))\n\n ax1 = pyplot.subplot2grid((1, 3), (0, 0))\n ax2 = pyplot.subplot2grid((1, 3), (0, 1), colspan=2)\n\n circle1 = pyplot.Circle((0, 0), 1, fill=None, lw=2, ls='--', alpha=0.3)\n\n ax1.add_patch(circle1)\n ax1.grid()\n\n ticks= numpy.linspace(-1,1, 5, endpoint=True)\n\n ylabels = [-1, -0.5, None, 0.5, 1]\n\n ax1.set_xticks(ticks)\n ax1.set_yticks(ticks)\n ax1.set_yticklabels(ylabels)\n\n\n wrapped_signal_plot = ax1.plot(wrapped_signal.real, \n wrapped_signal.imag, alpha=0.5,\n label=r'$g(t)e^{2\\pi ift}$')[0]\n\n # Move left y-axis and bottim x-axis to centre, passing through (0,0)\n ax1.spines['left'].set_position('center')\n ax1.spines['bottom'].set_position('center')\n\n # Eliminate upper and right axes\n ax1.spines['right'].set_color('none')\n ax1.spines['top'].set_color('none')\n\n\n ax1.set_adjustable('box')\n ax1.set_aspect('equal')\n ax1.set_xlim(-1.1,1.1)\n ax1.set_ylim(-1.1,1.1)\n ax1.legend(loc='upper left', bbox_to_anchor=(0.48, 1.12))\n\n #f_list = numpy.full_like(freqs, None)\n almost_fourier_plot = ax2.plot(freq_arr[0], xcm_arr[0], '-')[0]\n ax2.spines['right'].set_color('none')\n ax2.spines['top'].set_color('none')\n ax2.set_adjustable('box')\n ax2.set_aspect('equal')\n ax2.set_xlabel('Frequency')\n ax2.set_ylabel('xcm')\n\n ax2.set_xlim(0.9,5.1)\n ax2.set_ylim(-0.3,1.1)\n ax2.grid()\n pyplot.tight_layout()\n pyplot.close()\n \n return {'fig': fig, 'WSP': wrapped_signal_plot, 'AF': almost_fourier_plot}", "def initialize_figure(\n self,\n mosaic: Optional[List[List[str]]] = None,\n figsize: Tuple[int, int] = (10, 8),\n cmap: str = \"tab10\",\n return_ax: bool = False,\n ) -> None:\n if mosaic is None:\n mosaic = self.get_default_mosaic()\n\n self.cmap = plt.get_cmap(cmap)\n\n figure, axes = plt.subplot_mosaic(mosaic, figsize=figsize)\n if return_ax:\n return axes\n\n self.figure = figure\n self.axes = axes\n self.mosaic = mosaic", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plot():\n pass", "def _init(self) -> List[PlotType]:\n self.plots[0].set_data([], [], 'bx', markersize=5)\n self.plots[1].set_data([], [], 'r.', markersize=15)\n return self.plots", "def 
initialize_plot(self):\n\n self.scat = self.config.ax.scatter(self.lons[0, 0], self.lats[0, 0], c=self.z[0,0]\n # , vmax=self.vmax\n # , vmin=self.vmin\n # ,cmap='coolwarm'\n # , norm=LogNorm()\n , transform=self.config.projection, edgecolor='none', s=0.6)\n cbar = plt.colorbar(self.scat)\n # cbar.set_label('Melt water Flux (mm/yr)')\n\n self.ttl = self.config.ax.text(1.5, 1.05, '', transform=self.config.ax.transAxes, va='center')\n\n return self.scat", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def __init__(self, parent=None):\n super().__init__()\n\n self.parent = parent\n\n # plot object, can be 2D or 3D\n self.plt = None", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def _init_world_plot():\n # Initialise figure and axis\n fig = plt.figure()\n ax = fig.subplots()\n\n # Hide axes ticks\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # Colour map\n colour_map = plt.get_cmap('RdYlGn').reversed()\n\n return fig, ax, colour_map", "def figure():\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.set_aspect('equal')\n return fig, ax", "def __init__(self):\n self.km, self.kc = \\\n start_new_kernel(extra_arguments=['--matplotlib=inline'],\n stderr=open(os.devnull, 'w'))", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def figure(figsize=None,xSize=3.5,ySize=3.5,dpi=600,kw_setup=dict(),**kw):\n plot_setup(**kw_setup)\n if (figsize is not None):\n xSize = figsize[0]\n ySize = figsize[1]\n return plt.figure(figsize=(xSize,ySize),dpi=dpi,**kw)", "def _plotDisplay(self):\n self.gc.tick_labels.set_xformat('ddd')\n self.gc.tick_labels.set_yformat('ddd')\n if self.csys == 'GAL':\n if self.xlabel is None: self.xlabel = r'Galactic longitude $l$ $(^{\\circ})$'\n if self.ylabel is None: self.ylabel = r'Galactic latitude $b$ $(^{\\circ})$'\n else:\n if self.xlabel is None: self.xlabel = r'RA (J2000)'\n if self.ylabel is None: self.ylabel = r'Dec (J2000)'\n self.gc.axis_labels.set_xtext(self.xlabel)\n self.gc.axis_labels.set_ytext(self.ylabel)\n self.gc.set_axis_labels_font(size=self.ftsize1)\n self.gc.tick_labels.set_font(size=self.ftsize2) # <====== perhaps a string here?\n self.gc.ticks.set_color('black')", "def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".pdf\")\n if show is False:\n plt.close()\n else:\n plt.show()\n return", "def plot_settings(clear = True, grid = True):\n if clear:\n plt.clf() # Clears any previous figures\n\n # Setting figure size\n figure = plt.gcf()\n figure.set_size_inches(18, 10)\n\n # Setting size of plot elements\n plt.rc('axes', labelsize = 22, titlesize = 24) \n plt.rc('xtick', labelsize = 18) \n plt.rc('ytick', labelsize = 18) \n plt.rc('legend', fontsize = 20)\n plt.rc('axes', axisbelow = True) # Ensures that the grid is behind any graph 
elements\n if grid:\n plt.grid() # Adds a grid to the plot", "def setup_layout(self):\n\n # check if we should animate plot\n anim = self.get_option(self.sctn,'animate')\n if anim != None:\n self.animate = anim.lower() in ['t','true','1']\n else:\n self.animate = False\n self.anim_range=[]\n t = self.get_option(self.sctn,'anim_start')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(0)\n t = self.get_option(self.sctn,'anim_end')\n if t!=None:\n self.anim_range.append(int(t))\n else:\n self.anim_range.append(5)\n \n self.times = self.get_option(self.sctn,'times')\n if self.times == \"None\":\n self.times = [None]\n else:\n self.times = self.times.split()\n \n if len(self.variables)>1:\n self.numdata = len(self.variables)\n else:\n self.numdata = len(self.times)\n try:\n self.numcol = int(self.get_option(self.sctn,'ncol'))\n except:\n self.numcol = self.numdata\n if len(self.variables)>1:\n self.numrow = len(self.times)\n else:\n self.numrow = 1", "def __init__(self, plot_location=None):\n if plot_location == None:\n root = tk.Tk()\n root.withdraw()\n file_name = askopenfilename()\n root.destroy()\n else:\n file_name = plot_location\n\n data = open(file_name, \"r\").read()\n\n x_axis = []\n print(\"yay im here\")\n data = data.splitlines()\n for i in range(0, len(data)):\n data[i] = float(data[i])\n x_axis.append(i)\n plt.bar(x_axis, data, 1.0, color=\"blue\")\n plt.title(\"Loss\")\n plt.xlabel(\"Step\")\n plt.ylabel(\"Loss\")\n plt.show(block=True)" ]
[ "0.79452497", "0.79369026", "0.77807647", "0.77807647", "0.7650264", "0.75176746", "0.7469306", "0.7419887", "0.728096", "0.7241806", "0.722278", "0.7206917", "0.71901965", "0.7183423", "0.70679206", "0.704637", "0.7034995", "0.69461167", "0.68985367", "0.6886135", "0.68792033", "0.6869375", "0.68621343", "0.68405503", "0.6790598", "0.6771148", "0.67506367", "0.67424726", "0.67268133", "0.66729224", "0.6650616", "0.66425705", "0.6628639", "0.6612065", "0.6608695", "0.65842533", "0.65819895", "0.65552425", "0.6517089", "0.6502525", "0.6499272", "0.6495815", "0.6472292", "0.64470917", "0.64395505", "0.6398792", "0.6368199", "0.6365128", "0.63584834", "0.6357339", "0.635031", "0.6308987", "0.63085073", "0.6304556", "0.6289843", "0.627684", "0.6274933", "0.62627697", "0.62578547", "0.62287945", "0.62106526", "0.62069196", "0.6206436", "0.62053573", "0.62039375", "0.6196432", "0.6191421", "0.6188366", "0.6178026", "0.6173749", "0.61682683", "0.61631376", "0.6153086", "0.6145523", "0.61352766", "0.61300534", "0.6129207", "0.61236244", "0.6122523", "0.6120557", "0.6120557", "0.61144185", "0.61132926", "0.61087817", "0.6107658", "0.6103956", "0.6098239", "0.60948384", "0.60745853", "0.6064338", "0.6061189", "0.60507554", "0.6048614", "0.604248", "0.6031366", "0.602765", "0.6025895", "0.60042626", "0.6000643", "0.5991272" ]
document_score: 0.63938004
document_rank: 46
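Editor's note: several negatives in the record that closes above revolve around embedding a Matplotlib figure in a Tk widget (FigureCanvasTkAgg, canvas.draw(), get_tk_widget().pack(...)). For readers skimming the dump, here is a minimal, self-contained sketch of that pattern; the plotted data, window title, and layout choices are illustrative assumptions, not taken from any snippet above.

# Minimal sketch: embed a Matplotlib figure in a Tk window.
# The plotted data and sizes are arbitrary illustrative choices.
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()
root.title("Embedded Matplotlib figure")

fig = Figure(figsize=(4, 3), dpi=100)   # plain Figure, no pyplot global state
ax = fig.add_subplot(111)
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])

canvas = FigureCanvasTkAgg(fig, master=root)  # Tk-backed drawing canvas
canvas.draw()                                 # render the figure
canvas.get_tk_widget().pack(side="top", fill="both", expand=True)

root.mainloop()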
query: Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
document: def update(self, key): return self.state
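The paired document above is a one-line stub that ignores key, while the query describes key-tuple-driven frame selection. As a reading aid, here is a hypothetical minimal implementation of that contract; the class and attribute names (KeyedPlot, frames, current_key) are invented for illustration and are not claimed to match the dataset's source library.

# Hypothetical sketch of the behavior the query describes; all names
# are illustrative, not taken from the source library of this record.
class KeyedPlot:
    def __init__(self, frames):
        self.frames = dict(frames)     # key tuple -> frame data
        self.keys = list(self.frames)  # ordered key tuples
        self.current_key = None
        self.current_frame = None

    @property
    def state(self):
        # The current frame; the one-line stub above returns this directly.
        return self.current_frame

    def update(self, key):
        # Integers index into the ordered keys ("integers represent frames");
        # tuples are looked up directly.
        if isinstance(key, int):
            key = self.keys[key]
        if key != self.current_key:
            self.current_key = key
            self.current_frame = self.frames[key]
        return self.state

For example, KeyedPlot({(0,): 'a', (1,): 'b'}).update(1) resolves the integer to the key tuple (1,) and returns 'b'.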
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_frame(self, key, ranges=None):", "def _get_frame(self, key):\n layout_frame = self.layout.clone(shared_data=False)\n keyisint = isinstance(key, int)\n if not isinstance(key, tuple): key = (key,)\n nthkey_fn = lambda x: zip(tuple(x.name for x in x.kdims),\n list(x.data.keys())[min([key[0], len(x)-1])])\n if key == self.current_key:\n return self.current_frame\n else:\n self.current_key = key\n\n for path, item in self.layout.items():\n if self.dynamic == 'open':\n if keyisint:\n counts = item.traverse(lambda x: x.counter, (DynamicMap,))\n if key[0] >= counts[0]:\n item.traverse(lambda x: next(x), (DynamicMap,))\n dim_keys = item.traverse(nthkey_fn, (DynamicMap,))[0]\n else:\n dim_keys = zip([d.name for d in self.dimensions\n if d in item.dimensions('key')], key)\n self.current_key = tuple(k[1] for k in dim_keys)\n elif item.traverse(lambda x: x, [DynamicMap]):\n with dimensionless_cache(item, not self._force or not self.drawn):\n key, frame = util.get_dynamic_item(item, self.dimensions, key)\n layout_frame[path] = frame\n continue\n elif self.uniform:\n dim_keys = zip([d.name for d in self.dimensions\n if d in item.dimensions('key')], key)\n else:\n dim_keys = item.traverse(nthkey_fn, (HoloMap,))[0]\n if dim_keys:\n obj = item.select((HoloMap,), **dict(dim_keys))\n if isinstance(obj, HoloMap) and len(obj) == 0:\n continue\n else:\n layout_frame[path] = obj\n else:\n layout_frame[path] = item\n traverse_setter(self, '_force', False)\n\n self.current_frame = layout_frame\n return layout_frame", "def update_frame(self, key, ranges=None, plot=None):\n element = self._get_frame(key)\n source = self.handles['source']\n data, mapping = self.get_data(element, ranges)\n self._update_datasource(source, data)", "def __getitem__(self, frame):\n if not self.dynamic == 'open' and isinstance(frame, int) and frame > len(self):\n self.warning(\"Showing last frame available: %d\" % len(self))\n if not self.drawn: self.handles['fig'] = self.initialize_plot()\n if not self.dynamic == 'open' and not isinstance(frame, tuple):\n frame = self.keys[frame]\n self.update_frame(frame)\n return self.state", "def update(self, key, val):\n state_dict = self.todict()\n assert key in state_dict\n state_dict[key] = val\n return self.state_factory.build(state_dict)", "def update_key(self):\n self.__prev_key = self.__new_key", "def __setitem__(self, key, value):\n if (key in ['__id', '__src_id', '__dst_id']):\n raise KeyError('Cannot modify column %s. 
Changing __id column will\\\n change the graph structure' % key)\n else:\n self.__is_dirty__ = True\n super(GFrame, self).__setitem__(key, value)", "def __setitem__(self, key: tuple, value: float):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n self.store.setdefault(s, dict())[a] = value", "def send_state(self, key=None):\n state = self.get_state(key=key)\n if len(state) > 0:\n if self._property_lock: # we need to keep this dict up to date with the front-end values\n for name, value in state.items():\n if name in self._property_lock:\n self._property_lock[name] = value\n state, buffer_paths, buffers = _remove_buffers(state)\n msg = {'method': 'update', 'state': state, 'buffer_paths': buffer_paths}\n self._send(msg, buffers=buffers)", "def _set_tuple_structure(self, key):\n if len(key) == 2:\n self.ks = list(np.array(key[1]))\n self.set_neighs(key[0])", "def __setitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___setitem__(self, *args)", "def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = (int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )", "def _set_tuple_tuple_structure(self, key):\n if len(key) == 2:\n ks = [key[1]] if type(key[1]) == int else key[1]\n self.ks = list(np.array([ks]).ravel())\n self._set_tuple_only_structure(key[0])", "def __setitem__(self, key, value):\n assert not isinstance(value, Slot), \\\n \"Can't use setitem to connect slots. 
Use connect()\"\n assert self.level == 0, \\\n (\"setitem can only be used with slots of level 0.\"\n \" Did you forget to append a key?\")\n assert self.operator is not None, \\\n \"cannot do __setitem__ on Slot '{}' -> no operator !!\"\n assert slicingtools.is_bounded(key), \\\n \"Can't use Slot.__setitem__ with keys that include : or ...\"\n roi = self.rtype(self, pslice=key)\n if self._value is not None:\n self._value[key] = value\n\n # only propagate the dirty key at the very beginning of\n # the chain\n self.setDirty(roi)\n if self._type == \"input\":\n self.operator.setInSlot(self, (), roi, value)\n\n # Forward to partners\n for p in self.partners:\n p[key] = value", "def __getitem__(self, key):\n nrows, ncols = self.get_geometry()\n\n def _normalize(key, size): # Includes last index.\n if isinstance(key, slice):\n start, stop, _ = key.indices(size)\n if stop > start:\n return start, stop - 1\n else:\n if key < 0:\n key += size\n if 0 <= key < size:\n return key, key\n raise IndexError(\"invalid index\")\n\n if isinstance(key, tuple):\n try:\n k1, k2 = key\n except ValueError:\n raise ValueError(\"unrecognized subplot spec\")\n num1, num2 = np.ravel_multi_index(\n [_normalize(k1, nrows), _normalize(k2, ncols)], (nrows, ncols))\n else: # Single key\n num1, num2 = _normalize(key, nrows * ncols)\n\n return SubplotSpec(self, num1, num2)", "def _get_frame(self, key):\n pass", "def __setitem__(self, *args):\n return _osgAnimation.vectorFloatKeyframe___setitem__(self, *args)", "def update( self ):\n\t\t# Read the current state\n\t\tself.read( store=1 )\n\t\tfor key in range( 12 ):\n\t\t\t# Key state has changed? (pressed or release)\n\t\t\tif self.touched[key] != self.touched[key+12]:\n\t\t\t\tself.debug( \"Key %i is %s\" %(key,\"PRESSED\" if self.touched[key]>0 else \"Released\") )\n\t\t\t\tif self.on_key_change:\n\t\t\t\t\tself.on_key_change( key, pressed=(self.touched[key]>0) )\n\t\t\t\t# remember the current state as last state\n\t\t\t\tself.touched[key+12]=self.touched[key]", "def update_overlaid_plot(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n waveforms = [trigger, trace]\n\n first_peak, second_peak = self.get_windowed_data(waveforms[0], waveforms[1])\n self.overlaid_lines[0].set_ydata(first_peak)\n self.overlaid_lines[0].set_xdata(range(len(first_peak)))\n self.overlaid_lines[1].set_ydata(second_peak)\n self.overlaid_lines[1].set_xdata(range(len(second_peak)))\n\n areas = [integ.simps(first_peak), integ.simps(second_peak)]\n labels = ['%.1f' % areas[0], '%.1f' % areas[1]]\n\n# for area in areas:\n# if area < 0.1:\n# raise RangeError # calculation warning error for example\n self.ax2.legend([self.overlaid_lines[0], self.overlaid_lines[1]],\n labels)\n\n self.draw()", "def _refreshKey(self, displayKey):\n refreshRect = Rect(*displayKey.scaled)\n refreshRect.Inflate(2, 2)\n self.RefreshRect(refreshRect.Get())", "def __setitem__(self, *args):\n return _osgAnimation.vectorQuatKeyframe___setitem__(self, *args)", "def __setitem__(self, key, value: numbers.Number) -> None:\n if key in self.layout.bladeTupMap.keys():\n self.value[self.layout.bladeTupMap[key]] = value\n elif isinstance(key, tuple):\n sign, blade = compute_reordering_sign_and_canonical_form(key, np.array(self.layout.sig),\n self.layout.firstIdx)\n self.value[self.layout.bladeTupMap[blade]] = sign*value\n else:\n self.value[key] = value", "def __setitem__(self, key: tuple, 
value: float):\n s, a = key\n self.store.setdefault(s, dict())[a] = value", "def _set_tuple_k_structure(self, key):\n self.ks = [key[1]] if type(key[1]) == int else key[1]\n self.set_neighs(key[0])", "def __getitem__(self, key):\n return self._to_draw[key]", "def __setitem__(self, *args):\n return _osgAnimation.vectorVec3Keyframe___setitem__(self, *args)", "def myUpdate(self, stateDict=None):\n\n # store stateDict so we can replot on changing dark theme\n if stateDict is None and self.stateDict is not None:\n # re-use our stateDict\n stateDict = self.stateDict\n else:\n if stateDict is None:\n return\n self.stateDict = stateDict.copy()\n\n if stateDict is None:\n return\n \n dataType = stateDict['dataType']\n hue = stateDict['hue']\n groupByColumnName = stateDict['groupByColumnName']\n\n plotType = stateDict['plotType']\n #self.plotType = plotType\n\n xStatHuman = stateDict['xStatHuman']\n yStatHuman = stateDict['yStatHuman']\n\n xStat = stateDict['xStat']\n yStat = stateDict['yStat']\n\n '''\n print('=== myMplCanvas.myUpdate()')\n print(' ', plotType)\n print(' ', 'xStatHuman:', xStatHuman, 'yStatHuman:', yStatHuman)\n print(' ', 'xStat:', xStat, 'yStat:', yStat)\n '''\n\n xIsCategorical = stateDict['xIsCategorical']\n yIsCategorical = stateDict['yIsCategorical']\n\n masterDf = stateDict['masterDf']\n meanDf = stateDict['meanDf']\n\n self.plotDf = meanDf\n\n self.canvas.axes.clear()\n\n picker = 5\n if plotType in ['Scatter Plot', 'Scatter + Raw + Mean']:\n # scatter plot user selection\n self.scatterPlotSelection, = self.canvas.axes.plot([], [], 'oy',\n markersize=12, fillstyle='none')\n\n # main scatter\n try:\n self.whatWeArePlotting = sns.scatterplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes, picker=picker,\n zorder=0)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print(' EXCEPTION: in myUpdate() \"Scatter Plot\", exception is:')\n print(' ', e)\n print(' ', 'plotType:', plotType)\n print(' ', 'xStat:', xStat)\n print(' ', 'yStat:', yStat)\n print(' ', 'hue:', hue)\n\n # sem in both x and y, pulling from masterDf\n if dataType=='File Mean' or plotType=='Scatter + Raw + Mean':\n # we need to do this for each hue???\n # if x or y is in categorical (e.g. 
a string) then do not do this ...\n if xIsCategorical or yIsCategorical:\n pass\n else:\n print(' grabbing mean +- sem for self.groupByColumnName:', groupByColumnName)\n color = 'k'\n xd = masterDf.groupby(groupByColumnName).mean()[xStat]\n xerrd = masterDf.groupby(groupByColumnName).sem()[xStat]\n yd = masterDf.groupby(groupByColumnName).mean()[yStat]\n yerrd = masterDf.groupby(groupByColumnName).sem()[yStat]\n \n # logger.info('2023 declan')\n # print(' groupByColumnName:', groupByColumnName)\n # print(' xd:', xd)\n # print(' yd:', yd)\n # print(' xerrd:', xerrd)\n # print(' yerrd:', yerrd)\n \n self.canvas.axes.errorbar(xd, yd, xerr=xerrd, yerr=yerrd,\n fmt='none', capsize=0, zorder=10, color=color, alpha=0.5);\n\n elif plotType == 'Histogram':\n yStatHuman = 'Count'\n doKde = False #stateDict['doKDE']\n try:\n g = sns.histplot(x=xStat, hue=hue, kde=doKde,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTIONin Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Violin Plot':\n if not xIsCategorical:\n warningStr = 'Violin plot requires a categorical x statistic'\n else:\n g = sns.violinplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Box Plot':\n if not xIsCategorical:\n warningStr = 'Box plot requires a categorical x statistic'\n else:\n g = sns.boxplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Raw + Mean Plot':\n if not xIsCategorical:\n warningStr = 'Raw + Mean plot requires a categorical x statistic'\n else:\n try:\n # does not work here for categorical x\n #self.scatterPlotSelection, = self.canvas.axes[0].plot([], [], 'oy',\n # markersize=12, fillstyle='none')\n\n '''\n colorList = [('red'), ('green'), 'b', 'c', 'm', 'y']\n hueList = meanDf[hue].unique()\n palette = {}\n for idx, hue in enumerate(hueList):\n palette[hue] = colorList[idx]\n print(palette)\n '''\n\n palette = sns.color_palette(\"Paired\")\n #palette = ['r', 'g', 'b']\n\n # stripplot\n #g = sns.swarmplot(x=xStat, y=yStat,\n g = sns.stripplot(x=xStat, y=yStat,\n hue=hue,\n palette=palette,\n data=meanDf,\n ax=self.canvas.axes,\n #color = color,\n dodge=True,\n alpha=0.6,\n picker=picker,\n zorder=1)\n\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n logger.info(f' REMAKING LEGEND sns.pointplot() plotNumber:{self.plotNumber}')\n handles, labels = self.canvas.axes.get_legend_handles_labels()\n l = self.canvas.axes.legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n #self.myLegend = self.canvas.axes.Legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n '''\n if self.darkTheme:\n color = 'w'\n else:\n color = 'k'\n color = [color] * len(hueList)\n print('color:', color)\n '''\n\n self.whatWeArePlotting = sns.pointplot(x=xStat, y=yStat,\n hue=hue,\n #palette=palette,\n data=meanDf,\n estimator=np.nanmean,\n errorbar=('ci', 68),\n capsize=0.1,\n ax=self.canvas.axes,\n color='r',\n #legend='full',\n #zorder=10)\n )\n except (ValueError) as e:\n print('EXCEPTION in \"Raw + Mean Plot\":', e)\n traceback.print_exc()\n\n elif plotType == 'Regression Plot':\n # regplot does not have hue\n if xIsCategorical or yIsCategorical:\n warningStr = 'Regression plot requires continuous x and y statistics'\n else:\n # todo: loop and make a regplot\n # for each unique() name in\n # hue (like Region, Sex, Condition)\n hueList = masterDf[hue].unique()\n for oneHue in hueList:\n if oneHue == 'None':\n continue\n tmpDf = meanDf [ meanDf[hue]==oneHue ]\n #print('regplot oneHue:', oneHue, 'len(tmpDf)', len(tmpDf))\n sns.regplot(x=xStat, y=yStat, data=tmpDf,\n ax=self.canvas.axes);\n else:\n print(' did not understand plot type:', plotType)\n\n\n #\n # update\n self.canvas.axes.figure.canvas.mpl_connect(\"pick_event\", self.onPick)\n\n self.mplCursorHover = None\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n\n #\n #self.mySetStatusBar(warningStr)\n\n self.canvas.axes.spines['right'].set_visible(False)\n self.canvas.axes.spines['top'].set_visible(False)\n\n if not stateDict['showLegend']:\n #print('self.canvas.axes.legend():', self.canvas.axes.legend())\n #print('self.canvas.axes.legend:', self.canvas.axes.legend)\n #if self.canvas.axes.legend() is not None:\n if 1:\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #print('myUpdate() self.plotSize:', self.plotSize)\n self.canvas.axes.set_xlabel(xStatHuman)\n self.canvas.axes.set_ylabel(yStatHuman)\n '''\n if self.plotSize == 'paper':\n fontsize = 10\n self.canvas.axes[0].set_xlabel(xStatHuman, fontsize=fontsize)\n self.canvas.axes[0].set_ylabel(yStatHuman, fontsize=fontsize)\n else:\n self.canvas.axes[0].set_xlabel(xStatHuman)\n self.canvas.axes[0].set_ylabel(yStatHuman)\n '''\n\n # subplots_adjust\n #self.fig.canvas.draw_idle()\n self.fig.canvas.draw()", "def __setitem__(self, key, value):\n self.xg[key] = value", "def setValue(self, *args):\n return _osgAnimation.MatrixKeyframe_setValue(self, *args)", "def __setitem__(self, key: Tuple[int, int], value: complex) -> None:\n self.coeff[self._core.index_alpha(key[0]),\n self._core.index_beta(key[1])] = value", "def __setstate__(self, dct):\n\t\tself.__dict__ = dct\n\t\tself.image = None\n\t\tself.draw() # Regenerate Image", "def updateKeys(self, _key):\n\t\tif _key == curses.KEY_UP:\n\t\t\tself.pointer.dec(1)\n\t\telif _key == curses.KEY_DOWN:\n\t\t\tself.pointer.inc(1)\n\t\telif _key == 261 or _key == 10: # Execute (Key RIGHT / ENTER)\n\t\t\tif len(self.actions) == 0:\n\t\t\t\tpass\n\t\t\telif len(self.actions) == 1:\t# If only one action, execute this, no matter what menu entry is selected\n\t\t\t\tself.actions[0]()\n\t\t\telif len(self.actions) == len(self.content):\n\t\t\t\tself.actions[self.pointer.get()]()\n\t\tself.highlight(self.pointer.get())\n\t\t# highlight linked objects\n\t\tfor o in self.linkedObjects:\n\t\t\to.highlight(self.pointer.get())\n\t\treturn (self.id, _key)\t\t# send key back, to handle in main program", "def __setitem__(self, key, value):\n if not isinstance(key, int):\n raise TypeError(f\"Expected an int for index, not {type(key)}\")\n\n if not (0 <= key < self.led_count):\n raise ValueError(\"Index is out of range\")\n\n # value is GRB\n g = value[0]\n r = value[1]\n b = value[2]\n\n self.set_pixel(key, r, g, b)", "def key_state(self, key):\r\n return self.handler.key_state(key_to_code(key))", "def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()", "def __setitem__(self, key, value):\r\n T=type(key)\r\n if T!=types.IntType and T!=types.LongType:\r\n raise TypeError, \"index must be integer\"\r\n\r\n if key==0: self.x = value\r\n elif key==1: self.y = value\r\n elif key==2: self.z = value\r\n elif key==3: self.w = value\r\n else:\r\n raise IndexError,\"index out of range\"", "def _set_structure_tuple(self, key):\n if len(key) == 2:\n msg = \"Ambiguous input in `set` function of pst.Neighs_Info.\"\n warnings.warn(msg, SyntaxWarning)\n if type(key[0]) == tuple:\n self.ks = list(np.array([key[1]]).ravel())\n self._set_structure_tuple(key[0])\n else:\n aux_bool = type(key[0]) in [np.ndarray, list]\n if type(key[0]) == list and type(key[0][0]) == tuple:\n self._set_tuple_list_tuple_structure(key)\n elif type(key[0]) == type(key[1]) and aux_bool:\n if len(key[0]) == len(key[1]):\n self._set_tuple_only_structure(key)\n else:\n self.ks = list(np.array(key[1]))\n self.set_neighs(key[0])\n else:\n self._set_tuple_only_structure(key)\n else:\n self.set_neighs(key[0])", "def __setitem__(self, *args):\n return _osgAnimation.vectorVec4Keyframe___setitem__(self, *args)", "def __setitem__(self, *args):\n return 
_osgAnimation.vectorVec2Keyframe___setitem__(self, *args)", "def updateKeys(self, _key):\n\t\tif _key == curses.KEY_UP or _key == curses.KEY_DOWN:\n\t\t\tself.switch()\n\t\telif _key == 261 or _key == 10: # Execute (Key RIGHT / ENTER)\n\t\t\treturn str(self.pointer.get())\n\t\treturn (50, _key)\t\t# send key back, to handle in main program", "def setPositionKey(self, time, index, value, id, view) -> None:\n ...", "def __setstate__(self, state):\n if len(state) != 1:\n raise TypeError('Invalid state length, expected 1; received %i' %\n len(state))\n kwargs = state[0]\n if not isinstance(kwargs, dict):\n raise TypeError('Key accepts a dict of keyword arguments as state; '\n 'received %r' % kwargs)\n self.__reference = None\n self.__pairs = tuple(kwargs['pairs'])\n self.__app = kwargs['app']\n self.__namespace = kwargs['namespace']", "def update(self):\n self._state = 23", "def __setitem__(self, key: Any, widget: Component) -> None:\n if not isinstance(widget, Widgets):\n self._add(widget, *self._key_to_rows_columns(key))", "def update_plot(self, msg):\n if not self.plots_created:\n self.create_plots(msg.keys())\n self.plots_created = True\n\n for k, v in msg.iteritems():\n current = self.plotdata.get_data(k)\n self.plotdata.set_data(k, np.r_[current, v])", "def __setitem__(self, key, val):\n # TODO: see if this __setitem__ can be reduced\n try:\n assert key < 4\n except AssertionError:\n return\n else:\n if key == 0:\n self.R = val\n self.HEX = RGBtoHEX(*self.refs_RGB())\n elif key == 1:\n self.G = val\n self.HEX = RGBtoHEX(*self.refs_RGB())\n elif key == 2:\n self.B = val\n self.HEX = RGBtoHEX(*self.refs_RGB())\n else:\n self.HEX = val\n self.R, self.G, self.B = HEXtoRGB(*self.refs_HEX())\n return val", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def update_plot():\n pass", "def getKey(self, index) -> AnimCurveKey:\n ...", "def copy(self, key, new_key=None):\n\n if new_key is None:\n new_key = self.processing_buffer\n\n self[new_key] = self[key].copy()", "def __setitem__(self, key: int, value):\n super().__setitem__(key, value)\n if self.VTKObject is not None:\n self.VTKObject.Modified()", "def __missing__(self,key):\n # First time the dictionary is in actual use, so we do some setup\n if not self.sortedkeys:\n # If there are no keys, then we have an empty dictionary\n # which will raise an exception when searching for keys.\n # Since we are assuming no change once created, we will\n # set a lambda function to replace this one because we're\n # not using the method anyway.\n if not len(self.keys()):\n self.__missing__ = lambda x: (0.0,)\n return (0.0,)\n else:\n # ASSUMPTION: This dictionary is not changed once created.\n # TODO: We need to make this immutable by modifying __getattr__\n self.sortedkeys = sorted(self.keys())\n\n # Find the previous and next available keys\n ind = bisect_right(self.sortedkeys, key)-1\n x0 = int(self.sortedkeys[ind])\n x1 = int(self.sortedkeys[ind+1])\n\n # Find the previous and next available values\n y0 = self[x0]\n y1 = self[x1]\n val = None\n if isinstance(y0, tuple):\n if not len(y0): return () # We have nothing in the tuple, so return a blank tuple (not 'val', which is None)\n for i in xrange(len(y0)):\n # Try to add value to the tuple of values\n try: val += y0[i] + ((y1[i]-y0[i])*(key-x0))/(x1-x0),\n # Unless we have a value of None, then we make a tuple\n except TypeError: val = y0[i] + ((y1[i]-y0[i])*(key-x0))/(x1-x0),\n else: val = y0 + ((y1-y0)*(key-x0))/(x1-x0)\n # This can 
store the value in the dictionary to prevent us from\n # interpolating later, however, it is too much data for big models\n # and will fill the memory like a leak. It's commented here, but\n # kept in case people want to play.\n #self[key] = val\n return val", "def key(self, key):\n self._key = key", "def key(self, key):\n self._key = key", "def update(self):\n self._state = self._state", "def update(self, update_data):\n logger.info(update_data)\n self.x, self.y = update_data['coords']\n self.color = update_data['color']\n self.is_visible = update_data['is_visible']\n\n # todo: direction, state", "def update_position_and_spike_frame(self, step=0):\n if self._spk_pos_ax is not None:\n # print(self._pos_x)\n # print(self._pos_y)\n # TODO: Add colors based on which cluster the spikes are coming from\n self._spk_pos_frame[0].set_data((self._spk_pos_x, self._spk_pos_y))\n # self._spk_pos_frame[0].set_color(self._spk_clusters)\n self._spk_pos_frame[1].set_data((self._pos_x, self._pos_y))\n if len(self._speed) > 0:\n self._spk_pos_frame[2].set_text('speed = %.2fcm/s'%self._speed[-1])\n return self._spk_pos_frame", "def key(self, key):\n return self.__key.set(key)", "def UpdateState( self, **kwargs ):\n if bool( self ):\n if 'scale_mode' in kwargs:\n kwargs[ 'replot' ] = True\n\n kwargs = self._UpdateStateValues( **kwargs )\n redraw = kwargs.get( 'redraw', False )\n replot = kwargs.get( 'replot', False )\n\n if self.logger.isEnabledFor( logging.DEBUG ):\n self.logger.debug(\n '%s: redraw=%s, replot=%s',\n\t self.GetTitle(), str( redraw ), str( replot )\n\t )\n\n if replot:\n self._UpdateDataSetValues()\n self._UpdatePlot()\n\n elif redraw:\n self._DoUpdateRedraw()\n self.canvas.draw()", "def _set_x_and_y_keys(self, data_key, x, y):\r\n if self.stack_pos == 'stack_root':\r\n self[data_key].__set_x_key(x)\r\n self[data_key].__set_y_key(y)\r\n else:\r\n raise KeyError(\"set_x_keys can only be called from a stack at root level. 
Current level is '{0}'\".format(self.stack_pos))", "def __setstate__(self, state):\n if \"_version\" not in state.keys(): # Compatibility mode\n self._array = state['_array']\n self._version = 0.1 # promote to the latest version\n x = state['_x']\n y = state['_y']\n z = state['_z']\n dx = state['_dx']\n dy = state['_dy']\n dz = state['_dz']\n if x is not None and y is not None and z is not None:\n self[x:x+dx, y:y+dy, z:z+dz]\n elif x is not None and y is not None:\n self[x:x+dx, y:y+dy]\n elif x is not None:\n self[x:x+dx]\n else:\n self._view = self._array\n self._view_item = None\n else:\n self.__dict__ = state\n if self._view_item is not None:\n self.__getitem__(self._view_item)\n else:\n self._view = self._array", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def setPosition(self, key, univ):\n self.positions[key] = univ", "def _set_tuple_only_structure(self, key):\n self.set_neighs(key[0])\n if len(key) == 2:\n self.set_sp_rel_pos(key[1])\n elif len(key) > 2:\n raise TypeError(\"Not correct input.\")", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def change_key(self, i, key):\n self.__keys[i] = key\n self.__swim(self.__qp[i])\n self.__sink(self.__qp[i])", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def __setitem__(self, key, val):\n x, y = key\n self.matrix[y][x] = val", "def setValue(self, *args):\n return _osgAnimation.FloatKeyframe_setValue(self, *args)", "def key(self, key):\n\n self._key = key", "def key(self, key):\n\n self._key = key", "def update(self, *args):\n return _osgAnimation.Channel_update(self, *args)", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def set_key(self, key):\n self.key = key", "def update(self, *args):\n return _osgAnimation.Animation_update(self, *args)", "def __delitem__(self, key) -> None:\n\n if key in self.layout.bladeTupMap.keys():\n self.value[self.layout.bladeTupMap[key]] = 0\n elif isinstance(key, tuple):\n sign, blade = compute_reordering_sign_and_canonical_form(key, np.array(self.layout.sig),\n self.layout.firstIdx)\n self.value[self.layout.bladeTupMap[blade]] = 0\n else:\n self.value[key] = 0", "def update(self, *args, **kwargs):\n assign = ('id', 'width', 'height', 'x', 'y')\n if args:\n for key, idx in zip(assign, range(len(args))):\n exec('self.{} = {}'.format(key, args[idx]))\n else:\n for key, val in kwargs.items():\n if key in ('id', 'width', 'height', 'x', 'y'):\n exec('self.{} = {}'.format(key, val))", "def __setitem__(self, key, value):\n # if isinstance(value, MutableMapping):\n # self._axl_data[key] = AXLDataModel(value)\n # else:\n # self._axl_data[key] = value\n if isinstance(value, MutableMapping):\n raise 
TypeError(mutable_mapping_msg)\n self._axl_data[key] = value", "def checkKey(self):\n if self.isClosed():\n raise GraphicsError(\"checkKey in closed window\")\n self.update()\n key = self.lastKey\n self.lastKey = \"\"\n return key", "def add_animation(self, animation, key):\n\t\tif animation.from_value == animation.to_value:\n\t\t\treturn\n\t\tanimation.attribute = key\n\t\tanimation.layer = self\n\t\tself.animations[key] = animation", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def update(self, new_gameStateData):\r\n pass", "def set_key(attr):\n cmds.setKeyframe(attr)", "def __getitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___getitem__(self, *args)", "def __getitem__(self, key):\n return self.points.__getitem__(key)", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def __getitem__(self, key):\n if isinstance(key, int):\n i = key if key >= 0 else len(self) + key\n return self.get_frame(i)\n else:\n return SliceableIterable(self, range(len(self)), len(self))[key]", "def __init__(self, env, key, keep_raw=True):\n gym.ObservationWrapper.__init__(self, env)\n self.prev = None\n self.keep_raw = keep_raw\n self.key = key\n\n space = self.observation_space.spaces[self.key]\n shape = list(space.shape)\n\n # adapt the observation space\n if self.keep_raw:\n shape[0] = shape[0]*2\n\n self.observation_space.spaces[self.key] = gym.spaces.Box(0, 255, shape, dtype=np.float32)", "def update(self):\n try:\n self._state = self.pushbullet.data[self._element]\n self._state_attributes = self.pushbullet.data\n except (KeyError, TypeError):\n pass", "def __setitem__(self, key, value):\n dict.__setitem__(self, key, value)\n\n self.changed()", "def update_indicator(self, i_key, color):\n if self._myIndicatorsManager.get_line_type(i_key) < 2:\n # horizontal or vertical\n canvas_line_index = self._myIndicatorsManager.get_canvas_line_index(i_key)\n self._myCanvas.updateLine(ikey=canvas_line_index, vecx=None, vecy=None, linecolor=color)\n else:\n # 2-way\n canvas_line_index_h, canvas_line_index_v = self._myIndicatorsManager.get_canvas_line_index(i_key)\n # h_vec_set, v_vec_set = self._myIndicatorsManager.get_2way_data(i_key)\n\n self._myCanvas.updateLine(ikey=canvas_line_index_h, vecx=None, vecy=None, linecolor=color)\n self._myCanvas.updateLine(ikey=canvas_line_index_v, vecx=None, vecy=None, linecolor=color)\n\n return", "def state(self, state: _State) -> None:\n prev_data = self._state.data\n self._state = state.with_data(prev_data)", "def setValue(self, *args):\n return _osgAnimation.Vec3Keyframe_setValue(self, *args)", "def __setitem__(self, key, value):\n\t\tif not self._is_valid_key(key):\n\t\t\traise KeyError('The key is not valid')\n\t\t\t\n\t\tif not value in [-1, 0, 1]:\n\t\t\traise ValueError('The value must be -1, 0 or 1')\n\t\t\t\n\t\tx, y = self._index_from_key(key)\n\t\tself._board[x][y] = value", "def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)", "def __setitem__(self, k, value):\n self._coords[k] = value", "def __setitem__(self, *args):\n return _osgAnimation.VertexList___setitem__(self, *args)", "def setValue(self, *args):\n return 
_osgAnimation.Vec2Keyframe_setValue(self, *args)", "def setValue(self, *args):\n return _osgAnimation.QuatKeyframe_setValue(self, *args)", "def _fancy_getitem(self, key):\n new_data = {}\n for i, k in enumerate(zip(*key)):\n if k in self.data:\n new_data[i] = self.data[k]\n return DOK(\n shape=(len(key[0])),\n data=new_data,\n dtype=self.dtype,\n fill_value=self.fill_value,\n )" ]
[ "0.6366824", "0.57968193", "0.57502097", "0.5714252", "0.56339014", "0.5459689", "0.54484665", "0.54372066", "0.5365949", "0.5340403", "0.5326292", "0.5320078", "0.5305696", "0.52860016", "0.52631617", "0.52473605", "0.5218694", "0.52184427", "0.5185107", "0.51785284", "0.5154304", "0.51286566", "0.5123627", "0.5122041", "0.51158696", "0.5092829", "0.50886256", "0.50725776", "0.50688666", "0.5040174", "0.5008681", "0.5006402", "0.49982813", "0.49929616", "0.49929085", "0.4986803", "0.498475", "0.49784607", "0.49775386", "0.4958903", "0.49512714", "0.49357986", "0.4931702", "0.49238917", "0.49235904", "0.49216437", "0.4918375", "0.489932", "0.4898126", "0.4886995", "0.48842475", "0.48722792", "0.48708072", "0.48708072", "0.48658538", "0.4864676", "0.4860528", "0.48520425", "0.48406374", "0.48270148", "0.48240927", "0.48003206", "0.47997418", "0.47944853", "0.47939956", "0.47895026", "0.478907", "0.4772236", "0.47704235", "0.47556505", "0.47556505", "0.47543627", "0.47494516", "0.47446436", "0.4734894", "0.47295433", "0.47277912", "0.47118825", "0.47027364", "0.46993104", "0.46991333", "0.469672", "0.46956068", "0.4695378", "0.46938124", "0.46933478", "0.46895874", "0.46745843", "0.46691808", "0.46688843", "0.46684626", "0.46639416", "0.46627906", "0.46612823", "0.4660752", "0.46594495", "0.46583596", "0.4656313", "0.46553347", "0.46516088" ]
0.64864296
0
The plotting state that gets updated via the update method and used by the renderer to generate output.
def state(self):\n raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_plot():\n pass", "def store(self, state):\n if self.interactive:\n self._fig.clear()\n fig = self._fig\n else:\n fig = plt.figure()\n\n self._plot_function(fig, copy_state(state))\n\n fig.canvas.draw()\n if not self.interactive:\n plt.show()", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def UpdateState( self, **kwargs ):\n if bool( self ):\n if 'scale_mode' in kwargs:\n kwargs[ 'replot' ] = True\n\n kwargs = self._UpdateStateValues( **kwargs )\n redraw = kwargs.get( 'redraw', False )\n replot = kwargs.get( 'replot', False )\n\n if self.logger.isEnabledFor( logging.DEBUG ):\n self.logger.debug(\n '%s: redraw=%s, replot=%s',\n\t self.GetTitle(), str( redraw ), str( replot )\n\t )\n\n if replot:\n self._UpdateDataSetValues()\n self._UpdatePlot()\n\n elif redraw:\n self._DoUpdateRedraw()\n self.canvas.draw()", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def get_plot_state(self_or_cls, obj, renderer=None, **kwargs):\n if not isinstance(obj, Plot):\n obj = self_or_cls.get_plot(obj=obj, renderer=renderer, **kwargs)\n return obj.state", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def myUpdate(self, stateDict=None):\n\n # store stateDict so we can replot on changing dark theme\n if stateDict is None and self.stateDict is not None:\n # re-use our stateDict\n stateDict = self.stateDict\n else:\n if stateDict is None:\n return\n 
self.stateDict = stateDict.copy()\n\n if stateDict is None:\n return\n \n dataType = stateDict['dataType']\n hue = stateDict['hue']\n groupByColumnName = stateDict['groupByColumnName']\n\n plotType = stateDict['plotType']\n #self.plotType = plotType\n\n xStatHuman = stateDict['xStatHuman']\n yStatHuman = stateDict['yStatHuman']\n\n xStat = stateDict['xStat']\n yStat = stateDict['yStat']\n\n '''\n print('=== myMplCanvas.myUpdate()')\n print(' ', plotType)\n print(' ', 'xStatHuman:', xStatHuman, 'yStatHuman:', yStatHuman)\n print(' ', 'xStat:', xStat, 'yStat:', yStat)\n '''\n\n xIsCategorical = stateDict['xIsCategorical']\n yIsCategorical = stateDict['yIsCategorical']\n\n masterDf = stateDict['masterDf']\n meanDf = stateDict['meanDf']\n\n self.plotDf = meanDf\n\n self.canvas.axes.clear()\n\n picker = 5\n if plotType in ['Scatter Plot', 'Scatter + Raw + Mean']:\n # scatter plot user selection\n self.scatterPlotSelection, = self.canvas.axes.plot([], [], 'oy',\n markersize=12, fillstyle='none')\n\n # main scatter\n try:\n self.whatWeArePlotting = sns.scatterplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes, picker=picker,\n zorder=0)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print(' EXCEPTION: in myUpdate() \"Scatter Plot\", exception is:')\n print(' ', e)\n print(' ', 'plotType:', plotType)\n print(' ', 'xStat:', xStat)\n print(' ', 'yStat:', yStat)\n print(' ', 'hue:', hue)\n\n # sem in both x and y, pulling from masterDf\n if dataType=='File Mean' or plotType=='Scatter + Raw + Mean':\n # we need to do this for each hue???\n # if x or y is in categorical (e.g. a string) then do not do this ...\n if xIsCategorical or yIsCategorical:\n pass\n else:\n print(' grabbing mean +- sem for self.groupByColumnName:', groupByColumnName)\n color = 'k'\n xd = masterDf.groupby(groupByColumnName).mean()[xStat]\n xerrd = masterDf.groupby(groupByColumnName).sem()[xStat]\n yd = masterDf.groupby(groupByColumnName).mean()[yStat]\n yerrd = masterDf.groupby(groupByColumnName).sem()[yStat]\n \n # logger.info('2023 declan')\n # print(' groupByColumnName:', groupByColumnName)\n # print(' xd:', xd)\n # print(' yd:', yd)\n # print(' xerrd:', xerrd)\n # print(' yerrd:', yerrd)\n \n self.canvas.axes.errorbar(xd, yd, xerr=xerrd, yerr=yerrd,\n fmt='none', capsize=0, zorder=10, color=color, alpha=0.5);\n\n elif plotType == 'Histogram':\n yStatHuman = 'Count'\n doKde = False #stateDict['doKDE']\n try:\n g = sns.histplot(x=xStat, hue=hue, kde=doKde,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTIONin Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Violin Plot':\n if not xIsCategorical:\n warningStr = 'Violin plot requires a categorical x statistic'\n else:\n g = sns.violinplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, 
ax=self.canvas.axes)\n\n elif plotType == 'Box Plot':\n if not xIsCategorical:\n warningStr = 'Box plot requires a categorical x statistic'\n else:\n g = sns.boxplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Raw + Mean Plot':\n if not xIsCategorical:\n warningStr = 'Raw + Mean plot requires a categorical x statistic'\n else:\n try:\n # does not work here for categorical x\n #self.scatterPlotSelection, = self.canvas.axes[0].plot([], [], 'oy',\n # markersize=12, fillstyle='none')\n\n '''\n colorList = [('red'), ('green'), 'b', 'c', 'm', 'y']\n hueList = meanDf[hue].unique()\n palette = {}\n for idx, hue in enumerate(hueList):\n palette[hue] = colorList[idx]\n print(palette)\n '''\n\n palette = sns.color_palette(\"Paired\")\n #palette = ['r', 'g', 'b']\n\n # stripplot\n #g = sns.swarmplot(x=xStat, y=yStat,\n g = sns.stripplot(x=xStat, y=yStat,\n hue=hue,\n palette=palette,\n data=meanDf,\n ax=self.canvas.axes,\n #color = color,\n dodge=True,\n alpha=0.6,\n picker=picker,\n zorder=1)\n\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n logger.info(f' REMAKING LEGEND sns.pointplot() plotNumber:{self.plotNumber}')\n handles, labels = self.canvas.axes.get_legend_handles_labels()\n l = self.canvas.axes.legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n #self.myLegend = self.canvas.axes.Legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n '''\n if self.darkTheme:\n color = 'w'\n else:\n color = 'k'\n color = [color] * len(hueList)\n print('color:', color)\n '''\n\n self.whatWeArePlotting = sns.pointplot(x=xStat, y=yStat,\n hue=hue,\n #palette=palette,\n data=meanDf,\n estimator=np.nanmean,\n errorbar=('ci', 68),\n capsize=0.1,\n ax=self.canvas.axes,\n color='r',\n #legend='full',\n #zorder=10)\n )\n except (ValueError) as e:\n print('EXCEPTION in \"Raw + Mean Plot\":', e)\n traceback.print_exc()\n\n elif plotType == 'Regression Plot':\n # regplot does not have hue\n if xIsCategorical or yIsCategorical:\n warningStr = 'Regression plot requires continuous x and y statistics'\n else:\n # todo: loop and make a regplot\n # for each unique() name in\n # hue (like Region, Sex, Condition)\n hueList = masterDf[hue].unique()\n for oneHue in hueList:\n if oneHue == 'None':\n continue\n tmpDf = meanDf [ meanDf[hue]==oneHue ]\n #print('regplot oneHue:', oneHue, 'len(tmpDf)', len(tmpDf))\n sns.regplot(x=xStat, y=yStat, data=tmpDf,\n ax=self.canvas.axes);\n else:\n print(' did not understand plot type:', plotType)\n\n\n #\n # update\n self.canvas.axes.figure.canvas.mpl_connect(\"pick_event\", self.onPick)\n\n self.mplCursorHover = None\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n\n #\n #self.mySetStatusBar(warningStr)\n\n self.canvas.axes.spines['right'].set_visible(False)\n self.canvas.axes.spines['top'].set_visible(False)\n\n 
if not stateDict['showLegend']:\n #print('self.canvas.axes.legend():', self.canvas.axes.legend())\n #print('self.canvas.axes.legend:', self.canvas.axes.legend)\n #if self.canvas.axes.legend() is not None:\n if 1:\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #print('myUpdate() self.plotSize:', self.plotSize)\n self.canvas.axes.set_xlabel(xStatHuman)\n self.canvas.axes.set_ylabel(yStatHuman)\n '''\n if self.plotSize == 'paper':\n fontsize = 10\n self.canvas.axes[0].set_xlabel(xStatHuman, fontsize=fontsize)\n self.canvas.axes[0].set_ylabel(yStatHuman, fontsize=fontsize)\n else:\n self.canvas.axes[0].set_xlabel(xStatHuman)\n self.canvas.axes[0].set_ylabel(yStatHuman)\n '''\n\n # subplots_adjust\n #self.fig.canvas.draw_idle()\n self.fig.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def update_figure(self):\n\n self.draw()", "def plot(self):\n pass", "def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def update_plot(self,ax):\n self.replot(ax)", "def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", 
colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def myUpdateGlobal(self, stateDict):\n self.canvas.axes.legend().set_visible(stateDict['showLegend'])\n #self.myLegend.set_visible(stateDict['showLegend'])\n\n if stateDict['showMplToolbar']:\n self.mplToolbar.show()\n else:\n self.mplToolbar.hide()\n\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n elif not stateDict['doHover']:\n # cancel mplCursorHover hover selection\n if self.mplCursorHover is not None:\n selections = self.mplCursorHover.selections\n if len(selections) ==1 :\n self.mplCursorHover.remove_selection(selections[0])\n #self.mplCursorHover = None\n\n #\n #self.draw() # to update hover\n self.fig.canvas.draw()", "def update_visualization(self) -> None:\n pass", "def UpdatePlot(self):\n\n if self.first_time:\n for ID, plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.oplot(\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw = True,\n side ='left',\n label = tmp['label'],\n color = tmp['color'],\n xlabel = None, ylabel = None, y2label = None,\n title = None,\n dy = None,\n ylog_scale = False,\n xmin = None, xmax = None, ymin = None, ymax = None,\n refresh = True,\n show_legend= True, legend_loc='ur', legend_on= True,\n delay_draw = False,\n marker = 'None', markersize = None,\n autoscale=True,\n linewidth = 3, # default 2\n drawstyle = 'line', style = 'solid',\n grid = True,\n bgcolor= None, framecolor= None, gridcolor= None,\n labelfontsize= 10, # default 9\n legendfontsize= 12, # default 7\n fullbox=None, # 'box', 'open', 'bottom'\n axes_style=None,\n zorder=None,\n )\n self.first_time = False\n\n else:\n i = 0\n for ID,plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.update_line(\n i,\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw=True,\n )\n i += 1\n\n self.plot_panel.set_xylims(\\\n [\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) )\\\n ]\\\n )\n\n self.panel_sizer.Fit(self)", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n 
label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def on_update(self):\n if self.main.data is not None:\n font = {\n 'family': str(self.le_font.text()),\n 'size': int(str(self.le_font_size.text()))\n }\n\n mpl.rc('font', **font)\n\n # Clear the plot\n self.ax.clear()\n\n # Get the data and colormap\n x, y, z = self.main.data.get_pcolor()\n cmap = self.main.canvas.colormap.get_mpl_colormap()\n\n tri_checkboxes = [self.cb_tripcolor.checkState(),\n self.cb_triangulation.checkState()]\n\n # If we are going to need to plot triangulation data, prepare\n # the data so it can be plotted\n if QtCore.Qt.Checked in tri_checkboxes:\n if self.main.data.tri is None:\n self.main.data.generate_triangulation()\n\n xc, yc = self.main.data.get_triangulation_coordinates()\n\n tri = mpl.tri.Triangulation(xc, yc,\n self.main.data.tri.simplices)\n\n # Plot the data using either pcolormesh or tripcolor\n if self.cb_tripcolor.checkState() != QtCore.Qt.Checked:\n quadmesh = self.ax.pcolormesh(x, y, z,\n cmap=cmap,\n rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n else:\n quadmesh = self.ax.tripcolor(tri,\n self.main.data.z.ravel(),\n cmap=cmap, rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n\n # Plot the triangulation\n if self.cb_triangulation.checkState() == QtCore.Qt.Checked:\n self.ax.triplot(tri, 'o-', color='black',\n linewidth=0.5, markersize=3)\n\n self.ax.axis('tight')\n\n title = self.format_label(str(self.le_title.text()))\n title = '\\n'.join(textwrap.wrap(title, 40,\n replace_whitespace=False))\n\n # Set all the plot labels\n self.ax.set_title(title)\n self.ax.set_xlabel(self.format_label(self.le_x_label.text()))\n self.ax.set_ylabel(self.format_label(self.le_y_label.text()))\n\n # Set the axis tick formatters\n self.ax.xaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_x_format.text()), float(self.le_x_div.text())))\n self.ax.yaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_y_format.text()), float(self.le_y_div.text())))\n\n if self.cb is not None:\n self.cb.remove()\n\n # Colorbar layout\n orientation = str(self.cb_cb_orient.currentText())\n self.cb = self.fig.colorbar(quadmesh, orientation=orientation)\n\n self.cb.formatter = FixedOrderFormatter(\n str(self.le_z_format.text()), float(self.le_z_div.text()))\n\n self.cb.update_ticks()\n\n self.cb.set_label(self.format_label(self.le_z_label.text()))\n self.cb.draw_all()\n\n # Plot the current linecut if neccesary\n if self.cb_linecut.checkState() == QtCore.Qt.Checked:\n for linetrace in self.main.linecut.linetraces:\n if linetrace.type == 'horizontal':\n plt.axhline(linetrace.position, color='red')\n elif linetrace.type == 'vertical':\n plt.axvline(linetrace.position, color='red')\n\n self.fig.tight_layout()\n\n self.canvas.draw()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], 
y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def plot_changed(self):\n self.plotType = self.ui.selectPlotType.currentText()\n self.value_changed()", "def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)", "def display_state(self):\n # self.__display(self.state)\n self.__draw(self.state)", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def redraw(self, state: EngineeringState) -> None:\n pass", "def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1", "def config_plot_update_func(self):\n return self._config_plot_update_func", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def _UpdateStateValues( self, **kwargs ):\n #replot = kwargs.get( 'replot', False )\n #redraw = kwargs.get( 'redraw', kwargs.get( 'force_redraw', False ) )\n replot = kwargs.get( 'replot', kwargs.get( 'force_redraw', False ) )\n redraw = kwargs.get( 'redraw', False )\n\n if 'data_model_mgr' in kwargs:\n replot = True\n\n if 'dataset_added' in kwargs:\n wx.CallAfter( self.container.GetDataSetMenu().UpdateAllMenus )\n\n if 'time_value' in kwargs and kwargs[ 'time_value' ] != self.timeValue:\n if self._IsTimeReplot():\n replot = True\n else:\n redraw = True\n self.timeValue = kwargs[ 'time_value' ]\n\n if redraw:\n kwargs[ 'redraw' ] = True\n if replot:\n kwargs[ 'replot' ] = True\n\n return kwargs", "def on_plot(self, event=None):\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=False)\n self.enable_remove_plot()", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n 
self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)", "def plot_refresh():\n figure.canvas.draw()", "def plot_state(\n self, highlightRobot=None, plotRobotIDs=True,\n returnax=True, figname=\"kaijuGrid.pdf\"\n ):\n from kaiju.utils import plotOne\n if returnax:\n ax = plotOne(\n step=0, robotGrid=self, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot, returnax=True\n )\n return ax\n else:\n plotOne(\n step=0, robotGrid=self, figname=figname, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot\n )", "def force_plot_update(settings_dict):\n settings_dict[\"new_data\"] = True # Initiates the update of the plots\n settings_dict[\"last_plot_update\"] = settings_dict[\"update_counter\"]", "def get_state(self, _settings = None):\n settings = _settings or {}\n settings['window_type'] = 'ImageWindow'\n settings['actionPlotSettings'] = self.actionPlotSettings.isChecked()\n # Disabled QLineEdits are confusing to QSettings. Store a dummy _\n settings['x_label'] = \"_\" + self.settingsWidget.ui.x_label.text()\n settings['y_label'] = \"_\" + self.settingsWidget.ui.y_label.text()\n settings['x_label_auto'] = self.settingsWidget.ui.x_label_auto.isChecked()\n settings['y_label_auto'] = self.settingsWidget.ui.y_label_auto.isChecked()\n settings['colormap_min'] = str(self.settingsWidget.ui.colormap_min.text())\n settings['colormap_max'] = str(self.settingsWidget.ui.colormap_max.text())\n settings['transpose'] = self.settingsWidget.ui.transpose.currentText()\n settings['flipx'] = self.settingsWidget.ui.flipx.currentText()\n settings['flipy'] = self.settingsWidget.ui.flipy.currentText()\n settings['viewbox'] = self.plot.getView().getViewBox().getState()\n settings['x_view'] = self.actionX_axis.isChecked()\n settings['y_view'] = self.actionY_axis.isChecked()\n settings['histogram_view'] = self.actionHistogram.isChecked()\n settings['crosshair'] = self.actionCrosshair.isChecked()\n settings['circular_roi'] = self.actionCircularROI.isChecked()\n settings['gradient_mode'] = self.plot.getHistogramWidget().item.gradient.saveState()\n \n return DataWindow.get_state(self, settings)", "def plot_state(self, **options):\n f = plt.gcf()\n if len(f.axes) < 2:\n f, _ = plt.subplots(1, 2, figsize=(\n 13, 6), sharex='row', sharey='row')\n\n gp = self.target_model\n\n # Draw the GP surface\n visin.draw_contour(\n gp.predict_mean,\n gp.bounds,\n self.target_model.parameter_names,\n title='GP target surface',\n points=gp.X,\n axes=f.axes[0],\n **options)\n\n # Draw the latest acquisitions\n if options.get('interactive'):\n point = gp.X[-1, :]\n if len(gp.X) > 1:\n f.axes[1].scatter(*point, color='red')\n\n displays = [gp.instance]\n\n if options.get('interactive'):\n from IPython import display\n displays.insert(\n 0,\n 
display.HTML('<span><b>Iteration {}:</b> Acquired {} at {}</span>'.format(\n len(gp.Y), gp.Y[-1][0], point)))\n\n # Update\n visin._update_interactive(displays, options)\n\n acq_index = self._get_acquisition_index(self.state['n_batches'])\n\n def acq(x):\n return self.acquisition_method.evaluate(x, acq_index)\n\n # Draw the acquisition surface\n visin.draw_contour(\n acq,\n gp.bounds,\n self.target_model.parameter_names,\n title='Acquisition surface',\n points=None,\n axes=f.axes[1],\n **options)\n\n if options.get('close'):\n plt.close()", "def _update_current_graph(self, **kwargs):\n\n self.current_graph.redraw()", "def updatePlotLayout(self):\n self.plotLayoutType = self.plotLayoutDropdown.currentText() # ['paper', 'poster', 'talk']\n\n self.updatePlotLayoutGrid()\n\n self.update2()", "def plot_data(self):", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def show( self ):\n if self.changed:\n self._update_ax() \n self.changed = False", "def update_simulate_plot(self):\n a = self.plot_zoom.getViewBox().viewRange()\n self.plot_simulate.setXRange(a[0][0], a[0][1])\n self.plot_simulate.setYRange(a[1][0], a[1][1])", "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def plot_internal_controller_states(self, plot='z', **kwargs):\n \n # Check if trajectory is already computed\n if self.traj == None:\n self.compute_trajectory()\n \n plotter = graphical.TrajectoryPlotter( self )\n plotter.plot( self.traj, plot, **kwargs)", "def update(self):\n self._state = self._state", "def update(self, update_data):\n logger.info(update_data)\n self.x, self.y = update_data['coords']\n self.color = update_data['color']\n self.is_visible = update_data['is_visible']\n\n # todo: direction, state", "def showPlot(self):\r\n self.plot = not self.plot\r\n if self.plot:\r\n self.plot_button['text'] = \"No Plot\"\r\n else:\r\n self.plot_button['text'] = \"Plot\"", "def plot(self, *args, **kwargs):\n pass", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n 
pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def render(self, mode='human', close=False):\n plt.figure(figsize=(20,12))\n plt.plot(self.history)\n plt.show()", "def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass", "def plot_config(self):\n self.dynamic.current_plot.setTitle(\n \"Dynamic IV waiting time analysis\", **self.titleStyle\n )\n self.dynamic.current_plot.setLabel(\n \"left\", \"current\", units=\"A\", **self.labelStyle\n )\n self.dynamic.current_plot.setLabel(\n \"bottom\", \"time\", units=\"s\", **self.labelStyle\n )\n self.dynamic.current_plot.showAxis(\"top\", show=True)\n self.dynamic.current_plot.showAxis(\"right\", show=True)\n self.dynamic.current_plot.plotItem.showGrid(x=True, y=True)\n self.dynamic.current_plot.getPlotItem().invertY(True)\n\n change_axis_ticks(self.dynamic.current_plot, self.ticksStyle)", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. 
\n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. \n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. 
If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()", "def updateState(self):\n QtGui.QLabel.setText(self, self._state[0])", "def plot():\n pass", "def __plot_pres__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotPressureVar.get():\n return\n\n # Check for a closed window:\n if 'pressure' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['pressure'].number):\n del self.plots['pressure']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'pressure' in self.plots.keys()\n if refresh:\n if 'pressure' in self.plots.keys():\n fig = self.plots['pressure']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('pressure, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.P((self.it), self.ir)[0], 'k-')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('Pressure (GBar)', fontsize=12)\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['pressure'] = fig", "def initialize(self) -> None:\n # Only do matplotlib import when necessary\n super().initialize()\n from matplotlib import pyplot as plt\n self.fig, self.ax = plt.subplots()\n if self.state_map is not None:\n self._add_state_map(self.state_map)\n else:\n self.categories = self.simulation.state_list", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % 
current_state.theta)\n\t\t# update roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)", "def plot(self):\n\t\tself.plotOfTF().plot()", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def state(self):\n pass", "def update_plot(self, msg):\n if not self.plots_created:\n self.create_plots(msg.keys())\n self.plots_created = True\n\n for k, v in msg.iteritems():\n current = self.plotdata.get_data(k)\n self.plotdata.set_data(k, np.r_[current, v])", "def plot(self, iteration=None, stateVectorConv=None): \n r = [\"{0}\".format(self.__class__.__name__)]\n if iteration is not None:\n r.append(\"i: {0}\".format(iteration))\n fmt = lambda a : \", \".join([\"{0:.4g}\".format(float(v)) for v in a])\n r.append(\"stateVector: {0}\".format(fmt(self.stateVector)))\n if stateVectorConv is not None:\n r.append(\"stateVectorConv: {0:.4g}\".format(stateVectorConv))\n \n s = \"; \".join(r)\n \n if iteration is not None and self.verbose > 0:\n print(s)\n \n if self.verbose > 4:\n nplot = 2 + len(self.stateVector)\n fig = pyplot.figure()\n fig.subplots_adjust(left=0.17, bottom=0.09, right=0.98, \n top=0.92, wspace=0.12, hspace=0.2)\n ax = fig.add_subplot(nplot,1,1)\n ax.set_title(s)\n ax.set_ylabel(\"$R [sr^{-1}]$\")\n 
ax.plot(self.independentVariable, self.observation, 'k', \n label='measurement')\n ax.plot(self.independentVariable, self.modelCalculation, 'r', \n label='model')\n ax.legend(loc='lower right')\n \n l = fig.add_subplot(nplot,1,2)\n l.plot(self.independentVariable, \n (self.observation-self.modelCalculation)/self.observationError, \n 'k', label=\"err\")\n l.set_ylabel(\"$\\Delta R/\\sigma$\")\n \n color = ['k-', 'r-', 'b-', 'g-', 'k--', 'r--', 'b--', 'g--', 'k-.', \n 'r-.', 'b-.', 'g-.', 'k:', 'r:', 'b:', 'g:']\n for i in range(len(self.stateVector)):\n name = self.parameterNames[i]\n k = fig.add_subplot(nplot,1,3+i)\n k.plot(self.independentVariable, self.Jacobian[:, i], 'k')\n k.set_ylabel(r\"$\\partial R/\\partial ({0})$\".format(name.replace(\"_\", \" \")))\n \n k.set_xlabel(\"$\\lambda [nm]$\")\n \n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n if self.verbose > 1:\n fig.show()\n else:\n fig.savefig(\"{0}_{1}_{2}.pdf\".format(r[0], \n r[1].split()[1][:-1], \n (\"{0:02d}\".format(iteration) \n if iteration is not None \n else \"final\")), transparent=True)", "def update_plot_preview(self):\n\n min_val = self.range_slider.first_position\n max_val = self.range_slider.second_position\n img_data = self.normalize_histogram(min_val, max_val)\n new_hist = self.calc_histogram(img_data)\n\n self.hist_canvas.axes.clear()\n self.hist_canvas.axes.bar(range(256), self.original_hist, color='b', alpha=0.7)\n self.hist_canvas.axes.bar(range(256), new_hist, color='g', alpha=0.7)\n self.hist_canvas.draw()\n\n self.current_img_data = img_data", "def update(self, pos = 0, msg = \"\"):\n if self.print_indicator and self.indicator and not self.video_model == None:\n C=pyqtgraph.hsvColor(1)\n pen=pyqtgraph.mkPen(color=C,width=1)\n data = np.zeros(10)\n\n pos = int(self.video_model.get_pos(datatype = self.model.get_datatype()))\n self.indicator.setData([pos,pos],[self.indicator_min,self.indicator_max]) #= self.plot_item.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def visual_attr_changed(self):\n if self.component:\n self.component.invalidate_draw()\n self.component.request_redraw()\n else:\n self.invalidate_draw()\n self.request_redraw()", "def save_plot(self, ):\n pass", "def update(self):\n self.line.set_ydata(self._get_y_data())\n self.figure.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n nc = len(self.curvelist)\n xpos = self.curvelist[0].xvinfo.vidx\n for i in range(nc):\n ypos = self.curvelist[i].yvinfo.vidx\n self.axes.plot(self.data[xpos],\n self.data[ypos], self.col[i])\n if self.idata is not None:\n self.axes.plot(self.idata[xpos],\n self.idata[ypos], self.col[i]+'.')\n self.canvas.draw()", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def update(self):\n self._state = 23", "def ready(self):\n plt.ion()\n self.figure = plt.figure()\n axes = self.figure.add_subplot(111)\n self.line, = axes.plot(self.xs, self._get_y_data(), self.colour)\n\n if self.y_range is not None:\n plt.ylim(*self.y_range)\n 
plt.xlim(self.x.lower, self.x.upper)\n\n plt.xlabel(self.x.tex_name if self.use_tex else self.x.name)\n plt.ylabel(self.y.tex_name if self.use_tex else self.y.name)\n\n self.figure.canvas.draw()", "def plot(self):\n raise Exception(\"pure virtual function\")", "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, width, height, text='{}'.format(state[key]), \n loc='center',facecolor= self.__color_map[key])\n\n ax.add_table(tb)\n plt.show()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def update():\n global dragon, x, y, position, angle_left, angle_right, size, new\n x, y, position, angle_left, angle_right, new = modify_pos(x, y, position,\n angle_left,\n angle_right,\n size, new)\n dragon.setData(x, y) # update plot", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def _set_data(self):\n\n # Remove old elements from plot\n if self.scatter is not None:\n self.scatter.remove()\n if self.oneoneline is not None:\n self.oneoneline.remove()\n\n # Get new data and plot\n self.slice = self.results.set_index('Location').loc[self.locnaam, [self.input_parameter, self.result_parameter]].values.T\n self.scatter = self.ax.scatter(*self.slice, s=5, alpha=0.7, color='C0')\n\n # Determine axes limits\n lowerlim, upperlim = self.slice.min(), self.slice.max()\n span = (upperlim - lowerlim)\n lowerlim = max(0, lowerlim - 0.05 * span)\n upperlim = upperlim + 0.05 * span\n\n # Plot a diagonal 1:1 line\n self.oneoneline, = self.ax.plot([lowerlim, upperlim], [lowerlim, upperlim], color='grey', dashes=(4, 3), lw=1.0)\n\n # Set the axes limits\n self.ax.set_xlim(lowerlim, upperlim)\n self.ax.set_ylim(lowerlim, upperlim)\n self.canvas.draw()", "def plot_ins_state(time, state):\n pylab.ion()\n\n plot_trajectory(state[:,0], state[:,1], state[:,2])\n\n\n # Plot position vs. time\n\n\n pylab.figure()\n pylab.subplot(311)\n pylab.plot(time, state[:,0],'r')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\phi$, rad')\n pylab.title('Latitude')\n pylab.grid(True)\n\n pylab.subplot(312)\n pylab.plot(time, state[:,1],'g')\n pylab.xlabel('time (s)')\n pylab.ylabel('$\\\\lambda$, rad')\n pylab.title('Longitude')\n pylab.grid(True)\n\n pylab.subplot(313)\n pylab.plot(time, state[:,2],'b')\n pylab.xlabel('time, s')\n pylab.ylabel('$h$, m')\n pylab.title('Altitude')\n pylab.grid(True)\n pylab.show()\n\n\n # Plot velocity vs. time\n pylab.figure()\n pylab.plot(time, state[:,3:6])\n pylab.xlabel('time, s')\n pylab.ylabel('Vn, Ve, Vd')\n pylab.title('Velocity vs. time')\n\n pylab.grid(True)\n pylab.show()\n\n # Plot acceleration vs. time\n pylab.figure()\n pylab.plot(time, state[:,6:9])\n pylab.xlabel('time, s')\n pylab.ylabel('an, ae, ad')\n pylab.title('Acceleration vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()\n\n # Plot quaternions vs. time\n pylab.figure()\n pylab.plot(time, state[:,9:])\n pylab.xlabel('time, s')\n pylab.ylabel('q0, q1, q2, q3')\n pylab.title('Quaternion vs. time')\n\n pylab.grid(True)\n pylab.show()\n pylab.ioff()", "def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)" ]
[ "0.78376055", "0.7461222", "0.7259287", "0.7013627", "0.69416213", "0.6845786", "0.68316853", "0.6825293", "0.6825293", "0.6825293", "0.6825293", "0.6825293", "0.67640865", "0.67583346", "0.6713076", "0.67130053", "0.666491", "0.66637933", "0.66528654", "0.66066337", "0.65670663", "0.65662307", "0.65662307", "0.65597504", "0.6527471", "0.641995", "0.63907355", "0.6389599", "0.6332738", "0.63048893", "0.6278279", "0.62377423", "0.62352043", "0.6209318", "0.62012106", "0.61914337", "0.6184405", "0.6157345", "0.612387", "0.612387", "0.6094726", "0.60928243", "0.6077399", "0.60651416", "0.6045138", "0.60163456", "0.6006776", "0.5991926", "0.59802973", "0.59758717", "0.59522223", "0.594821", "0.5934539", "0.59279114", "0.59274024", "0.59257805", "0.5924021", "0.5923439", "0.59224814", "0.5913487", "0.5912818", "0.5900377", "0.59000313", "0.58939046", "0.5882673", "0.58739865", "0.586551", "0.5860252", "0.58497214", "0.58497155", "0.58459926", "0.5843813", "0.5840078", "0.5837866", "0.5835978", "0.5833956", "0.58324087", "0.5829566", "0.58223695", "0.58192986", "0.5818459", "0.58182234", "0.5812808", "0.5809669", "0.5804011", "0.5791155", "0.5783781", "0.5782854", "0.5776314", "0.57718605", "0.57611513", "0.5759997", "0.57532966", "0.57507426", "0.5745038", "0.5743241", "0.5741324", "0.5740483", "0.5731944", "0.5728357" ]
0.57563335
92
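Each record in this dump carries the same fields in the same order: the query text, the positive document, a metadata dict, the list of negative documents, their scores (as strings), and finally a score and a rank for the positive document. As a hedged illustration only — none of the following code ships with this data, and the field semantics are an inference from the stored values — the rank (92 here) looks consistent with a 1-based position of the positive document among the scored negatives, with -1 marking rows whose positive document was apparently never retrieved (compare the 0.0 / -1 pair in the next record):

```python
# Hypothetical reader helper; an assumption about the row semantics,
# not code that is part of the dataset.
def document_rank(document_score: float, negative_scores: list[float]) -> int:
    """1-based rank of the positive document among the scored negatives.

    Returns -1 for rows whose positive document was never scored,
    which in this dump appear with document_score == 0.0.
    """
    if document_score == 0.0:
        return -1
    # Count how many retrieved negatives outscore the positive document.
    return 1 + sum(score > document_score for score in negative_scores)
```

Applied to the scores above with document_score = 0.57563335, this lands near the stored rank of 92; the exact tie-breaking rule is unknown.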
Returns the total number of available frames.
def __len__(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_frames(self) -> int:\n return self.num_frames", "def size(self):\n if self.frames is None:\n return 0\n return self.frames.size", "def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:\n try:\n frame_count = int(self.__dict__['nb_frames'])\n except ValueError:\n raise FFProbeError('None integer frame count')\n return frame_count", "def FrameCount(self):\r\n\t\treturn self._get_attribute('frameCount')", "def get_num_frames(self):\n return self._frames.shape[0]", "def capacity(self):\r\n return len(self.frames)", "def num_frames(self):\n return len(self.video)", "def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)", "def num_frames(self):\n return self._first_rgb.shape[1]", "def remaining_frames(self):\n return self.sound.nframes - self.current_frame", "def total_buffers_count(self) -> int:\n return int(self._pts / self._duration)", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def get_frame_size(self) -> Tuple[int, int]:\n return self.__sim.frame_size()", "def frame_length(self):\r\n return self.config.frame_length", "def _frameLen(self):\n return self.numCols * self.numRows", "def get_frame_size(self):\n return self._frames.shape[-1]", "def num_available(self) -> int:\n return len(self)", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def total_buffers_count(self) -> int:\n return self._counter", "def pending_nb_bytes(self):\n if self.df_length is not None:\n if self.df_length > 0:\n return self.df_length - len(self.buf)\n\n if self.cf_length is not None:\n if self.cf_length > 0:\n return self.cf_length - len(self.buf)\n \n return 4", "def frame_size(self):\n return self._frame_size", "def bframes_count(**kwargs) -> int:\n path_project = kwargs['project_name']\n project_name = path_project.split( '/' )[-1].strip( '.' 
)\n if project_name in frames_count:\n return frames_count[project_name]['count']\n else:\n bpy.ops.wm.open_mainfile( filepath=path_project )\n count_frames = bpy.context.scene.frame_end\n frames_count[project_name] = {'project_name': project_name, 'count': count_frames}\n return count_frames", "def get_num_cards(self):\n \n return self._hand.get_size()", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def realFrameNumber(self, callback=None):\n count = 0\n theoreticalFrameNumber = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if theoreticalFrameNumber > 30000:\n return theoreticalFrameNumber\n while(True):\n # Capture frame-by-frame\n ret, frame = self.video.read()\n if not ret:\n break\n if callback != None:\n callback(0.1 + (count / theoreticalFrameNumber) * 0.75, \"Calculating the number of frame\")\n count += 1\n return count", "def get_max_frames(self):\n return 8", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def get_num_frames(filename, ext='*.jpg'):\n if os.path.isdir(filename):\n return len(glob.glob(os.path.join(filename, ext)))\n elif os.path.isfile(filename):\n cmd = ('ffprobe -v 0 -count_frames -select_streams v:0 '\n '-show_entries stream=nb_read_frames -of '\n 'default=nokey=1:noprint_wrappers=1 ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n nframes_expr = pid.stdout\n nframes = int(nframes_expr.rstrip())\n return nframes\n else:\n raise ValueError('Unexpect filename: {}'.format(filename))", "def n_total_files(self):\n return len(self.fileinfo)", "def full_frame_length(self):\n return self.height * self.width * 3", "def sample_count(self):\n if self._sample_count:\n return self._sample_count\n else:\n return self._wave.getnframes()", "def get_frame_width(self) -> int:\n return self.__sim.frame_size()[0]", "def in_waiting(self):\n [ack, txcount, rxcount] = self._GetResponseFrame()\n return rxcount", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self):\n return(len(self.cards))", "def get_frame_size(*args):\n return _ida_frame.get_frame_size(*args)", "def count(self):\n return len(self.deck)", "def get_frame_retsize(*args):\n return _ida_frame.get_frame_retsize(*args)", "def size(self) -> int:\n return self.stat().size", "def num_cards(self):\n length=len(self.cards)\n return length", "def count(self):\n return len(self.read_ints())", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def get_received_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf 
TG\")", "def count_remaining(self):\n\t\treturn self._queue.qsize()", "def count_remaining(self):\n\t\treturn self._queue.qsize()", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def size(self):\n\t\treturn self._count", "def total_nt(self) -> int:\n return self.sequence.length", "def CapturedPacketCount(self):\n if self.force_auto_sync:\n self.get('CapturedPacketCount')\n return self._CapturedPacketCount", "def total_bytes_to_process(self) -> float:\n return pulumi.get(self, \"total_bytes_to_process\")", "def count(self):\n # TODO not implemented yet\n return 0", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def get_total_length_of_probe_chains(self):\n return self.total_probe_length", "def size(self): #returns the size or number of items in the stack\n if self.is_empty():\n return 0\n else:\n return self.num_items", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def bytes_total(self):\n return int(self.status[\"pgmap\"][\"bytes_total\"])", "def get_total_cameras(self) -> int:\n return self.num_cameras", "def get_bytes_consumed(self):\n total = 0\n for event in self.iter_events(EVENT_NAME_BYTES_CONSUMED):\n total += event.data[\"bytes_consumed\"]\n\n return total", "def nbytes(self) -> int:\n return self._nbytes(False)", "def get_total_session_count(self) -> int:\n return self.streams_count", "def getLength(self):\n return self.count", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def frame_width(self) -> int:\n pass", "def numReady(antReady) :\n return len(antReady.ready)", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def num_calls_total(self):\n return self._num_calls_total", "def get_length(self):\n return len(self.cards)", "def rx_packet_count(self):\n return self._rx_packet_count", "def getWidth(self):\n return frameWidth", "def frameWidth(self):\n return self._frame_width", "def get_frame_height(self) -> int:\n return self.__sim.frame_size()[1]", "def __len__(self):\n\t\treturn len(self._idle) + len(self._running)", "def size(self):\n return len(self.cards)", "def size(self):\n return len(self._cards)", "def count(self):\n return len(self.wallpapers)", "def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n 
M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def total_bytes_processed(self):\n total_bytes_processed = self._properties.get(\"totalBytesProcessed\")\n if total_bytes_processed is not None:\n return int(total_bytes_processed)", "def get_length(self):\n length = 0\n for card in self.decklist:\n length += card.amount\n return length", "def get_length(self):\n\t\treturn len(self._blocks)", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def duration(self):\n return self.sound.nframes", "def get_uds_3_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size", "def total_count(self) -> int:\n return self.__total_count", "def get_record_count(self):\n return os.path.getsize(self.path) / self._get_record_size()", "def calls_remaining(self) -> int:\n return self.usage_limit - self.current_usage", "def count(self):\n return self.size()", "def count(self):\n \n return len(self.img_lst)", "def count(self):\n return len(self._runs)", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes" ]
[ "0.8600426", "0.77835464", "0.7621566", "0.75558245", "0.7531837", "0.7526261", "0.7391324", "0.7189588", "0.71559983", "0.70775414", "0.70692587", "0.7036695", "0.7004031", "0.69709444", "0.6951622", "0.688492", "0.68131065", "0.6812733", "0.6789151", "0.66947013", "0.66488767", "0.66001004", "0.6592747", "0.65701926", "0.6564938", "0.6561499", "0.6530733", "0.6520889", "0.6509262", "0.647645", "0.6471139", "0.6458702", "0.6456331", "0.6451922", "0.645154", "0.6443686", "0.64222026", "0.642136", "0.642136", "0.642136", "0.642136", "0.6421259", "0.641965", "0.64144915", "0.641336", "0.64094", "0.6383412", "0.63620096", "0.6352646", "0.63466597", "0.634375", "0.634375", "0.6328908", "0.63212365", "0.6320359", "0.63163173", "0.6297705", "0.6293008", "0.62918705", "0.62852746", "0.6284918", "0.62846404", "0.6284639", "0.6278952", "0.62780124", "0.6272427", "0.6257347", "0.6254126", "0.62491995", "0.6247549", "0.6243126", "0.6238075", "0.6228046", "0.62187785", "0.6215849", "0.62156683", "0.6207663", "0.62017983", "0.62002146", "0.6198065", "0.6181442", "0.6172983", "0.6170471", "0.6163588", "0.6153185", "0.61494476", "0.61444867", "0.61412317", "0.61400443", "0.6138791", "0.6137152", "0.6129649", "0.6129056", "0.61277425", "0.6123952", "0.61172706", "0.6108896", "0.61078346", "0.6105151", "0.6103284", "0.6103284" ]
0.0
-1
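The positive document of this last record is only an abstract stub, and its document_score of 0.0 together with document_rank of -1 suggests it was never ranked among the retrieved negatives. For contrast, here is a minimal, self-contained sketch (not from the dataset) of what a concrete override of that `__len__` stub might look like, assuming a frame source backed by an in-memory sequence; the class and attribute names are invented for illustration:

```python
# Toy frame source; real implementations vary (videos, ring buffers, ...).
class ListFrameSource:
    def __init__(self, frames):
        self._frames = list(frames)  # materialize once so len() is O(1)

    def __len__(self):
        # Returns the total number of available frames.
        return len(self._frames)

# Usage: len(ListFrameSource(range(24))) == 24
```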