repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
marrow/mailer
marrow/mailer/validator.py
https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/validator.py#L160-L184
def _apply_common_rules(self, part, maxlength): """This method contains the rules that must be applied to both the domain and the local part of the e-mail address. """ part = part.strip() if self.fix: part = part.strip('.') if not part: return part, 'It cannot be empty.' if len(part) > maxlength: return part, 'It cannot be longer than %i chars.' % maxlength if part[0] == '.': return part, 'It cannot start with a dot.' if part[-1] == '.': return part, 'It cannot end with a dot.' if '..' in part: return part, 'It cannot contain consecutive dots.' return part, ''
[ "def", "_apply_common_rules", "(", "self", ",", "part", ",", "maxlength", ")", ":", "part", "=", "part", ".", "strip", "(", ")", "if", "self", ".", "fix", ":", "part", "=", "part", ".", "strip", "(", "'.'", ")", "if", "not", "part", ":", "return", ...
This method contains the rules that must be applied to both the domain and the local part of the e-mail address.
[ "This", "method", "contains", "the", "rules", "that", "must", "be", "applied", "to", "both", "the", "domain", "and", "the", "local", "part", "of", "the", "e", "-", "mail", "address", "." ]
python
train
viniciuschiele/flask-apscheduler
flask_apscheduler/scheduler.py
https://github.com/viniciuschiele/flask-apscheduler/blob/cc52c39e1948c4e8de5da0d01db45f1779f61997/flask_apscheduler/scheduler.py#L264-L276
def run_job(self, id, jobstore=None): """ Run the given job without scheduling it. :param id: the identifier of the job. :param str jobstore: alias of the job store that contains the job :return: """ job = self._scheduler.get_job(id, jobstore) if not job: raise JobLookupError(id) job.func(*job.args, **job.kwargs)
[ "def", "run_job", "(", "self", ",", "id", ",", "jobstore", "=", "None", ")", ":", "job", "=", "self", ".", "_scheduler", ".", "get_job", "(", "id", ",", "jobstore", ")", "if", "not", "job", ":", "raise", "JobLookupError", "(", "id", ")", "job", "."...
Run the given job without scheduling it. :param id: the identifier of the job. :param str jobstore: alias of the job store that contains the job :return:
[ "Run", "the", "given", "job", "without", "scheduling", "it", ".", ":", "param", "id", ":", "the", "identifier", "of", "the", "job", ".", ":", "param", "str", "jobstore", ":", "alias", "of", "the", "job", "store", "that", "contains", "the", "job", ":", ...
python
train
FPGAwars/apio
apio/commands/uninstall.py
https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/uninstall.py#L31-L42
def cli(ctx, packages, all, list, platform): """Uninstall packages.""" if packages: _uninstall(packages, platform) elif all: # pragma: no cover packages = Resources(platform).packages _uninstall(packages, platform) elif list: Resources(platform).list_packages(installed=True, notinstalled=False) else: click.secho(ctx.get_help())
[ "def", "cli", "(", "ctx", ",", "packages", ",", "all", ",", "list", ",", "platform", ")", ":", "if", "packages", ":", "_uninstall", "(", "packages", ",", "platform", ")", "elif", "all", ":", "# pragma: no cover", "packages", "=", "Resources", "(", "platf...
Uninstall packages.
[ "Uninstall", "packages", "." ]
python
train
acorg/dark-matter
dark/proteins.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/proteins.py#L147-L164
def lookup(self, pathogenName, sampleName): """ Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count. This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}. @param pathogenName: A C{str} pathogen name. @param sampleName: A C{str} sample name. @raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination. @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames """ pathogenIndex = self._pathogens[pathogenName] sampleIndex = self._samples[sampleName] return self._readsFilenames[(pathogenIndex, sampleIndex)]
[ "def", "lookup", "(", "self", ",", "pathogenName", ",", "sampleName", ")", ":", "pathogenIndex", "=", "self", ".", "_pathogens", "[", "pathogenName", "]", "sampleIndex", "=", "self", ".", "_samples", "[", "sampleName", "]", "return", "self", ".", "_readsFile...
Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count. This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}. @param pathogenName: A C{str} pathogen name. @param sampleName: A C{str} sample name. @raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination. @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames
[ "Look", "up", "a", "pathogen", "name", "sample", "name", "combination", "and", "get", "its", "FASTA", "/", "FASTQ", "file", "name", "and", "unique", "read", "count", "." ]
python
train
sys-git/certifiable
certifiable/cli_impl/utils.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/cli_impl/utils.py#L73-L110
def load_value_from_schema(v): """ Load a value from a schema defined string. """ x = urllib.parse.urlparse(v) if x.scheme.lower() == 'decimal': v = Decimal(x.netloc) elif x.scheme.lower() in ['int', 'integer']: v = int(x.netloc) elif x.scheme.lower() == 'float': v = float(x.netloc) elif x.scheme.lower() in ['s', 'str', 'string']: v = str(x.netloc) elif x.scheme.lower() in ['u', 'unicode']: v = six.u(x.netloc) elif x.scheme.lower() == 'email': v = six.u(x.netloc) elif x.scheme.lower() == 'bool': v = bool(x.netloc) elif x.scheme.lower() in ['b', 'bytes']: v = six.b(x.netloc) elif x.scheme.lower() in ['ts.iso8601', 'timestamp.iso8601']: v = MayaDT.from_iso8601(x.netloc).datetime() elif x.scheme.lower() in ['ts.rfc2822', 'timestamp.rfc2822']: v = MayaDT.from_rfc2822(x.netloc).datetime() elif x.scheme.lower() in ['ts.rfc3339', 'timestamp.rfx3339']: v = MayaDT.from_rfc3339(x.netloc).datetime() elif x.scheme.lower() in ['ts', 'timestamp']: v = maya.parse(x.netloc).datetime() elif x.scheme.lower() == 'date': v = datetime.date.fromtimestamp(float(x.netloc)) elif x.scheme.lower() == 'time': v = time.gmtime(float(x.netloc)) else: v = None return v
[ "def", "load_value_from_schema", "(", "v", ")", ":", "x", "=", "urllib", ".", "parse", ".", "urlparse", "(", "v", ")", "if", "x", ".", "scheme", ".", "lower", "(", ")", "==", "'decimal'", ":", "v", "=", "Decimal", "(", "x", ".", "netloc", ")", "e...
Load a value from a schema defined string.
[ "Load", "a", "value", "from", "a", "schema", "defined", "string", "." ]
python
train
saltstack/salt
salt/utils/pkg/rpm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/pkg/rpm.py#L46-L60
def get_osarch(): ''' Get the os architecture using rpm --eval ''' if salt.utils.path.which('rpm'): ret = subprocess.Popen( 'rpm --eval "%{_host_cpu}"', shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] else: ret = ''.join([x for x in platform.uname()[-2:] if x][-1:]) return salt.utils.stringutils.to_str(ret).strip() or 'unknown'
[ "def", "get_osarch", "(", ")", ":", "if", "salt", ".", "utils", ".", "path", ".", "which", "(", "'rpm'", ")", ":", "ret", "=", "subprocess", ".", "Popen", "(", "'rpm --eval \"%{_host_cpu}\"'", ",", "shell", "=", "True", ",", "close_fds", "=", "True", "...
Get the os architecture using rpm --eval
[ "Get", "the", "os", "architecture", "using", "rpm", "--", "eval" ]
python
train
hellupline/flask-manager
flask_manager/utils.py
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/utils.py#L23-L27
def slugify(value): """Simple Slugify.""" s1 = first_cap_re.sub(r'\1_\2', value) s2 = all_cap_re.sub(r'\1_\2', s1) return s2.lower().replace(' _', '_').replace(' ', '_')
[ "def", "slugify", "(", "value", ")", ":", "s1", "=", "first_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "value", ")", "s2", "=", "all_cap_re", ".", "sub", "(", "r'\\1_\\2'", ",", "s1", ")", "return", "s2", ".", "lower", "(", ")", ".", "replace", "...
Simple Slugify.
[ "Simple", "Slugify", "." ]
python
train
Dallinger/Dallinger
dallinger/experiment_server/experiment_server.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/experiment_server/experiment_server.py#L1015-L1065
def node_neighbors(node_id): """Send a GET request to the node table. This calls the neighbours method of the node making the request and returns a list of descriptions of the nodes (even if there is only one). Required arguments: participant_id, node_id Optional arguments: type, connection After getting the neighbours it also calls exp.node_get_request() """ exp = Experiment(session) # get the parameters node_type = request_parameter( parameter="node_type", parameter_type="known_class", default=models.Node ) connection = request_parameter(parameter="connection", default="to") failed = request_parameter(parameter="failed", parameter_type="bool", optional=True) for x in [node_type, connection]: if type(x) == Response: return x # make sure the node exists node = models.Node.query.get(node_id) if node is None: return error_response( error_type="/node/neighbors, node does not exist", error_text="/node/{0}/neighbors, node {0} does not exist".format(node_id), ) # get its neighbors if failed is not None: # This will always raise because "failed" is not a supported parameter. # We just want to pass the exception message back in the response: try: node.neighbors(type=node_type, direction=connection, failed=failed) except Exception as e: return error_response(error_type="node.neighbors", error_text=str(e)) else: nodes = node.neighbors(type=node_type, direction=connection) try: # ping the experiment exp.node_get_request(node=node, nodes=nodes) session.commit() except Exception: return error_response(error_type="exp.node_get_request") return success_response(nodes=[n.__json__() for n in nodes])
[ "def", "node_neighbors", "(", "node_id", ")", ":", "exp", "=", "Experiment", "(", "session", ")", "# get the parameters", "node_type", "=", "request_parameter", "(", "parameter", "=", "\"node_type\"", ",", "parameter_type", "=", "\"known_class\"", ",", "default", ...
Send a GET request to the node table. This calls the neighbours method of the node making the request and returns a list of descriptions of the nodes (even if there is only one). Required arguments: participant_id, node_id Optional arguments: type, connection After getting the neighbours it also calls exp.node_get_request()
[ "Send", "a", "GET", "request", "to", "the", "node", "table", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L99-L106
def store_dcnm_subnet_dict(self, subnet_dict, direc): """Store the subnet attributes and dict. """ if direc == 'in': self.in_dcnm_subnet_dict = subnet_dict self.in_subnet_dict = self._parse_subnet(subnet_dict) else: self.out_dcnm_subnet_dict = subnet_dict self.out_subnet_dict = self._parse_subnet(subnet_dict)
[ "def", "store_dcnm_subnet_dict", "(", "self", ",", "subnet_dict", ",", "direc", ")", ":", "if", "direc", "==", "'in'", ":", "self", ".", "in_dcnm_subnet_dict", "=", "subnet_dict", "self", ".", "in_subnet_dict", "=", "self", ".", "_parse_subnet", "(", "subnet_d...
Store the subnet attributes and dict.
[ "Store", "the", "subnet", "attributes", "and", "dict", "." ]
python
train
mezz64/pyHik
pyhik/hikvision.py
https://github.com/mezz64/pyHik/blob/1e7afca926e2b045257a43cbf8b1236a435493c2/pyhik/hikvision.py#L449-L531
def alert_stream(self, reset_event, kill_event): """Open event stream.""" _LOGGING.debug('Stream Thread Started: %s, %s', self.name, self.cam_id) start_event = False parse_string = "" fail_count = 0 url = '%s/ISAPI/Event/notification/alertStream' % self.root_url # pylint: disable=too-many-nested-blocks while True: try: stream = self.hik_request.get(url, stream=True, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT)) if stream.status_code == requests.codes.not_found: # Try alternate URL for stream url = '%s/Event/notification/alertStream' % self.root_url stream = self.hik_request.get(url, stream=True) if stream.status_code != requests.codes.ok: raise ValueError('Connection unsucessful.') else: _LOGGING.debug('%s Connection Successful.', self.name) fail_count = 0 self.watchdog.start() for line in stream.iter_lines(): # _LOGGING.debug('Processing line from %s', self.name) # filter out keep-alive new lines if line: str_line = line.decode("utf-8", "ignore") # New events start with --boundry if str_line.find('<EventNotificationAlert') != -1: # Start of event message start_event = True parse_string += str_line elif str_line.find('</EventNotificationAlert>') != -1: # Message end found found parse_string += str_line start_event = False if parse_string: tree = ET.fromstring(parse_string) self.process_stream(tree) self.update_stale() parse_string = "" else: if start_event: parse_string += str_line if kill_event.is_set(): # We were asked to stop the thread so lets do so. break elif reset_event.is_set(): # We need to reset the connection. raise ValueError('Watchdog failed.') if kill_event.is_set(): # We were asked to stop the thread so lets do so. _LOGGING.debug('Stopping event stream thread for %s', self.name) self.watchdog.stop() self.hik_request.close() return elif reset_event.is_set(): # We need to reset the connection. 
raise ValueError('Watchdog failed.') except (ValueError, requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as err: fail_count += 1 reset_event.clear() _LOGGING.warning('%s Connection Failed (count=%d). Waiting %ss. Err: %s', self.name, fail_count, (fail_count * 5) + 5, err) parse_string = "" self.watchdog.stop() self.hik_request.close() time.sleep(5) self.update_stale() time.sleep(fail_count * 5) continue
[ "def", "alert_stream", "(", "self", ",", "reset_event", ",", "kill_event", ")", ":", "_LOGGING", ".", "debug", "(", "'Stream Thread Started: %s, %s'", ",", "self", ".", "name", ",", "self", ".", "cam_id", ")", "start_event", "=", "False", "parse_string", "=", ...
Open event stream.
[ "Open", "event", "stream", "." ]
python
train
MacHu-GWU/crawl_zillow-project
crawl_zillow/model.py
https://github.com/MacHu-GWU/crawl_zillow-project/blob/c6d7ca8e4c80e7e7e963496433ef73df1413c16e/crawl_zillow/model.py#L50-L63
def key(self): """ Example:: /browse/homes/ca/ -> ca /browse/homes/ca/los-angeles-county/ -> los-angeles-county /browse/homes/ca/los-angeles-county/91001/ -> 91001 /browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895 :return: """ return [part.strip() for part in self.href.split("/") if part.strip()][ -1]
[ "def", "key", "(", "self", ")", ":", "return", "[", "part", ".", "strip", "(", ")", "for", "part", "in", "self", ".", "href", ".", "split", "(", "\"/\"", ")", "if", "part", ".", "strip", "(", ")", "]", "[", "-", "1", "]" ]
Example:: /browse/homes/ca/ -> ca /browse/homes/ca/los-angeles-county/ -> los-angeles-county /browse/homes/ca/los-angeles-county/91001/ -> 91001 /browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895 :return:
[ "Example", "::" ]
python
train
zebpalmer/WeatherAlerts
weatheralerts/feed.py
https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/feed.py#L38-L50
def raw_cap(self, refresh=False): """ Raw xml(cap) of the the feed. If a valid cache is available it is used, else a new copy of the feed is grabbed Note: you can force refresh here, if you do, don't also manually call refresh """ if refresh is True: self._raw = self.refresh() if self._raw is None: self._raw = self._get_feed_cache() if self._raw is None: self._raw = self.refresh() return self._raw
[ "def", "raw_cap", "(", "self", ",", "refresh", "=", "False", ")", ":", "if", "refresh", "is", "True", ":", "self", ".", "_raw", "=", "self", ".", "refresh", "(", ")", "if", "self", ".", "_raw", "is", "None", ":", "self", ".", "_raw", "=", "self",...
Raw xml(cap) of the the feed. If a valid cache is available it is used, else a new copy of the feed is grabbed Note: you can force refresh here, if you do, don't also manually call refresh
[ "Raw", "xml", "(", "cap", ")", "of", "the", "the", "feed", ".", "If", "a", "valid", "cache", "is", "available", "it", "is", "used", "else", "a", "new", "copy", "of", "the", "feed", "is", "grabbed", "Note", ":", "you", "can", "force", "refresh", "he...
python
train
wq/html-json-forms
html_json_forms/utils.py
https://github.com/wq/html-json-forms/blob/4dfbfabeee924ba832a7a387ab3b02b6d51d9701/html_json_forms/utils.py#L50-L142
def parse_json_path(path): """ Parse a string as a JSON path An implementation of "steps to parse a JSON encoding path" http://www.w3.org/TR/html-json-forms/#dfn-steps-to-parse-a-json-encoding-path """ # Steps 1, 2, 3 original_path = path steps = [] # Step 11 (Failure) failed = [ JsonStep( type="object", key=original_path, last=True, failed=True, ) ] # Other variables for later use digit_re = re.compile(r'^\[([0-9]+)\]') key_re = re.compile(r'^\[([^\]]+)\]') # Step 4 - Find characters before first [ (if any) parts = path.split("[") first_key = parts[0] if parts[1:]: path = "[" + "[".join(parts[1:]) else: path = "" # Step 5 - According to spec, keys cannot start with [ # NOTE: This was allowed in older DRF versions, so disabling rule for now # if not first_key: # return failed # Step 6 - Save initial step steps.append(JsonStep( type="object", key=first_key, )) # Step 7 - Simple single-step case (no [ found) if not path: steps[-1].last = True return steps # Step 8 - Loop while path: # Step 8.1 - Check for single-item array if path[:2] == "[]": path = path[2:] steps.append(JsonStep( type="array", key=0, )) continue # Step 8.2 - Check for array[index] digit_match = digit_re.match(path) if digit_match: path = digit_re.sub("", path) steps.append(JsonStep( type="array", key=int(digit_match.group(1)), )) continue # Step 8.3 - Check for object[key] key_match = key_re.match(path) if key_match: path = key_re.sub("", path) steps.append(JsonStep( type="object", key=key_match.group(1), )) continue # Step 8.4 - Invalid key format return failed # Step 9 next_step = None for step in reversed(steps): if next_step: step.next_type = next_step.type else: step.last = True next_step = step return steps
[ "def", "parse_json_path", "(", "path", ")", ":", "# Steps 1, 2, 3", "original_path", "=", "path", "steps", "=", "[", "]", "# Step 11 (Failure)", "failed", "=", "[", "JsonStep", "(", "type", "=", "\"object\"", ",", "key", "=", "original_path", ",", "last", "=...
Parse a string as a JSON path An implementation of "steps to parse a JSON encoding path" http://www.w3.org/TR/html-json-forms/#dfn-steps-to-parse-a-json-encoding-path
[ "Parse", "a", "string", "as", "a", "JSON", "path", "An", "implementation", "of", "steps", "to", "parse", "a", "JSON", "encoding", "path", "http", ":", "//", "www", ".", "w3", ".", "org", "/", "TR", "/", "html", "-", "json", "-", "forms", "/", "#dfn...
python
valid
axltxl/m2bk
m2bk/drivers/s3.py
https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/drivers/s3.py#L37-L91
def load(*, aws_access_key_id=None, aws_secret_access_key=None, s3_bucket=AWS_S3_DEFAULT_BUCKET_NAME, dry_run=False, **kwargs): """ Load this driver Note that if either aws_access_key_id or aws_secret_access_key are not specified, they will not be taken into account and instead authentication towards AWS will solely rely on boto config :param aws_access_key_id(str, optional): Access key ID :param aws_secret_access_key(str, optional): Secret access key :param s3_bucket(str, optional): Name of the S3 bucket to be used to store the file :param dry_run(bool, optional): Whether to activate dry run mode on this driver :param \*\*kwargs: arbitrary keyword arguments """ global _dry_run, _has_init global _aws_access_key_id, _aws_secret_access_key, _bucket_name, _boto_conn # dry run _dry_run = dry_run # AWS parameters from kwargs _aws_access_key_id = aws_access_key_id _aws_secret_access_key = aws_secret_access_key if _aws_access_key_id is not None and type(_aws_access_key_id) != str: raise TypeError('aws_access_key_id must be str') if _aws_secret_access_key is not None \ and type(_aws_secret_access_key) != str: raise TypeError('aws_secret_access_key must be str') # Check the bucket name before doing anything _bucket_name = s3_bucket if type(_bucket_name) != str: raise TypeError('s3_bucket must be str') if not _bucket_name: raise ValueError("s3_bucket cannot be empty") # Connect to S3 service log.msg("Connecting to Amazon S3 Service") if not _aws_access_key_id or not _aws_secret_access_key: log.msg_warn("No AWS credentials were given. " + "Authentication will be done via boto.config/IAM role") if not _dry_run: _boto_conn = boto.connect_s3() elif not _dry_run: _boto_conn = boto.connect_s3(aws_access_key_id=_aws_access_key_id, aws_secret_access_key=_aws_secret_access_key) log.msg("Connected to AWS S3 service successfully!") # Indicate this driver has been properly initialised _has_init = True
[ "def", "load", "(", "*", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "s3_bucket", "=", "AWS_S3_DEFAULT_BUCKET_NAME", ",", "dry_run", "=", "False", ",", "*", "*", "kwargs", ")", ":", "global", "_dry_run", ",", "_has_...
Load this driver Note that if either aws_access_key_id or aws_secret_access_key are not specified, they will not be taken into account and instead authentication towards AWS will solely rely on boto config :param aws_access_key_id(str, optional): Access key ID :param aws_secret_access_key(str, optional): Secret access key :param s3_bucket(str, optional): Name of the S3 bucket to be used to store the file :param dry_run(bool, optional): Whether to activate dry run mode on this driver :param \*\*kwargs: arbitrary keyword arguments
[ "Load", "this", "driver" ]
python
train
hitchtest/hitchserve
hitchserve/hitch_service.py
https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/hitch_service.py#L237-L247
def subcommand(self, *args): """Get subcommand acting on a service. Subcommand will run in service directory and with the environment variables used to run the service itself. Args: *args: Arguments to run command (e.g. "redis-cli", "-n", "1") Returns: Subcommand object. """ return Subcommand(*args, directory=self.directory, env_vars=self.env_vars)
[ "def", "subcommand", "(", "self", ",", "*", "args", ")", ":", "return", "Subcommand", "(", "*", "args", ",", "directory", "=", "self", ".", "directory", ",", "env_vars", "=", "self", ".", "env_vars", ")" ]
Get subcommand acting on a service. Subcommand will run in service directory and with the environment variables used to run the service itself. Args: *args: Arguments to run command (e.g. "redis-cli", "-n", "1") Returns: Subcommand object.
[ "Get", "subcommand", "acting", "on", "a", "service", ".", "Subcommand", "will", "run", "in", "service", "directory", "and", "with", "the", "environment", "variables", "used", "to", "run", "the", "service", "itself", "." ]
python
train
pytroll/satpy
satpy/readers/seviri_l1b_hrit.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/seviri_l1b_hrit.py#L398-L439
def get_area_extent(self, size, offsets, factors, platform_height): """Get the area extent of the file. Until December 2017, the data is shifted by 1.5km SSP North and West against the nominal GEOS projection. Since December 2017 this offset has been corrected. A flag in the data indicates if the correction has been applied. If no correction was applied, adjust the area extent to match the shifted data. For more information see Section 3.1.4.2 in the MSG Level 1.5 Image Data Format Description. The correction of the area extent is documented in a `developer's memo <https://github.com/pytroll/satpy/wiki/ SEVIRI-georeferencing-offset-correction>`_. """ nlines, ncols = size h = platform_height loff, coff = offsets loff -= nlines offsets = loff, coff # count starts at 1 cols = 1 - 0.5 lines = 0.5 - 1 ll_x, ll_y = self.get_xy_from_linecol(-lines, cols, offsets, factors) cols += ncols lines += nlines ur_x, ur_y = self.get_xy_from_linecol(-lines, cols, offsets, factors) aex = (np.deg2rad(ll_x) * h, np.deg2rad(ll_y) * h, np.deg2rad(ur_x) * h, np.deg2rad(ur_y) * h) if not self.mda['offset_corrected']: # Geo-referencing offset present. Adjust area extent to match the shifted data. Note that we have to adjust # the corners in the *opposite* direction, i.e. S-E. Think of it as if the coastlines were fixed and you # dragged the image to S-E until coastlines and data area aligned correctly. # # Although the image is flipped upside-down and left-right, the projection coordinates retain their # properties, i.e. positive x/y is East/North, respectively. xadj = 1500 yadj = -1500 aex = (aex[0] + xadj, aex[1] + yadj, aex[2] + xadj, aex[3] + yadj) return aex
[ "def", "get_area_extent", "(", "self", ",", "size", ",", "offsets", ",", "factors", ",", "platform_height", ")", ":", "nlines", ",", "ncols", "=", "size", "h", "=", "platform_height", "loff", ",", "coff", "=", "offsets", "loff", "-=", "nlines", "offsets", ...
Get the area extent of the file. Until December 2017, the data is shifted by 1.5km SSP North and West against the nominal GEOS projection. Since December 2017 this offset has been corrected. A flag in the data indicates if the correction has been applied. If no correction was applied, adjust the area extent to match the shifted data. For more information see Section 3.1.4.2 in the MSG Level 1.5 Image Data Format Description. The correction of the area extent is documented in a `developer's memo <https://github.com/pytroll/satpy/wiki/ SEVIRI-georeferencing-offset-correction>`_.
[ "Get", "the", "area", "extent", "of", "the", "file", "." ]
python
train
klen/pylama
pylama/hook.py
https://github.com/klen/pylama/blob/f436ccc6b55b33381a295ded753e467953cf4379/pylama/hook.py#L64-L75
def install_git(path): """Install hook in Git repository.""" hook = op.join(path, 'pre-commit') with open(hook, 'w') as fd: fd.write("""#!/usr/bin/env python import sys from pylama.hook import git_hook if __name__ == '__main__': sys.exit(git_hook()) """) chmod(hook, 484)
[ "def", "install_git", "(", "path", ")", ":", "hook", "=", "op", ".", "join", "(", "path", ",", "'pre-commit'", ")", "with", "open", "(", "hook", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "\"\"\"#!/usr/bin/env python\nimport sys\nfrom pylam...
Install hook in Git repository.
[ "Install", "hook", "in", "Git", "repository", "." ]
python
train
tradenity/python-sdk
tradenity/resources/free_shipping_promotion.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/free_shipping_promotion.py#L527-L547
def get_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, **kwargs): """Find FreeShippingPromotion Return single instance of FreeShippingPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_promotion_by_id(free_shipping_promotion_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to return (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) else: (data) = cls._get_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) return data
[ "def", "get_free_shipping_promotion_by_id", "(", "cls", ",", "free_shipping_promotion_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", "....
Find FreeShippingPromotion Return single instance of FreeShippingPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_promotion_by_id(free_shipping_promotion_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to return (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread.
[ "Find", "FreeShippingPromotion" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_web.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_web.py#L191-L196
def get_cgi_parameter_bool(form: cgi.FieldStorage, key: str) -> bool: """ Extracts a boolean parameter from a CGI form, on the assumption that ``"1"`` is ``True`` and everything else is ``False``. """ return is_1(get_cgi_parameter_str(form, key))
[ "def", "get_cgi_parameter_bool", "(", "form", ":", "cgi", ".", "FieldStorage", ",", "key", ":", "str", ")", "->", "bool", ":", "return", "is_1", "(", "get_cgi_parameter_str", "(", "form", ",", "key", ")", ")" ]
Extracts a boolean parameter from a CGI form, on the assumption that ``"1"`` is ``True`` and everything else is ``False``.
[ "Extracts", "a", "boolean", "parameter", "from", "a", "CGI", "form", "on", "the", "assumption", "that", "1", "is", "True", "and", "everything", "else", "is", "False", "." ]
python
train
robertpeteuil/aws-shortcuts
awss/core.py
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L458-L489
def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx)
[ "def", "determine_inst", "(", "i_info", ",", "param_str", ",", "command", ")", ":", "qty_instances", "=", "len", "(", "i_info", ")", "if", "not", "qty_instances", ":", "print", "(", "\"No instances found with parameters: {}\"", ".", "format", "(", "param_str", "...
Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified.
[ "Determine", "the", "instance", "-", "id", "of", "the", "target", "instance", "." ]
python
train
PyCQA/astroid
astroid/raw_building.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/raw_building.py#L167-L170
def object_build_class(node, member, localname): """create astroid for a living class object""" basenames = [base.__name__ for base in member.__bases__] return _base_class_object_build(node, member, basenames, localname=localname)
[ "def", "object_build_class", "(", "node", ",", "member", ",", "localname", ")", ":", "basenames", "=", "[", "base", ".", "__name__", "for", "base", "in", "member", ".", "__bases__", "]", "return", "_base_class_object_build", "(", "node", ",", "member", ",", ...
create astroid for a living class object
[ "create", "astroid", "for", "a", "living", "class", "object" ]
python
train
mitsei/dlkit
dlkit/json_/grading/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/managers.py#L1183-L1206
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id, proxy): """Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook. arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook arg: proxy (osid.proxy.Proxy): a proxy return: (osid.grading.GradeSystemLookupSession) - ``a GradeSystemLookupSession`` raise: NotFound - ``gradebook_id`` not found raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_grade_system_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_grade_system_lookup()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_grade_system_lookup(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.GradeSystemLookupSession(gradebook_id, proxy, self._runtime)
[ "def", "get_grade_system_lookup_session_for_gradebook", "(", "self", ",", "gradebook_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_grade_system_lookup", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check ...
Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook. arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook arg: proxy (osid.proxy.Proxy): a proxy return: (osid.grading.GradeSystemLookupSession) - ``a GradeSystemLookupSession`` raise: NotFound - ``gradebook_id`` not found raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_grade_system_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_grade_system_lookup()`` and ``supports_visible_federation()`` are ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "grade", "system", "lookup", "service", "for", "the", "given", "gradebook", "." ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/compiler.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L582-L591
def macro_def(self, macro_ref, frame): """Dump the macro definition for the def created by macro_body.""" arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args) name = getattr(macro_ref.node, 'name', None) if len(macro_ref.node.args) == 1: arg_tuple += ',' self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, ' 'context.eval_ctx.autoescape)' % (name, arg_tuple, macro_ref.accesses_kwargs, macro_ref.accesses_varargs, macro_ref.accesses_caller))
[ "def", "macro_def", "(", "self", ",", "macro_ref", ",", "frame", ")", ":", "arg_tuple", "=", "', '", ".", "join", "(", "repr", "(", "x", ".", "name", ")", "for", "x", "in", "macro_ref", ".", "node", ".", "args", ")", "name", "=", "getattr", "(", ...
Dump the macro definition for the def created by macro_body.
[ "Dump", "the", "macro", "definition", "for", "the", "def", "created", "by", "macro_body", "." ]
python
train
openstack/monasca-common
monasca_common/rest/utils.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/rest/utils.py#L39-L55
def as_json(data, **kwargs): """Writes data as json. :param dict data: data to convert to json :param kwargs kwargs: kwargs for json dumps :return: json string :rtype: str """ if 'sort_keys' not in kwargs: kwargs['sort_keys'] = False if 'ensure_ascii' not in kwargs: kwargs['ensure_ascii'] = False data = json.dumps(data, **kwargs) return data
[ "def", "as_json", "(", "data", ",", "*", "*", "kwargs", ")", ":", "if", "'sort_keys'", "not", "in", "kwargs", ":", "kwargs", "[", "'sort_keys'", "]", "=", "False", "if", "'ensure_ascii'", "not", "in", "kwargs", ":", "kwargs", "[", "'ensure_ascii'", "]", ...
Writes data as json. :param dict data: data to convert to json :param kwargs kwargs: kwargs for json dumps :return: json string :rtype: str
[ "Writes", "data", "as", "json", "." ]
python
train
androguard/androguard
androguard/gui/treewindow.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/gui/treewindow.py#L33-L72
def fill(self): """Parse all the paths (['Lcom/example/myclass/MyActivity$1;', ...]) and build a tree using the QTreeWidgetItem insertion method.""" log.debug("Fill classes tree") for idx, filename, digest, classes in self.session.get_classes(): for c in sorted(classes, key=lambda c: c.name): sig = Signature(c) path_node = self.root_path_node path = None if not sig.class_path: path = '.' if path not in path_node[0]: path_node[0][path] = ( {}, HashableQTreeWidgetItem(path_node[1])) path_node[0][path][1].setText(0, path) path_node = path_node[0][path] else: # Namespaces for path in sig.class_path: if path not in path_node[0]: path_node[0][path] = ( {}, HashableQTreeWidgetItem(path_node[1])) path_node[0][path][1].setText(0, path) path_node = path_node[0][path] # Class path_node[0][path] = ({}, HashableQTreeWidgetItem(path_node[1])) class_name = sig.class_name if idx > 0: class_name += "@%d" % idx c.current_title = class_name self._reverse_cache[path_node[0][path][1]] = (c, filename, digest) path_node[0][path][1].setText(0, class_name)
[ "def", "fill", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Fill classes tree\"", ")", "for", "idx", ",", "filename", ",", "digest", ",", "classes", "in", "self", ".", "session", ".", "get_classes", "(", ")", ":", "for", "c", "in", "sorted", "...
Parse all the paths (['Lcom/example/myclass/MyActivity$1;', ...]) and build a tree using the QTreeWidgetItem insertion method.
[ "Parse", "all", "the", "paths", "(", "[", "Lcom", "/", "example", "/", "myclass", "/", "MyActivity$1", ";", "...", "]", ")", "and", "build", "a", "tree", "using", "the", "QTreeWidgetItem", "insertion", "method", "." ]
python
train
hydpy-dev/hydpy
hydpy/auxs/anntools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/anntools.py#L565-L577
def nmb_weights_hidden(self) -> int: """Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18 """ nmb = 0 for idx_layer in range(self.nmb_layers-1): nmb += self.nmb_neurons[idx_layer] * self.nmb_neurons[idx_layer+1] return nmb
[ "def", "nmb_weights_hidden", "(", "self", ")", "->", "int", ":", "nmb", "=", "0", "for", "idx_layer", "in", "range", "(", "self", ".", "nmb_layers", "-", "1", ")", ":", "nmb", "+=", "self", ".", "nmb_neurons", "[", "idx_layer", "]", "*", "self", ".",...
Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18
[ "Number", "of", "hidden", "weights", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiNetworkIPv6.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiNetworkIPv6.py#L60-L69
def undeploy(self, id_networkv6): """Remove deployment of network in equipments and set column 'active = 0' in tables redeipv6 ] :param id_networkv6: ID for NetworkIPv6 :return: Equipments configuration output """ uri = 'api/networkv6/%s/equipments/' % id_networkv6 return super(ApiNetworkIPv6, self).delete(uri)
[ "def", "undeploy", "(", "self", ",", "id_networkv6", ")", ":", "uri", "=", "'api/networkv6/%s/equipments/'", "%", "id_networkv6", "return", "super", "(", "ApiNetworkIPv6", ",", "self", ")", ".", "delete", "(", "uri", ")" ]
Remove deployment of network in equipments and set column 'active = 0' in tables redeipv6 ] :param id_networkv6: ID for NetworkIPv6 :return: Equipments configuration output
[ "Remove", "deployment", "of", "network", "in", "equipments", "and", "set", "column", "active", "=", "0", "in", "tables", "redeipv6", "]" ]
python
train
liminspace/dju-common
dju_common/templatetags/dju_common.py
https://github.com/liminspace/dju-common/blob/c68860bb84d454a35e66275841c20f38375c2135/dju_common/templatetags/dju_common.py#L170-L196
def recurse(parser, token): """ Iterate recurse data structure. <ul> {% recurse items %} <li> {{ item.name }} {% if item.children %} <ul> {{ subitems }} </ul> {% endif %} </li> {% endrecurse %} </ul> If subelements found in other key/attribute/method then need set its name (default is 'children'): {% recurse items 'subitems' %} Also available depth level in variable {{ recurse_level }} (starting of 1) """ params = token.contents.split() if not 2 <= len(params) <= 3: raise template.TemplateSyntaxError('%s parameters error' % params[0]) template_nodes = parser.parse(('endrecurse',)) parser.delete_first_token() return RecurseNode(template_nodes, template.Variable(params[1]), (params[2][1:-1] if len(params) == 3 else 'children'))
[ "def", "recurse", "(", "parser", ",", "token", ")", ":", "params", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "not", "2", "<=", "len", "(", "params", ")", "<=", "3", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "'%s pa...
Iterate recurse data structure. <ul> {% recurse items %} <li> {{ item.name }} {% if item.children %} <ul> {{ subitems }} </ul> {% endif %} </li> {% endrecurse %} </ul> If subelements found in other key/attribute/method then need set its name (default is 'children'): {% recurse items 'subitems' %} Also available depth level in variable {{ recurse_level }} (starting of 1)
[ "Iterate", "recurse", "data", "structure", ".", "<ul", ">", "{", "%", "recurse", "items", "%", "}", "<li", ">", "{{", "item", ".", "name", "}}", "{", "%", "if", "item", ".", "children", "%", "}", "<ul", ">", "{{", "subitems", "}}", "<", "/", "ul"...
python
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L171-L180
def _get_config(self, unit, filename): """Get a ConfigParser object for parsing a unit's config file.""" file_contents = unit.file_contents(filename) # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config
[ "def", "_get_config", "(", "self", ",", "unit", ",", "filename", ")", ":", "file_contents", "=", "unit", ".", "file_contents", "(", "filename", ")", "# NOTE(beisner): by default, ConfigParser does not handle options", "# with no value, such as the flags used in the mysql my.cn...
Get a ConfigParser object for parsing a unit's config file.
[ "Get", "a", "ConfigParser", "object", "for", "parsing", "a", "unit", "s", "config", "file", "." ]
python
train
OzymandiasTheGreat/python-libinput
libinput/__init__.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/__init__.py#L221-L236
def next_event_type(self): """Return the type of the next event in the internal queue. This method does not pop the event off the queue and the next call to :attr:`events` returns that event. Returns: ~libinput.constant.EventType: The event type of the next available event or :obj:`None` if no event is available. """ type_ = self._libinput.libinput_next_event_type(self._li) if type_ == 0: return None else: return EventType(type_)
[ "def", "next_event_type", "(", "self", ")", ":", "type_", "=", "self", ".", "_libinput", ".", "libinput_next_event_type", "(", "self", ".", "_li", ")", "if", "type_", "==", "0", ":", "return", "None", "else", ":", "return", "EventType", "(", "type_", ")"...
Return the type of the next event in the internal queue. This method does not pop the event off the queue and the next call to :attr:`events` returns that event. Returns: ~libinput.constant.EventType: The event type of the next available event or :obj:`None` if no event is available.
[ "Return", "the", "type", "of", "the", "next", "event", "in", "the", "internal", "queue", "." ]
python
train
timstaley/voevent-parse
src/voeventparse/voevent.py
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L110-L130
def load(file, check_version=True): """Load VOEvent from file object. A simple wrapper to read a file before passing the contents to :py:func:`.loads`. Use with an open file object, e.g.:: with open('/path/to/voevent.xml', 'rb') as f: v = vp.load(f) Args: file (io.IOBase): An open file object (binary mode preferred), see also http://lxml.de/FAQ.html : "Can lxml parse from file objects opened in unicode/text mode?" check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. """ s = file.read() return loads(s, check_version)
[ "def", "load", "(", "file", ",", "check_version", "=", "True", ")", ":", "s", "=", "file", ".", "read", "(", ")", "return", "loads", "(", "s", ",", "check_version", ")" ]
Load VOEvent from file object. A simple wrapper to read a file before passing the contents to :py:func:`.loads`. Use with an open file object, e.g.:: with open('/path/to/voevent.xml', 'rb') as f: v = vp.load(f) Args: file (io.IOBase): An open file object (binary mode preferred), see also http://lxml.de/FAQ.html : "Can lxml parse from file objects opened in unicode/text mode?" check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree.
[ "Load", "VOEvent", "from", "file", "object", "." ]
python
train
saltstack/salt
salt/states/aws_sqs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/aws_sqs.py#L73-L112
def absent( name, region, user=None, opts=False): ''' Remove the named SQS queue if it exists. name Name of the SQS queue. region Region to remove the queue from user Name of the user performing the SQS operations opts Include additional arguments and options to the aws command line ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} does_exist = __salt__['aws_sqs.queue_exists'](name, region, opts, user) if does_exist: if __opts__['test']: ret['result'] = None ret['comment'] = 'AWS SQS queue {0} is set to be removed'.format( name) return ret removed = __salt__['aws_sqs.delete_queue'](name, region, opts, user) if removed['retcode'] == 0: ret['changes']['removed'] = removed['stdout'] else: ret['result'] = False ret['comment'] = removed['stderr'] else: ret['comment'] = '{0} does not exist in {1}'.format(name, region) return ret
[ "def", "absent", "(", "name", ",", "region", ",", "user", "=", "None", ",", "opts", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", ...
Remove the named SQS queue if it exists. name Name of the SQS queue. region Region to remove the queue from user Name of the user performing the SQS operations opts Include additional arguments and options to the aws command line
[ "Remove", "the", "named", "SQS", "queue", "if", "it", "exists", "." ]
python
train
idlesign/django-sitecats
sitecats/models.py
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/models.py#L334-L343
def get_from_category_qs(cls, category): """Returns a QuerySet of objects of this type associated with the given category. :param Category category: :rtype: list :return: """ ids = cls.get_ties_for_categories_qs(category).values_list('object_id').distinct() filter_kwargs = {'id__in': [i[0] for i in ids]} return cls.objects.filter(**filter_kwargs)
[ "def", "get_from_category_qs", "(", "cls", ",", "category", ")", ":", "ids", "=", "cls", ".", "get_ties_for_categories_qs", "(", "category", ")", ".", "values_list", "(", "'object_id'", ")", ".", "distinct", "(", ")", "filter_kwargs", "=", "{", "'id__in'", "...
Returns a QuerySet of objects of this type associated with the given category. :param Category category: :rtype: list :return:
[ "Returns", "a", "QuerySet", "of", "objects", "of", "this", "type", "associated", "with", "the", "given", "category", "." ]
python
train
mbi/django-simple-captcha
captcha/helpers.py
https://github.com/mbi/django-simple-captcha/blob/e96cd8f63e41e658d103d12d6486b34195aee555/captcha/helpers.py#L49-L62
def huge_words_and_punctuation_challenge(): "Yay, undocumneted. Mostly used to test Issue 39 - http://code.google.com/p/django-simple-captcha/issues/detail?id=39" fd = open(settings.CAPTCHA_WORDS_DICTIONARY, 'rb') l = fd.readlines() fd.close() word = '' while True: word1 = random.choice(l).strip() word2 = random.choice(l).strip() punct = random.choice(settings.CAPTCHA_PUNCTUATION) word = '%s%s%s' % (word1, punct, word2) if len(word) >= settings.CAPTCHA_DICTIONARY_MIN_LENGTH and len(word) <= settings.CAPTCHA_DICTIONARY_MAX_LENGTH: break return word.upper(), word.lower()
[ "def", "huge_words_and_punctuation_challenge", "(", ")", ":", "fd", "=", "open", "(", "settings", ".", "CAPTCHA_WORDS_DICTIONARY", ",", "'rb'", ")", "l", "=", "fd", ".", "readlines", "(", ")", "fd", ".", "close", "(", ")", "word", "=", "''", "while", "Tr...
Yay, undocumneted. Mostly used to test Issue 39 - http://code.google.com/p/django-simple-captcha/issues/detail?id=39
[ "Yay", "undocumneted", ".", "Mostly", "used", "to", "test", "Issue", "39", "-", "http", ":", "//", "code", ".", "google", ".", "com", "/", "p", "/", "django", "-", "simple", "-", "captcha", "/", "issues", "/", "detail?id", "=", "39" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/pourbaix_diagram.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/pourbaix_diagram.py#L782-L794
def show(self, *args, **kwargs): """ Shows the pourbaix plot Args: *args: args to get_pourbaix_plot **kwargs: kwargs to get_pourbaix_plot Returns: None """ plt = self.get_pourbaix_plot(*args, **kwargs) plt.show()
[ "def", "show", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "plt", "=", "self", ".", "get_pourbaix_plot", "(", "*", "args", ",", "*", "*", "kwargs", ")", "plt", ".", "show", "(", ")" ]
Shows the pourbaix plot Args: *args: args to get_pourbaix_plot **kwargs: kwargs to get_pourbaix_plot Returns: None
[ "Shows", "the", "pourbaix", "plot" ]
python
train
sdispater/poetry
poetry/mixology/incompatibility.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/mixology/incompatibility.py#L88-L101
def external_incompatibilities(self): # type: () -> Generator[Incompatibility] """ Returns all external incompatibilities in this incompatibility's derivation graph. """ if isinstance(self._cause, ConflictCause): cause = self._cause # type: ConflictCause for incompatibility in cause.conflict.external_incompatibilities: yield incompatibility for incompatibility in cause.other.external_incompatibilities: yield incompatibility else: yield self
[ "def", "external_incompatibilities", "(", "self", ")", ":", "# type: () -> Generator[Incompatibility]", "if", "isinstance", "(", "self", ".", "_cause", ",", "ConflictCause", ")", ":", "cause", "=", "self", ".", "_cause", "# type: ConflictCause", "for", "incompatibilit...
Returns all external incompatibilities in this incompatibility's derivation graph.
[ "Returns", "all", "external", "incompatibilities", "in", "this", "incompatibility", "s", "derivation", "graph", "." ]
python
train
chimera0/accel-brain-code
Generative-Adversarial-Networks/pygan/truesampler/conditionaltruesampler/conditional_image_true_sampler.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Generative-Adversarial-Networks/pygan/truesampler/conditionaltruesampler/conditional_image_true_sampler.py#L23-L32
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.__image_true_sampler.draw() observed_arr = self.add_condition(observed_arr) return observed_arr
[ "def", "draw", "(", "self", ")", ":", "observed_arr", "=", "self", ".", "__image_true_sampler", ".", "draw", "(", ")", "observed_arr", "=", "self", ".", "add_condition", "(", "observed_arr", ")", "return", "observed_arr" ]
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
[ "Draws", "samples", "from", "the", "true", "distribution", ".", "Returns", ":", "np", ".", "ndarray", "of", "samples", "." ]
python
train
Duke-GCB/DukeDSClient
ddsc/core/d4s2.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/d4s2.py#L381-L388
def _is_current_user(self, some_user): """ Is the specified user the current user? :param some_user: RemoteUser user we want to check against the current user :return: boolean: True if the current user is the passed in user """ current_user = self.remote_store.get_current_user() return current_user.id == some_user.id
[ "def", "_is_current_user", "(", "self", ",", "some_user", ")", ":", "current_user", "=", "self", ".", "remote_store", ".", "get_current_user", "(", ")", "return", "current_user", ".", "id", "==", "some_user", ".", "id" ]
Is the specified user the current user? :param some_user: RemoteUser user we want to check against the current user :return: boolean: True if the current user is the passed in user
[ "Is", "the", "specified", "user", "the", "current", "user?", ":", "param", "some_user", ":", "RemoteUser", "user", "we", "want", "to", "check", "against", "the", "current", "user", ":", "return", ":", "boolean", ":", "True", "if", "the", "current", "user",...
python
train
PaulHancock/Aegean
AegeanTools/source_finder.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L744-L822
def save_background_files(self, image_filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, rms=None, bkg=None, cores=1, outbase=None): """ Generate and save the background and RMS maps as FITS files. They are saved in the current directly as aegean-background.fits and aegean-rms.fits. Parameters ---------- image_filename : str or HDUList Input image. hdu_index : int If fits file has more than one hdu, it can be specified here. Default = 0. bkgin, rmsin : str or HDUList Background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. rms, bkg : float A float that represents a constant rms/bkg level for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. outbase : str Basename for output files. """ self.log.info("Saving background / RMS maps") # load image, and load/create background/rms images self.load_globals(image_filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, verb=True, rms=rms, bkg=bkg, cores=cores, do_curve=True) img = self.global_data.img bkgimg, rmsimg = self.global_data.bkgimg, self.global_data.rmsimg curve = np.array(self.global_data.dcurve, dtype=bkgimg.dtype) # mask these arrays have the same mask the same as the data mask = np.where(np.isnan(self.global_data.data_pix)) bkgimg[mask] = np.NaN rmsimg[mask] = np.NaN curve[mask] = np.NaN # Generate the new FITS files by copying the existing HDU and assigning new data. # This gives the new files the same WCS projection and other header fields. 
new_hdu = img.hdu # Set the ORIGIN to indicate Aegean made this file new_hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__) for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']: if c in new_hdu.header: del new_hdu.header[c] if outbase is None: outbase, _ = os.path.splitext(os.path.basename(image_filename)) noise_out = outbase + '_rms.fits' background_out = outbase + '_bkg.fits' curve_out = outbase + '_crv.fits' snr_out = outbase + '_snr.fits' new_hdu.data = bkgimg new_hdu.writeto(background_out, overwrite=True) self.log.info("Wrote {0}".format(background_out)) new_hdu.data = rmsimg new_hdu.writeto(noise_out, overwrite=True) self.log.info("Wrote {0}".format(noise_out)) new_hdu.data = curve new_hdu.writeto(curve_out, overwrite=True) self.log.info("Wrote {0}".format(curve_out)) new_hdu.data = self.global_data.data_pix / rmsimg new_hdu.writeto(snr_out, overwrite=True) self.log.info("Wrote {0}".format(snr_out)) return
[ "def", "save_background_files", "(", "self", ",", "image_filename", ",", "hdu_index", "=", "0", ",", "bkgin", "=", "None", ",", "rmsin", "=", "None", ",", "beam", "=", "None", ",", "rms", "=", "None", ",", "bkg", "=", "None", ",", "cores", "=", "1", ...
Generate and save the background and RMS maps as FITS files. They are saved in the current directly as aegean-background.fits and aegean-rms.fits. Parameters ---------- image_filename : str or HDUList Input image. hdu_index : int If fits file has more than one hdu, it can be specified here. Default = 0. bkgin, rmsin : str or HDUList Background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. rms, bkg : float A float that represents a constant rms/bkg level for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. outbase : str Basename for output files.
[ "Generate", "and", "save", "the", "background", "and", "RMS", "maps", "as", "FITS", "files", ".", "They", "are", "saved", "in", "the", "current", "directly", "as", "aegean", "-", "background", ".", "fits", "and", "aegean", "-", "rms", ".", "fits", "." ]
python
train
tylertreat/BigQuery-Python
bigquery/client.py
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1057-L1151
def export_data_to_uris( self, destination_uris, dataset, table, job=None, compression=None, destination_format=None, print_header=None, field_delimiter=None, project_id=None, ): """ Export data from a BigQuery table to cloud storage. Optional arguments that are not specified are determined by BigQuery as described: https://developers.google.com/bigquery/docs/reference/v2/jobs Parameters ---------- destination_uris : Union[str, list] ``str`` or ``list`` of ``str`` objects representing the URIs on cloud storage of the form: gs://bucket/filename dataset : str String id of the dataset table : str String id of the table job : str, optional String identifying the job (a unique jobid is automatically generated if not provided) compression : str, optional One of the JOB_COMPRESSION_* constants destination_format : str, optional One of the JOB_DESTination_FORMAT_* constants print_header : bool, optional Whether or not to print the header field_delimiter : str, optional Character separating fields in delimited file project_id: str, optional String id of the project Returns ------- dict A BigQuery job resource Raises ------ JobInsertException On http/auth failures or error in result """ destination_uris = destination_uris \ if isinstance(destination_uris, list) else [destination_uris] project_id = self._get_project_id(project_id) configuration = { "sourceTable": { "projectId": project_id, "tableId": table, "datasetId": dataset }, "destinationUris": destination_uris, } if compression: configuration['compression'] = compression if destination_format: configuration['destinationFormat'] = destination_format if print_header is not None: configuration['printHeader'] = print_header if field_delimiter: configuration['fieldDelimiter'] = field_delimiter if not job: hex = self._generate_hex_for_uris(destination_uris) job = "{dataset}-{table}-{digest}".format( dataset=dataset, table=table, digest=hex ) body = { "configuration": { 'extract': configuration }, "jobReference": 
self._get_job_reference(job) } logger.info("Creating export job %s" % body) job_resource = self._insert_job(body) self._raise_insert_exception_if_error(job_resource) return job_resource
[ "def", "export_data_to_uris", "(", "self", ",", "destination_uris", ",", "dataset", ",", "table", ",", "job", "=", "None", ",", "compression", "=", "None", ",", "destination_format", "=", "None", ",", "print_header", "=", "None", ",", "field_delimiter", "=", ...
Export data from a BigQuery table to cloud storage. Optional arguments that are not specified are determined by BigQuery as described: https://developers.google.com/bigquery/docs/reference/v2/jobs Parameters ---------- destination_uris : Union[str, list] ``str`` or ``list`` of ``str`` objects representing the URIs on cloud storage of the form: gs://bucket/filename dataset : str String id of the dataset table : str String id of the table job : str, optional String identifying the job (a unique jobid is automatically generated if not provided) compression : str, optional One of the JOB_COMPRESSION_* constants destination_format : str, optional One of the JOB_DESTination_FORMAT_* constants print_header : bool, optional Whether or not to print the header field_delimiter : str, optional Character separating fields in delimited file project_id: str, optional String id of the project Returns ------- dict A BigQuery job resource Raises ------ JobInsertException On http/auth failures or error in result
[ "Export", "data", "from", "a", "BigQuery", "table", "to", "cloud", "storage", ".", "Optional", "arguments", "that", "are", "not", "specified", "are", "determined", "by", "BigQuery", "as", "described", ":", "https", ":", "//", "developers", ".", "google", "."...
python
train
aio-libs/aiohttp
aiohttp/web_response.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/web_response.py#L179-L221
def set_cookie(self, name: str, value: str, *, expires: Optional[str]=None, domain: Optional[str]=None, max_age: Optional[Union[int, str]]=None, path: str='/', secure: Optional[str]=None, httponly: Optional[str]=None, version: Optional[str]=None) -> None: """Set or update response cookie. Sets new cookie or updates existent with new value. Also updates only those params which are not None. """ old = self._cookies.get(name) if old is not None and old.coded_value == '': # deleted cookie self._cookies.pop(name, None) self._cookies[name] = value c = self._cookies[name] if expires is not None: c['expires'] = expires elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT': del c['expires'] if domain is not None: c['domain'] = domain if max_age is not None: c['max-age'] = str(max_age) elif 'max-age' in c: del c['max-age'] c['path'] = path if secure is not None: c['secure'] = secure if httponly is not None: c['httponly'] = httponly if version is not None: c['version'] = version
[ "def", "set_cookie", "(", "self", ",", "name", ":", "str", ",", "value", ":", "str", ",", "*", ",", "expires", ":", "Optional", "[", "str", "]", "=", "None", ",", "domain", ":", "Optional", "[", "str", "]", "=", "None", ",", "max_age", ":", "Opti...
Set or update response cookie. Sets new cookie or updates existent with new value. Also updates only those params which are not None.
[ "Set", "or", "update", "response", "cookie", "." ]
python
train
pyviz/holoviews
holoviews/core/layout.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/layout.py#L168-L183
def relabel(self, label=None, group=None, depth=1): """Clone object and apply new group and/or label. Applies relabeling to child up to the supplied depth. Args: label (str, optional): New label to apply to returned object group (str, optional): New group to apply to returned object depth (int, optional): Depth to which relabel will be applied If applied to container allows applying relabeling to contained objects up to the specified depth Returns: Returns relabelled object """ return super(AdjointLayout, self).relabel(label=label, group=group, depth=depth)
[ "def", "relabel", "(", "self", ",", "label", "=", "None", ",", "group", "=", "None", ",", "depth", "=", "1", ")", ":", "return", "super", "(", "AdjointLayout", ",", "self", ")", ".", "relabel", "(", "label", "=", "label", ",", "group", "=", "group"...
Clone object and apply new group and/or label. Applies relabeling to child up to the supplied depth. Args: label (str, optional): New label to apply to returned object group (str, optional): New group to apply to returned object depth (int, optional): Depth to which relabel will be applied If applied to container allows applying relabeling to contained objects up to the specified depth Returns: Returns relabelled object
[ "Clone", "object", "and", "apply", "new", "group", "and", "/", "or", "label", "." ]
python
train
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/query.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/query.py#L98-L130
def page(self, language=values.unset, model_build=values.unset, status=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of QueryInstance records from the API. Request is executed immediately :param unicode language: The ISO language-country string that specifies the language used by the Query resources to read :param unicode model_build: The SID or unique name of the Model Build to be queried :param unicode status: The status of the resources to read :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of QueryInstance :rtype: twilio.rest.autopilot.v1.assistant.query.QueryPage """ params = values.of({ 'Language': language, 'ModelBuild': model_build, 'Status': status, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return QueryPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "language", "=", "values", ".", "unset", ",", "model_build", "=", "values", ".", "unset", ",", "status", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", "....
Retrieve a single page of QueryInstance records from the API. Request is executed immediately :param unicode language: The ISO language-country string that specifies the language used by the Query resources to read :param unicode model_build: The SID or unique name of the Model Build to be queried :param unicode status: The status of the resources to read :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of QueryInstance :rtype: twilio.rest.autopilot.v1.assistant.query.QueryPage
[ "Retrieve", "a", "single", "page", "of", "QueryInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
pgmpy/pgmpy
pgmpy/inference/ExactInference.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/inference/ExactInference.py#L533-L627
def _query(self, variables, operation, evidence=None, joint=True): """ This is a generalized query method that can be used for both query and map query. Parameters ---------- variables: list list of variables for which you want to compute the probability operation: str ('marginalize' | 'maximize') The operation to do for passing messages between nodes. evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.inference import BeliefPropagation >>> from pgmpy.models import BayesianModel >>> import numpy as np >>> import pandas as pd >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> inference = BeliefPropagation(model) >>> phi_query = inference.query(['A', 'B']) References ---------- Algorithm 10.4 Out-of-clique inference in clique tree Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman. 
""" is_calibrated = self._is_converged(operation=operation) # Calibrate the junction tree if not calibrated if not is_calibrated: self.calibrate() if not isinstance(variables, (list, tuple, set)): query_variables = [variables] else: query_variables = list(variables) query_variables.extend(evidence.keys() if evidence else []) # Find a tree T' such that query_variables are a subset of scope(T') nodes_with_query_variables = set() for var in query_variables: nodes_with_query_variables.update(filter(lambda x: var in x, self.junction_tree.nodes())) subtree_nodes = nodes_with_query_variables # Conversion of set to tuple just for indexing nodes_with_query_variables = tuple(nodes_with_query_variables) # As junction tree is a tree, that means that there would be only path between any two nodes in the tree # thus we can just take the path between any two nodes; no matter there order is for i in range(len(nodes_with_query_variables) - 1): subtree_nodes.update(nx.shortest_path(self.junction_tree, nodes_with_query_variables[i], nodes_with_query_variables[i + 1])) subtree_undirected_graph = self.junction_tree.subgraph(subtree_nodes) # Converting subtree into a junction tree if len(subtree_nodes) == 1: subtree = JunctionTree() subtree.add_node(subtree_nodes.pop()) else: subtree = JunctionTree(subtree_undirected_graph.edges()) # Selecting a node is root node. Root node would be having only one neighbor if len(subtree.nodes()) == 1: root_node = list(subtree.nodes())[0] else: root_node = tuple(filter(lambda x: len(list(subtree.neighbors(x))) == 1, subtree.nodes()))[0] clique_potential_list = [self.clique_beliefs[root_node]] # For other nodes in the subtree compute the clique potentials as follows # As all the nodes are nothing but tuples so simple set(root_node) won't work at it would update the set with' # all the elements of the tuple; instead use set([root_node]) as it would include only the tuple not the # internal elements within it. 
parent_nodes = set([root_node]) nodes_traversed = set() while parent_nodes: parent_node = parent_nodes.pop() for child_node in set(subtree.neighbors(parent_node)) - nodes_traversed: clique_potential_list.append(self.clique_beliefs[child_node] / self.sepset_beliefs[frozenset([parent_node, child_node])]) parent_nodes.update([child_node]) nodes_traversed.update([parent_node]) # Add factors to the corresponding junction tree subtree.add_factors(*clique_potential_list) # Sum product variable elimination on the subtree variable_elimination = VariableElimination(subtree) if operation == 'marginalize': return variable_elimination.query(variables=variables, evidence=evidence, joint=joint) elif operation == 'maximize': return variable_elimination.map_query(variables=variables, evidence=evidence)
[ "def", "_query", "(", "self", ",", "variables", ",", "operation", ",", "evidence", "=", "None", ",", "joint", "=", "True", ")", ":", "is_calibrated", "=", "self", ".", "_is_converged", "(", "operation", "=", "operation", ")", "# Calibrate the junction tree if ...
This is a generalized query method that can be used for both query and map query. Parameters ---------- variables: list list of variables for which you want to compute the probability operation: str ('marginalize' | 'maximize') The operation to do for passing messages between nodes. evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence Examples -------- >>> from pgmpy.inference import BeliefPropagation >>> from pgmpy.models import BayesianModel >>> import numpy as np >>> import pandas as pd >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> inference = BeliefPropagation(model) >>> phi_query = inference.query(['A', 'B']) References ---------- Algorithm 10.4 Out-of-clique inference in clique tree Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman.
[ "This", "is", "a", "generalized", "query", "method", "that", "can", "be", "used", "for", "both", "query", "and", "map", "query", "." ]
python
train
jazzband/django-ddp
dddp/accounts/ddp.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/accounts/ddp.py#L255-L301
def update_subs(new_user_id): """Update subs to send added/removed for collections with user_rel.""" for sub in Subscription.objects.filter(connection=this.ws.connection): params = loads(sub.params_ejson) pub = API.get_pub_by_name(sub.publication) # calculate the querysets prior to update pre = collections.OrderedDict([ (col, query) for col, query in API.sub_unique_objects(sub, params, pub) ]) # save the subscription with the updated user_id sub.user_id = new_user_id sub.save() # calculate the querysets after the update post = collections.OrderedDict([ (col, query) for col, query in API.sub_unique_objects(sub, params, pub) ]) # first pass, send `added` for objs unique to `post` for col_post, query in post.items(): try: qs_pre = pre[col_post] query = query.exclude( pk__in=qs_pre.order_by().values('pk'), ) except KeyError: # collection not included pre-auth, everything is added. pass for obj in query: this.ws.send(col_post.obj_change_as_msg(obj, ADDED)) # second pass, send `removed` for objs unique to `pre` for col_pre, query in pre.items(): try: qs_post = post[col_pre] query = query.exclude( pk__in=qs_post.order_by().values('pk'), ) except KeyError: # collection not included post-auth, everything is removed. pass for obj in query: this.ws.send(col_pre.obj_change_as_msg(obj, REMOVED))
[ "def", "update_subs", "(", "new_user_id", ")", ":", "for", "sub", "in", "Subscription", ".", "objects", ".", "filter", "(", "connection", "=", "this", ".", "ws", ".", "connection", ")", ":", "params", "=", "loads", "(", "sub", ".", "params_ejson", ")", ...
Update subs to send added/removed for collections with user_rel.
[ "Update", "subs", "to", "send", "added", "/", "removed", "for", "collections", "with", "user_rel", "." ]
python
test
librosa/librosa
examples/beat_tracker.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/beat_tracker.py#L16-L45
def beat_track(input_file, output_csv): '''Beat tracking function :parameters: - input_file : str Path to input audio file (wav, mp3, m4a, flac, etc.) - output_file : str Path to save beat event timestamps as a CSV file ''' print('Loading ', input_file) y, sr = librosa.load(input_file, sr=22050) # Use a default hop size of 512 samples @ 22KHz ~= 23ms hop_length = 512 # This is the window length used by default in stft print('Tracking beats') tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length) print('Estimated tempo: {:0.2f} beats per minute'.format(tempo)) # save output # 'beats' will contain the frame numbers of beat events. beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=hop_length) print('Saving output to ', output_csv) librosa.output.times_csv(output_csv, beat_times) print('done!')
[ "def", "beat_track", "(", "input_file", ",", "output_csv", ")", ":", "print", "(", "'Loading '", ",", "input_file", ")", "y", ",", "sr", "=", "librosa", ".", "load", "(", "input_file", ",", "sr", "=", "22050", ")", "# Use a default hop size of 512 samples @ 22...
Beat tracking function :parameters: - input_file : str Path to input audio file (wav, mp3, m4a, flac, etc.) - output_file : str Path to save beat event timestamps as a CSV file
[ "Beat", "tracking", "function" ]
python
test
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L949-L974
def task_pivot(self, task_resource): """Pivot point on Tasks for this resource. This method will return all *resources* (group, indicators, victims, etc) for this resource that are associated with the provided task id. **Example Endpoints URI's** +--------------+-------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+=============================================================+ | GET | /v2/tasks/{resourceId}/groups/{resourceType} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/indicators/{resourceType} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+-------------------------------------------------------------+ Args: resource_id (integer): The resource pivot id (task id). """ resource = self.copy() resource._request_uri = '{}/{}'.format(task_resource.request_uri, resource._request_uri) return resource
[ "def", "task_pivot", "(", "self", ",", "task_resource", ")", ":", "resource", "=", "self", ".", "copy", "(", ")", "resource", ".", "_request_uri", "=", "'{}/{}'", ".", "format", "(", "task_resource", ".", "request_uri", ",", "resource", ".", "_request_uri", ...
Pivot point on Tasks for this resource. This method will return all *resources* (group, indicators, victims, etc) for this resource that are associated with the provided task id. **Example Endpoints URI's** +--------------+-------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+=============================================================+ | GET | /v2/tasks/{resourceId}/groups/{resourceType} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/groups/{resourceType}/{uniqueId} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/indicators/{resourceType} | +--------------+-------------------------------------------------------------+ | GET | /v2/tasks/{resourceId}/indicators/{resourceType}/{uniqueId} | +--------------+-------------------------------------------------------------+ Args: resource_id (integer): The resource pivot id (task id).
[ "Pivot", "point", "on", "Tasks", "for", "this", "resource", "." ]
python
train
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L241-L263
def addToLayout(self, analysis, position=None): """ Adds the analysis passed in to the worksheet's layout """ # TODO Redux layout = self.getLayout() container_uid = self.get_container_for(analysis) if IRequestAnalysis.providedBy(analysis) and \ not IDuplicateAnalysis.providedBy(analysis): container_uids = map(lambda slot: slot['container_uid'], layout) if container_uid in container_uids: position = [int(slot['position']) for slot in layout if slot['container_uid'] == container_uid][0] elif not position: used_positions = [0, ] + [int(slot['position']) for slot in layout] position = [pos for pos in range(1, max(used_positions) + 2) if pos not in used_positions][0] an_type = self.get_analysis_type(analysis) self.setLayout(layout + [{'position': position, 'type': an_type, 'container_uid': container_uid, 'analysis_uid': api.get_uid(analysis)}, ])
[ "def", "addToLayout", "(", "self", ",", "analysis", ",", "position", "=", "None", ")", ":", "# TODO Redux", "layout", "=", "self", ".", "getLayout", "(", ")", "container_uid", "=", "self", ".", "get_container_for", "(", "analysis", ")", "if", "IRequestAnalys...
Adds the analysis passed in to the worksheet's layout
[ "Adds", "the", "analysis", "passed", "in", "to", "the", "worksheet", "s", "layout" ]
python
train
spacetelescope/drizzlepac
drizzlepac/outputimage.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/outputimage.py#L680-L694
def getTemplates(fnames, blend=True): """ Process all headers to produce a set of combined headers that follows the rules defined by each instrument. """ if not blend: newhdrs = blendheaders.getSingleTemplate(fnames[0]) newtab = None else: # apply rules to create final version of headers, plus table newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames) cleanTemplates(newhdrs[1],newhdrs[2],newhdrs[3]) return newhdrs, newtab
[ "def", "getTemplates", "(", "fnames", ",", "blend", "=", "True", ")", ":", "if", "not", "blend", ":", "newhdrs", "=", "blendheaders", ".", "getSingleTemplate", "(", "fnames", "[", "0", "]", ")", "newtab", "=", "None", "else", ":", "# apply rules to create ...
Process all headers to produce a set of combined headers that follows the rules defined by each instrument.
[ "Process", "all", "headers", "to", "produce", "a", "set", "of", "combined", "headers", "that", "follows", "the", "rules", "defined", "by", "each", "instrument", "." ]
python
train
sdispater/orator
orator/query/builder.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L1229-L1242
def exists(self): """ Determine if any rows exist for the current query. :return: Whether the rows exist or not :rtype: bool """ limit = self.limit_ result = self.limit(1).count() > 0 self.limit(limit) return result
[ "def", "exists", "(", "self", ")", ":", "limit", "=", "self", ".", "limit_", "result", "=", "self", ".", "limit", "(", "1", ")", ".", "count", "(", ")", ">", "0", "self", ".", "limit", "(", "limit", ")", "return", "result" ]
Determine if any rows exist for the current query. :return: Whether the rows exist or not :rtype: bool
[ "Determine", "if", "any", "rows", "exist", "for", "the", "current", "query", "." ]
python
train
proycon/pynlpl
pynlpl/search.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/search.py#L263-L268
def searchtop(self,n=10): """Return the top n best resulta (or possibly less if not enough is found)""" solutions = PriorityQueue([], lambda x: x.score, self.minimize, length=n, blockworse=False, blockequal=False,duplicates=False) for solution in self: solutions.append(solution) return solutions
[ "def", "searchtop", "(", "self", ",", "n", "=", "10", ")", ":", "solutions", "=", "PriorityQueue", "(", "[", "]", ",", "lambda", "x", ":", "x", ".", "score", ",", "self", ".", "minimize", ",", "length", "=", "n", ",", "blockworse", "=", "False", ...
Return the top n best resulta (or possibly less if not enough is found)
[ "Return", "the", "top", "n", "best", "resulta", "(", "or", "possibly", "less", "if", "not", "enough", "is", "found", ")" ]
python
train
YeoLab/anchor
anchor/infotheory.py
https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L46-L77
def binify(data, bins): """Makes a histogram of each column the provided binsize Parameters ---------- data : pandas.DataFrame A samples x features dataframe. Each feature (column) will be binned into the provided bins bins : iterable Bins you would like to use for this data. Must include the final bin value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1). nbins = len(bins) - 1 Returns ------- binned : pandas.DataFrame An nbins x features DataFrame of each column binned across rows """ if bins is None: raise ValueError('Must specify "bins"') if isinstance(data, pd.DataFrame): binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins, range=(0, 1))[0])) elif isinstance(data, pd.Series): binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0]) else: raise ValueError('`data` must be either a 1d vector or 2d matrix') binned.index = bin_range_strings(bins) # Normalize so each column sums to 1 binned = binned / binned.sum().astype(float) return binned
[ "def", "binify", "(", "data", ",", "bins", ")", ":", "if", "bins", "is", "None", ":", "raise", "ValueError", "(", "'Must specify \"bins\"'", ")", "if", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ":", "binned", "=", "data", ".", "apply...
Makes a histogram of each column the provided binsize Parameters ---------- data : pandas.DataFrame A samples x features dataframe. Each feature (column) will be binned into the provided bins bins : iterable Bins you would like to use for this data. Must include the final bin value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1). nbins = len(bins) - 1 Returns ------- binned : pandas.DataFrame An nbins x features DataFrame of each column binned across rows
[ "Makes", "a", "histogram", "of", "each", "column", "the", "provided", "binsize" ]
python
train
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/spv.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/spv.py#L699-L729
def verify_header_chain(cls, path, chain=None): """ Verify that a given chain of block headers has sufficient proof of work. """ if chain is None: chain = SPVClient.load_header_chain( path ) prev_header = chain[0] for i in xrange(1, len(chain)): header = chain[i] height = header.get('block_height') prev_hash = prev_header.get('hash') if prev_hash != header.get('prev_block_hash'): log.error("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash'))) return False bits, target = SPVClient.get_target( path, height/BLOCK_DIFFICULTY_CHUNK_SIZE, chain) if bits != header.get('bits'): log.error("bits mismatch: %s vs %s" % (bits, header.get('bits'))) return False _hash = header.get('hash') if int('0x'+_hash, 16) > target: log.error("insufficient proof of work: %s vs target %s" % (int('0x'+_hash, 16), target)) return False prev_header = header return True
[ "def", "verify_header_chain", "(", "cls", ",", "path", ",", "chain", "=", "None", ")", ":", "if", "chain", "is", "None", ":", "chain", "=", "SPVClient", ".", "load_header_chain", "(", "path", ")", "prev_header", "=", "chain", "[", "0", "]", "for", "i",...
Verify that a given chain of block headers has sufficient proof of work.
[ "Verify", "that", "a", "given", "chain", "of", "block", "headers", "has", "sufficient", "proof", "of", "work", "." ]
python
train
IBMStreams/pypi.streamsx
streamsx/scripts/extract.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/scripts/extract.py#L219-L246
def _copy_globalization_resources(self): '''Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings''' rootDir = os.path.join(_topology_tk_dir(), "impl", "nl") languageList = [] for dirName in os.listdir(rootDir): srcDir = os.path.join(_topology_tk_dir(), "impl", "nl", dirName) if (os.path.isdir(srcDir)) and (dirName != "include"): dstDir = os.path.join(self._tk_dir, "impl", "nl", dirName) try: print("Copy globalization resources " + dirName) os.makedirs(dstDir) except OSError as e: if (e.errno == 17) and (os.path.isdir(dstDir)): if self._cmd_args.verbose: print("Directory", dstDir, "exists") else: raise srcFile = os.path.join(srcDir, "TopologySplpyResource.xlf") if os.path.isfile(srcFile): res = shutil.copy2(srcFile, dstDir) languageList.append(dirName) if self._cmd_args.verbose: print("Written: " + res) return languageList
[ "def", "_copy_globalization_resources", "(", "self", ")", ":", "rootDir", "=", "os", ".", "path", ".", "join", "(", "_topology_tk_dir", "(", ")", ",", "\"impl\"", ",", "\"nl\"", ")", "languageList", "=", "[", "]", "for", "dirName", "in", "os", ".", "list...
Copy the language resource files for python api functions This function copies the TopologySplpy Resource files from Topology toolkit directory into the impl/nl folder of the project. Returns: the list with the copied locale strings
[ "Copy", "the", "language", "resource", "files", "for", "python", "api", "functions", "This", "function", "copies", "the", "TopologySplpy", "Resource", "files", "from", "Topology", "toolkit", "directory", "into", "the", "impl", "/", "nl", "folder", "of", "the", ...
python
train
knipknap/SpiffWorkflow
SpiffWorkflow/task.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/task.py#L405-L419
def _is_descendant_of(self, parent): """ Returns True if parent is in the list of ancestors, returns False otherwise. :type parent: Task :param parent: The parent that is searched in the ancestors. :rtype: bool :returns: Whether the parent was found. """ if self.parent is None: return False if self.parent == parent: return True return self.parent._is_descendant_of(parent)
[ "def", "_is_descendant_of", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "parent", "is", "None", ":", "return", "False", "if", "self", ".", "parent", "==", "parent", ":", "return", "True", "return", "self", ".", "parent", ".", "_is_descendant...
Returns True if parent is in the list of ancestors, returns False otherwise. :type parent: Task :param parent: The parent that is searched in the ancestors. :rtype: bool :returns: Whether the parent was found.
[ "Returns", "True", "if", "parent", "is", "in", "the", "list", "of", "ancestors", "returns", "False", "otherwise", "." ]
python
valid
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L382-L394
def from_legacy(cls, filename, add_compute_legacy=True, add_compute_phoebe=True): """Load a bundle from a PHOEBE 1.0 Legacy file. This is a constructor so should be called as: >>> b = Bundle.from_legacy('myfile.phoebe') :parameter str filename: relative or full path to the file :return: instantiated :class:`Bundle` object """ logger.warning("importing from legacy is experimental until official 1.0 release") filename = os.path.expanduser(filename) return io.load_legacy(filename, add_compute_legacy, add_compute_phoebe)
[ "def", "from_legacy", "(", "cls", ",", "filename", ",", "add_compute_legacy", "=", "True", ",", "add_compute_phoebe", "=", "True", ")", ":", "logger", ".", "warning", "(", "\"importing from legacy is experimental until official 1.0 release\"", ")", "filename", "=", "o...
Load a bundle from a PHOEBE 1.0 Legacy file. This is a constructor so should be called as: >>> b = Bundle.from_legacy('myfile.phoebe') :parameter str filename: relative or full path to the file :return: instantiated :class:`Bundle` object
[ "Load", "a", "bundle", "from", "a", "PHOEBE", "1", ".", "0", "Legacy", "file", "." ]
python
train
CellProfiler/centrosome
centrosome/threshold.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/threshold.py#L436-L468
def get_background_threshold(image, mask = None): """Get threshold based on the mode of the image The threshold is calculated by calculating the mode and multiplying by 2 (an arbitrary empirical factor). The user will presumably adjust the multiplication factor as needed.""" cropped_image = np.array(image.flat) if mask is None else image[mask] if np.product(cropped_image.shape)==0: return 0 img_min = np.min(cropped_image) img_max = np.max(cropped_image) if img_min == img_max: return cropped_image[0] # Only do the histogram between values a bit removed from saturation robust_min = 0.02 * (img_max - img_min) + img_min robust_max = 0.98 * (img_max - img_min) + img_min nbins = 256 cropped_image = cropped_image[np.logical_and(cropped_image > robust_min, cropped_image < robust_max)] if len(cropped_image) == 0: return robust_min h = scipy.ndimage.histogram(cropped_image, robust_min, robust_max, nbins) index = np.argmax(h) cutoff = float(index) / float(nbins-1) # # If we have a low (or almost no) background, the cutoff will be # zero since the background falls into the lowest bin. We want to # offset by the robust cutoff factor of .02. We rescale by 1.04 # to account for the 0.02 at the top and bottom. # cutoff = (cutoff + 0.02) / 1.04 return img_min + cutoff * 2 * (img_max - img_min)
[ "def", "get_background_threshold", "(", "image", ",", "mask", "=", "None", ")", ":", "cropped_image", "=", "np", ".", "array", "(", "image", ".", "flat", ")", "if", "mask", "is", "None", "else", "image", "[", "mask", "]", "if", "np", ".", "product", ...
Get threshold based on the mode of the image The threshold is calculated by calculating the mode and multiplying by 2 (an arbitrary empirical factor). The user will presumably adjust the multiplication factor as needed.
[ "Get", "threshold", "based", "on", "the", "mode", "of", "the", "image", "The", "threshold", "is", "calculated", "by", "calculating", "the", "mode", "and", "multiplying", "by", "2", "(", "an", "arbitrary", "empirical", "factor", ")", ".", "The", "user", "wi...
python
train
dcos/shakedown
shakedown/dcos/package.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/package.py#L352-L383
def add_package_repo( repo_name, repo_url, index=None, wait_for_package=None, expect_prev_version=None): """ Add a repository to the list of package sources :param repo_name: name of the repository to add :type repo_name: str :param repo_url: location of the repository to add :type repo_url: str :param index: index (precedence) for this repository :type index: int :param wait_for_package: the package whose version should change after the repo is added :type wait_for_package: str, or None :return: True if successful, False otherwise :rtype: bool """ package_manager = _get_package_manager() if wait_for_package: prev_version = package_manager.get_package_version(wait_for_package, None) if not package_manager.add_repo(repo_name, repo_url, index): return False if wait_for_package: try: spinner.time_wait(lambda: package_version_changed_predicate(package_manager, wait_for_package, prev_version)) except TimeoutExpired: return False return True
[ "def", "add_package_repo", "(", "repo_name", ",", "repo_url", ",", "index", "=", "None", ",", "wait_for_package", "=", "None", ",", "expect_prev_version", "=", "None", ")", ":", "package_manager", "=", "_get_package_manager", "(", ")", "if", "wait_for_package", ...
Add a repository to the list of package sources :param repo_name: name of the repository to add :type repo_name: str :param repo_url: location of the repository to add :type repo_url: str :param index: index (precedence) for this repository :type index: int :param wait_for_package: the package whose version should change after the repo is added :type wait_for_package: str, or None :return: True if successful, False otherwise :rtype: bool
[ "Add", "a", "repository", "to", "the", "list", "of", "package", "sources" ]
python
train
nyergler/hieroglyph
src/hieroglyph/directives.py
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/directives.py#L217-L224
def apply(self, builder): """Apply the Slide Configuration to a Builder.""" if 'theme' in self.attributes: builder.apply_theme( self.attributes['theme'], builder.theme_options, )
[ "def", "apply", "(", "self", ",", "builder", ")", ":", "if", "'theme'", "in", "self", ".", "attributes", ":", "builder", ".", "apply_theme", "(", "self", ".", "attributes", "[", "'theme'", "]", ",", "builder", ".", "theme_options", ",", ")" ]
Apply the Slide Configuration to a Builder.
[ "Apply", "the", "Slide", "Configuration", "to", "a", "Builder", "." ]
python
train
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L446-L453
def delete_database(self, name_or_obj): """ Deletes the specified database. If no database by that name exists, no exception will be raised; instead, nothing at all is done. """ name = utils.get_name(name_or_obj) self._database_manager.delete(name)
[ "def", "delete_database", "(", "self", ",", "name_or_obj", ")", ":", "name", "=", "utils", ".", "get_name", "(", "name_or_obj", ")", "self", ".", "_database_manager", ".", "delete", "(", "name", ")" ]
Deletes the specified database. If no database by that name exists, no exception will be raised; instead, nothing at all is done.
[ "Deletes", "the", "specified", "database", ".", "If", "no", "database", "by", "that", "name", "exists", "no", "exception", "will", "be", "raised", ";", "instead", "nothing", "at", "all", "is", "done", "." ]
python
train
estnltk/estnltk
estnltk/converters/gt_conversion.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/converters/gt_conversion.py#L613-L669
def get_original_vs_converted_diff( original ,converted ): ''' Compares the *original* text to *converted* text, and detects changes/differences in morphological annotations. The method constructs line-by-line comparison string, where lines are separated by newline, and '***' at the beginning of the line indicates the difference. Returns a pair: results of the line-by-line comparison as a string, and boolean value indicating whether there were any differences. ''' from estnltk.syntax.syntax_preprocessing import convert_Text_to_mrf old_layer_mrf = convert_Text_to_mrf( original ) new_layer_mrf = convert_Text_to_mrf( converted ) max_len_1 = max([len(l) for l in old_layer_mrf ]) max_len_2 = max([len(l) for l in new_layer_mrf ]) max_len = max( max_len_1, max_len_2 ) format_str = '{:<'+str(max_len+1)+'}' i = 0 j = 0 comp_lines = [] diff_found = False while(i < len(old_layer_mrf) or j < len(new_layer_mrf)): l1 = old_layer_mrf[i] l2 = new_layer_mrf[j] # 1) Output line containing tokens if not l1.startswith(' ') and not l2.startswith(' '): diff = '*** ' if format_str.format(l1) != format_str.format(l2) else ' ' comp_lines.append( diff+format_str.format(l1)+format_str.format(l2) ) if diff == '*** ': diff_found = True i += 1 j += 1 else: # 2) Output analysis line(s) while(i < len(old_layer_mrf) or j < len(new_layer_mrf)): l1 = old_layer_mrf[i] l2 = new_layer_mrf[j] if l1.startswith(' ') and l2.startswith(' '): diff = '*** ' if format_str.format(l1) != format_str.format(l2) else ' ' comp_lines.append( diff+format_str.format(l1)+format_str.format(l2) ) if diff == '*** ': diff_found = True i += 1 j += 1 elif l1.startswith(' ') and not l2.startswith(' '): diff = '*** ' comp_lines.append( diff+format_str.format(l1)+format_str.format(' ') ) diff_found = True i += 1 elif not l1.startswith(' ') and l2.startswith(' '): diff = '*** ' comp_lines.append( diff+format_str.format(' ')+format_str.format(l2) ) diff_found = True j += 1 else: break return '\n'.join( comp_lines ), diff_found
[ "def", "get_original_vs_converted_diff", "(", "original", ",", "converted", ")", ":", "from", "estnltk", ".", "syntax", ".", "syntax_preprocessing", "import", "convert_Text_to_mrf", "old_layer_mrf", "=", "convert_Text_to_mrf", "(", "original", ")", "new_layer_mrf", "=",...
Compares the *original* text to *converted* text, and detects changes/differences in morphological annotations. The method constructs line-by-line comparison string, where lines are separated by newline, and '***' at the beginning of the line indicates the difference. Returns a pair: results of the line-by-line comparison as a string, and boolean value indicating whether there were any differences.
[ "Compares", "the", "*", "original", "*", "text", "to", "*", "converted", "*", "text", "and", "detects", "changes", "/", "differences", "in", "morphological", "annotations", "." ]
python
train
ska-sa/purr
Purr/Plugins/local_pychart/basecanvas.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/basecanvas.py#L155-L167
def _path_polygon(self, points): "Low-level polygon-drawing routine." (xmin, ymin, xmax, ymax) = _compute_bounding_box(points) if invisible_p(xmax, ymax): return self.setbb(xmin, ymin) self.setbb(xmax, ymax) self.newpath() self.moveto(xscale(points[0][0]), yscale(points[0][1])) for point in points[1:]: self.lineto(xscale(point[0]), yscale(point[1])) self.closepath()
[ "def", "_path_polygon", "(", "self", ",", "points", ")", ":", "(", "xmin", ",", "ymin", ",", "xmax", ",", "ymax", ")", "=", "_compute_bounding_box", "(", "points", ")", "if", "invisible_p", "(", "xmax", ",", "ymax", ")", ":", "return", "self", ".", "...
Low-level polygon-drawing routine.
[ "Low", "-", "level", "polygon", "-", "drawing", "routine", "." ]
python
train
Toilal/rebulk
rebulk/rebulk.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/rebulk.py#L307-L319
def _execute_rules(self, matches, context): """ Execute rules for this rebulk and children. :param matches: :type matches: :param context: :type context: :return: :rtype: """ if not self.disabled(context): rules = self.effective_rules(context) rules.execute_all_rules(matches, context)
[ "def", "_execute_rules", "(", "self", ",", "matches", ",", "context", ")", ":", "if", "not", "self", ".", "disabled", "(", "context", ")", ":", "rules", "=", "self", ".", "effective_rules", "(", "context", ")", "rules", ".", "execute_all_rules", "(", "ma...
Execute rules for this rebulk and children. :param matches: :type matches: :param context: :type context: :return: :rtype:
[ "Execute", "rules", "for", "this", "rebulk", "and", "children", ".", ":", "param", "matches", ":", ":", "type", "matches", ":", ":", "param", "context", ":", ":", "type", "context", ":", ":", "return", ":", ":", "rtype", ":" ]
python
train
Loudr/asana-hub
asana_hub/actions/sync.py
https://github.com/Loudr/asana-hub/blob/af996ce890ed23d8ede5bf68dcd318e3438829cb/asana_hub/actions/sync.py#L54-L65
def apply_tasks_to_issue(self, issue, tasks, issue_body=None): """Applies task numbers to an issue.""" issue_body = issue_body or issue.body task_numbers = transport.format_task_numbers_with_links(tasks) if task_numbers: new_body = transport.ASANA_SECTION_RE.sub('', issue_body) new_body = new_body + "\n## Asana Tasks:\n\n%s" % task_numbers transport.issue_edit(issue, body=new_body) return new_body return issue_body
[ "def", "apply_tasks_to_issue", "(", "self", ",", "issue", ",", "tasks", ",", "issue_body", "=", "None", ")", ":", "issue_body", "=", "issue_body", "or", "issue", ".", "body", "task_numbers", "=", "transport", ".", "format_task_numbers_with_links", "(", "tasks", ...
Applies task numbers to an issue.
[ "Applies", "task", "numbers", "to", "an", "issue", "." ]
python
test
loli/medpy
doc/numpydoc/numpydoc/compiler_unparse.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/compiler_unparse.py#L244-L256
def _From(self, t): """ Handle "from xyz import foo, bar as baz". """ # fixme: Are From and ImportFrom handled differently? self._fill("from ") self._write(t.modname) self._write(" import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname)
[ "def", "_From", "(", "self", ",", "t", ")", ":", "# fixme: Are From and ImportFrom handled differently?", "self", ".", "_fill", "(", "\"from \"", ")", "self", ".", "_write", "(", "t", ".", "modname", ")", "self", ".", "_write", "(", "\" import \"", ")", "for...
Handle "from xyz import foo, bar as baz".
[ "Handle", "from", "xyz", "import", "foo", "bar", "as", "baz", "." ]
python
train
radjkarl/imgProcessor
imgProcessor/camera/PerspectiveCorrection.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L281-L372
def correctGrid(self, img, grid): ''' grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,) ''' self.img = imread(img) h = self.homography # TODO: cleanup only needed to get newBorder attr. if self.opts['do_correctIntensity']: self.img = self.img / self._getTiltFactor(self.img.shape) s0, s1 = grid.shape[:2] n0, n1 = s0 - 1, s1 - 1 snew = self._newBorders b = self.opts['border'] sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1 out = np.empty(snew[::-1], dtype=self.img.dtype) def warp(ix, iy, objP, outcut): shape = outcut.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, outcut, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) return quad objP = np.array([[0, 0], [sx, 0], [sx, sy], [0, sy]], dtype=np.float32) # INNER CELLS for ix in range(1, n0 - 1): for iy in range(1, n1 - 1): sub = out[iy * sy + b: (iy + 1) * sy + b, ix * sx + b: (ix + 1) * sx + b] # warp(ix, iy, objP, sub) shape = sub.shape[::-1] quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])] # print(quad, objP) hcell = cv2.getPerspectiveTransform( quad.astype(np.float32), objP) cv2.warpPerspective(self.img, hcell, shape, sub, flags=cv2.INTER_LANCZOS4, **self.opts['cv2_opts']) # return out # TOP CELLS objP[:, 1] += b for ix in range(1, n0 - 1): warp(ix, 0, objP, out[: sy + b, ix * sx + b: (ix + 1) * sx + b]) # BOTTOM CELLS objP[:, 1] -= b for ix in range(1, n0 - 1): iy = (n1 - 1) y = iy * sy + b x = ix * sx + b warp(ix, iy, objP, out[y: y + sy + b, x: x + sx]) # LEFT CELLS objP[:, 0] += b for iy in range(1, n1 - 1): y = iy * sy + b warp(0, iy, objP, out[y: y + sy, : sx + b]) # RIGHT CELLS objP[:, 0] -= b ix = (n0 - 1) x = ix * sx + b for iy in range(1, n1 - 1): y = iy * sy + b warp(ix, iy, objP, out[y: y + sy, x: x + sx + b]) # BOTTOM RIGHT CORNER warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:, x: x + sx + b]) # #TOP LEFT CORNER objP += (b, 
b) warp(0, 0, objP, out[0: sy + b, 0: sx + b]) # TOP RIGHT CORNER objP[:, 0] -= b # x = (n0-1)*sx+b warp(n0 - 1, 0, objP, out[: sy + b, x: x + sx + b]) # #BOTTOM LEFT CORNER objP += (b, -b) warp(0, n1 - 1, objP, out[-sy - b - 1:, : sx + b]) return out
[ "def", "correctGrid", "(", "self", ",", "img", ",", "grid", ")", ":", "self", ".", "img", "=", "imread", "(", "img", ")", "h", "=", "self", ".", "homography", "# TODO: cleanup only needed to get newBorder attr.\r", "if", "self", ".", "opts", "[", "'do_correc...
grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,)
[ "grid", "-", ">", "array", "of", "polylines", "=", "((", "p0x", "p0y", ")", "(", "p1x", "p1y", ")", ")" ]
python
train
pauleveritt/kaybee
kaybee/plugins/widgets/handlers.py
https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/widgets/handlers.py#L63-L84
def render_widgets(kb_app: kb, sphinx_app: Sphinx, doctree: doctree, fromdocname: str, ): """ Go through docs and replace widget directive with rendering """ builder: StandaloneHTMLBuilder = sphinx_app.builder for node in doctree.traverse(widget): # Render the output w = sphinx_app.env.widgets.get(node.name) context = builder.globalcontext.copy() # Add in certain globals context['resources'] = sphinx_app.env.resources context['references'] = sphinx_app.env.references output = w.render(sphinx_app, context) # Put the output into the node contents listing = [nodes.raw('', output, format='html')] node.replace_self(listing)
[ "def", "render_widgets", "(", "kb_app", ":", "kb", ",", "sphinx_app", ":", "Sphinx", ",", "doctree", ":", "doctree", ",", "fromdocname", ":", "str", ",", ")", ":", "builder", ":", "StandaloneHTMLBuilder", "=", "sphinx_app", ".", "builder", "for", "node", "...
Go through docs and replace widget directive with rendering
[ "Go", "through", "docs", "and", "replace", "widget", "directive", "with", "rendering" ]
python
train
brunobord/tdaemon
tdaemon.py
https://github.com/brunobord/tdaemon/blob/733b5bddb4b12bc3db326a192ce5606f28768307/tdaemon.py#L167-L176
def include(self, path): """Returns `True` if the file is not ignored""" for extension in IGNORE_EXTENSIONS: if path.endswith(extension): return False parts = path.split(os.path.sep) for part in parts: if part in self.ignore_dirs: return False return True
[ "def", "include", "(", "self", ",", "path", ")", ":", "for", "extension", "in", "IGNORE_EXTENSIONS", ":", "if", "path", ".", "endswith", "(", "extension", ")", ":", "return", "False", "parts", "=", "path", ".", "split", "(", "os", ".", "path", ".", "...
Returns `True` if the file is not ignored
[ "Returns", "True", "if", "the", "file", "is", "not", "ignored" ]
python
train
dodger487/dplython
dplython/dplython.py
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L504-L517
def get_join_cols(by_entry): """ helper function used for joins builds left and right join list for join function """ left_cols = [] right_cols = [] for col in by_entry: if isinstance(col, str): left_cols.append(col) right_cols.append(col) else: left_cols.append(col[0]) right_cols.append(col[1]) return left_cols, right_cols
[ "def", "get_join_cols", "(", "by_entry", ")", ":", "left_cols", "=", "[", "]", "right_cols", "=", "[", "]", "for", "col", "in", "by_entry", ":", "if", "isinstance", "(", "col", ",", "str", ")", ":", "left_cols", ".", "append", "(", "col", ")", "right...
helper function used for joins builds left and right join list for join function
[ "helper", "function", "used", "for", "joins", "builds", "left", "and", "right", "join", "list", "for", "join", "function" ]
python
train
hyperledger/indy-sdk
wrappers/python/indy/non_secrets.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/non_secrets.py#L326-L380
async def open_wallet_search(wallet_handle: int, type_: str, query_json: str, options_json: str) -> int: """ Search for wallet records :param wallet_handle: wallet handler (created by open_wallet). :param type_: allows to separate different record types collections :param query_json: MongoDB style query to wallet record tags: { "tagName": "tagValue", $or: { "tagName2": { $regex: 'pattern' }, "tagName3": { $gte: '123' }, }, } :param options_json: //TODO: FIXME: Think about replacing by bitmask { retrieveRecords: (optional, true by default) If false only "counts" will be calculated, retrieveTotalCount: (optional, false by default) Calculate total count, retrieveType: (optional, false by default) Retrieve record type, retrieveValue: (optional, true by default) Retrieve record value, retrieveTags: (optional, true by default) Retrieve record tags, } :return: search_handle: Wallet search handle that can be used later to fetch records by small batches (with fetch_wallet_search_next_records) """ logger = logging.getLogger(__name__) logger.debug("open_wallet_search: >>> wallet_handle: %r, type_: %r, query_json: %r, options_json: %r", wallet_handle, type_, query_json, options_json) if not hasattr(open_wallet_search, "cb"): logger.debug("open_wallet_search: Creating callback") open_wallet_search.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32)) c_wallet_handle = c_int32(wallet_handle) c_type = c_char_p(type_.encode('utf-8')) c_query_json = c_char_p(query_json.encode('utf-8')) c_options_json = c_char_p(options_json.encode('utf-8')) search_handle = await do_call('indy_open_wallet_search', c_wallet_handle, c_type, c_query_json, c_options_json, open_wallet_search.cb) res = search_handle logger.debug("open_wallet_search: <<< res: %r", res) return res
[ "async", "def", "open_wallet_search", "(", "wallet_handle", ":", "int", ",", "type_", ":", "str", ",", "query_json", ":", "str", ",", "options_json", ":", "str", ")", "->", "int", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "lo...
Search for wallet records :param wallet_handle: wallet handler (created by open_wallet). :param type_: allows to separate different record types collections :param query_json: MongoDB style query to wallet record tags: { "tagName": "tagValue", $or: { "tagName2": { $regex: 'pattern' }, "tagName3": { $gte: '123' }, }, } :param options_json: //TODO: FIXME: Think about replacing by bitmask { retrieveRecords: (optional, true by default) If false only "counts" will be calculated, retrieveTotalCount: (optional, false by default) Calculate total count, retrieveType: (optional, false by default) Retrieve record type, retrieveValue: (optional, true by default) Retrieve record value, retrieveTags: (optional, true by default) Retrieve record tags, } :return: search_handle: Wallet search handle that can be used later to fetch records by small batches (with fetch_wallet_search_next_records)
[ "Search", "for", "wallet", "records" ]
python
train
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L1861-L1877
def can_lookup_objective_prerequisites(self): """Tests if this user can perform Objective lookups. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer lookup operations to unauthorized users. return: (boolean) - false if lookup methods are not authorized, true otherwise compliance: mandatory - This method must be implemented. """ url_path = construct_url('authorization', bank_id=self._catalog_idstr) return self._get_request(url_path)['objectiveRequisiteHints']['canLookup']
[ "def", "can_lookup_objective_prerequisites", "(", "self", ")", ":", "url_path", "=", "construct_url", "(", "'authorization'", ",", "bank_id", "=", "self", ".", "_catalog_idstr", ")", "return", "self", ".", "_get_request", "(", "url_path", ")", "[", "'objectiveRequ...
Tests if this user can perform Objective lookups. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer lookup operations to unauthorized users. return: (boolean) - false if lookup methods are not authorized, true otherwise compliance: mandatory - This method must be implemented.
[ "Tests", "if", "this", "user", "can", "perform", "Objective", "lookups", "." ]
python
train
pikepdf/pikepdf
src/pikepdf/models/metadata.py
https://github.com/pikepdf/pikepdf/blob/07154f4dec007e2e9c0c6a8c07b964fd06bc5f77/src/pikepdf/models/metadata.py#L127-L148
def decode_pdf_date(s: str) -> datetime: """Decode a pdfmark date to a Python datetime object A pdfmark date is a string in a paritcular format. See the pdfmark Reference for the specification. """ if isinstance(s, String): s = str(s) if s.startswith('D:'): s = s[2:] # Literal Z00'00', is incorrect but found in the wild, # probably made by OS X Quartz -- standardize if s.endswith("Z00'00'"): s = s.replace("Z00'00'", '+0000') elif s.endswith('Z'): s = s.replace('Z', '+0000') s = s.replace("'", "") # Remove apos from PDF time strings try: return datetime.strptime(s, r'%Y%m%d%H%M%S%z') except ValueError: return datetime.strptime(s, r'%Y%m%d%H%M%S')
[ "def", "decode_pdf_date", "(", "s", ":", "str", ")", "->", "datetime", ":", "if", "isinstance", "(", "s", ",", "String", ")", ":", "s", "=", "str", "(", "s", ")", "if", "s", ".", "startswith", "(", "'D:'", ")", ":", "s", "=", "s", "[", "2", "...
Decode a pdfmark date to a Python datetime object A pdfmark date is a string in a paritcular format. See the pdfmark Reference for the specification.
[ "Decode", "a", "pdfmark", "date", "to", "a", "Python", "datetime", "object" ]
python
train
kytos/kytos-utils
kytos/cli/commands/napps/api.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/cli/commands/napps/api.py#L213-L233
def list(cls, args): # pylint: disable=unused-argument """List all installed NApps and inform whether they are enabled.""" mgr = NAppsManager() # Add status napps = [napp + ('[ie]',) for napp in mgr.get_enabled()] napps += [napp + ('[i-]',) for napp in mgr.get_disabled()] # Sort, add description and reorder columns napps.sort() napps_ordered = [] for user, name, status in napps: description = mgr.get_description(user, name) version = mgr.get_version(user, name) napp_id = f'{user}/{name}' if version: napp_id += f':{version}' napps_ordered.append((status, napp_id, description)) cls.print_napps(napps_ordered)
[ "def", "list", "(", "cls", ",", "args", ")", ":", "# pylint: disable=unused-argument", "mgr", "=", "NAppsManager", "(", ")", "# Add status", "napps", "=", "[", "napp", "+", "(", "'[ie]'", ",", ")", "for", "napp", "in", "mgr", ".", "get_enabled", "(", ")"...
List all installed NApps and inform whether they are enabled.
[ "List", "all", "installed", "NApps", "and", "inform", "whether", "they", "are", "enabled", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7116-L7125
def validateDocumentFinal(self, doc): """Does the final step for the document validation once all the incremental validation steps have been completed basically it does the following checks described by the XML Rec Check all the IDREF/IDREFS attributes definition for validity """ if doc is None: doc__o = None else: doc__o = doc._o ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o) return ret
[ "def", "validateDocumentFinal", "(", "self", ",", "doc", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlValidateDocumentFinal", "(", "self", ".", "_o", "...
Does the final step for the document validation once all the incremental validation steps have been completed basically it does the following checks described by the XML Rec Check all the IDREF/IDREFS attributes definition for validity
[ "Does", "the", "final", "step", "for", "the", "document", "validation", "once", "all", "the", "incremental", "validation", "steps", "have", "been", "completed", "basically", "it", "does", "the", "following", "checks", "described", "by", "the", "XML", "Rec", "C...
python
train
boppreh/keyboard
keyboard/_darwinkeyboard.py
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/_darwinkeyboard.py#L187-L199
def vk_to_character(self, vk, modifiers=[]): """ Returns a character corresponding to the specified scan code (with given modifiers applied) """ if vk in self.non_layout_keys: # Not a character return self.non_layout_keys[vk] elif vk in self.layout_specific_keys: if 'shift' in modifiers: return self.layout_specific_keys[vk][1] return self.layout_specific_keys[vk][0] else: # Invalid vk raise ValueError("Invalid scan code: {}".format(vk))
[ "def", "vk_to_character", "(", "self", ",", "vk", ",", "modifiers", "=", "[", "]", ")", ":", "if", "vk", "in", "self", ".", "non_layout_keys", ":", "# Not a character", "return", "self", ".", "non_layout_keys", "[", "vk", "]", "elif", "vk", "in", "self",...
Returns a character corresponding to the specified scan code (with given modifiers applied)
[ "Returns", "a", "character", "corresponding", "to", "the", "specified", "scan", "code", "(", "with", "given", "modifiers", "applied", ")" ]
python
train
23andMe/Yamale
yamale/command_line.py
https://github.com/23andMe/Yamale/blob/0a75b4205624d9bccc52bda03efaf0d58c143c76/yamale/command_line.py#L51-L60
def _find_schema(data_path, schema_name): """ Checks if `schema_name` is a valid file, if not searches in `data_path` for it. """ path = glob.glob(schema_name) for p in path: if os.path.isfile(p): return p return _find_data_path_schema(data_path, schema_name)
[ "def", "_find_schema", "(", "data_path", ",", "schema_name", ")", ":", "path", "=", "glob", ".", "glob", "(", "schema_name", ")", "for", "p", "in", "path", ":", "if", "os", ".", "path", ".", "isfile", "(", "p", ")", ":", "return", "p", "return", "_...
Checks if `schema_name` is a valid file, if not searches in `data_path` for it.
[ "Checks", "if", "schema_name", "is", "a", "valid", "file", "if", "not", "searches", "in", "data_path", "for", "it", "." ]
python
train
google/transitfeed
gtfsscheduleviewer/marey_graph.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/gtfsscheduleviewer/marey_graph.py#L196-L213
def _BuildStations(self, stoplist): """Dispatches the best algorithm for calculating station line position. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X] """ stations = [] dists = self._EuclidianDistances(stoplist) stations = self._CalculateYLines(dists) return stations
[ "def", "_BuildStations", "(", "self", ",", "stoplist", ")", ":", "stations", "=", "[", "]", "dists", "=", "self", ".", "_EuclidianDistances", "(", "stoplist", ")", "stations", "=", "self", ".", "_CalculateYLines", "(", "dists", ")", "return", "stations" ]
Dispatches the best algorithm for calculating station line position. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
[ "Dispatches", "the", "best", "algorithm", "for", "calculating", "station", "line", "position", "." ]
python
train
boto/s3transfer
s3transfer/futures.py
https://github.com/boto/s3transfer/blob/2aead638c8385d8ae0b1756b2de17e8fad45fffa/s3transfer/futures.py#L301-L326
def submit(self, executor, task, tag=None): """Submits a task to a provided executor :type executor: s3transfer.futures.BoundedExecutor :param executor: The executor to submit the callable to :type task: s3transfer.tasks.Task :param task: The task to submit to the executor :type tag: s3transfer.futures.TaskTag :param tag: A tag to associate to the submitted task :rtype: concurrent.futures.Future :returns: A future representing the submitted task """ logger.debug( "Submitting task %s to executor %s for transfer request: %s." % ( task, executor, self.transfer_id) ) future = executor.submit(task, tag=tag) # Add this created future to the list of associated future just # in case it is needed during cleanups. self.add_associated_future(future) future.add_done_callback( FunctionContainer(self.remove_associated_future, future)) return future
[ "def", "submit", "(", "self", ",", "executor", ",", "task", ",", "tag", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Submitting task %s to executor %s for transfer request: %s.\"", "%", "(", "task", ",", "executor", ",", "self", ".", "transfer_id", "...
Submits a task to a provided executor :type executor: s3transfer.futures.BoundedExecutor :param executor: The executor to submit the callable to :type task: s3transfer.tasks.Task :param task: The task to submit to the executor :type tag: s3transfer.futures.TaskTag :param tag: A tag to associate to the submitted task :rtype: concurrent.futures.Future :returns: A future representing the submitted task
[ "Submits", "a", "task", "to", "a", "provided", "executor" ]
python
test
google/grr
grr/server/grr_response_server/gui/api_plugins/vfs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/vfs.py#L117-L167
def InitFromAff4Object(self, aff4_obj, aff4_cls, attr_blacklist): """Initializes the current instance from an Aff4Object. Iterates over all attributes of the Aff4Object defined by a given class and adds a representation of them to the current instance. Args: aff4_obj: An Aff4Object to take the attributes from. aff4_cls: A class in the inheritance hierarchy of the Aff4Object defining which attributes to take. attr_blacklist: A list of already added attributes as to not add attributes multiple times. Returns: A reference to the current instance. """ self.name = str(aff4_cls.__name__) self.attributes = [] schema = aff4_cls.SchemaCls for name, attribute in sorted(iteritems(schema.__dict__)): if not isinstance(attribute, aff4.Attribute): continue if name in attr_blacklist: continue attr_repr = ApiAff4ObjectAttribute() attr_repr.name = name attr_repr.description = attribute.description attr_repr.values = [] values = list(aff4_obj.GetValuesForAttribute(attribute)) for value in values: # This value is really a LazyDecoder() instance. We need to get at the # real data here. # TODO(user): Change GetValuesForAttribute to resolve # lazy decoders and directly yield the rdf value. if hasattr(value, "ToRDFValue"): value = value.ToRDFValue() value_repr = ApiAff4ObjectAttributeValue() value_repr.Set("type", compatibility.GetName(value.__class__)) value_repr.Set("age", value.age) value_repr.value = value attr_repr.values.append(value_repr) if attr_repr.values: self.attributes.append(attr_repr) return self
[ "def", "InitFromAff4Object", "(", "self", ",", "aff4_obj", ",", "aff4_cls", ",", "attr_blacklist", ")", ":", "self", ".", "name", "=", "str", "(", "aff4_cls", ".", "__name__", ")", "self", ".", "attributes", "=", "[", "]", "schema", "=", "aff4_cls", ".",...
Initializes the current instance from an Aff4Object. Iterates over all attributes of the Aff4Object defined by a given class and adds a representation of them to the current instance. Args: aff4_obj: An Aff4Object to take the attributes from. aff4_cls: A class in the inheritance hierarchy of the Aff4Object defining which attributes to take. attr_blacklist: A list of already added attributes as to not add attributes multiple times. Returns: A reference to the current instance.
[ "Initializes", "the", "current", "instance", "from", "an", "Aff4Object", "." ]
python
train
CellProfiler/centrosome
centrosome/cpmorphology.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2063-L2081
def distance2_to_line(pt, l0, l1): '''The perpendicular distance squared from a point to a line pt - point in question l0 - one point on the line l1 - another point on the line ''' pt = np.atleast_1d(pt) l0 = np.atleast_1d(l0) l1 = np.atleast_1d(l1) reshape = pt.ndim == 1 if reshape: pt.shape = l0.shape = l1.shape = (1, pt.shape[0]) result = (((l0[:,0] - l1[:,0]) * (l0[:,1] - pt[:,1]) - (l0[:,0] - pt[:,0]) * (l0[:,1] - l1[:,1]))**2 / np.sum((l1-l0)**2, 1)) if reshape: result = result[0] return result
[ "def", "distance2_to_line", "(", "pt", ",", "l0", ",", "l1", ")", ":", "pt", "=", "np", ".", "atleast_1d", "(", "pt", ")", "l0", "=", "np", ".", "atleast_1d", "(", "l0", ")", "l1", "=", "np", ".", "atleast_1d", "(", "l1", ")", "reshape", "=", "...
The perpendicular distance squared from a point to a line pt - point in question l0 - one point on the line l1 - another point on the line
[ "The", "perpendicular", "distance", "squared", "from", "a", "point", "to", "a", "line", "pt", "-", "point", "in", "question", "l0", "-", "one", "point", "on", "the", "line", "l1", "-", "another", "point", "on", "the", "line" ]
python
train
junzis/pyModeS
pyModeS/extra/aero.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L85-L109
def distance(lat1, lon1, lat2, lon2, H=0): """ Compute spherical distance from spherical coordinates. For two locations in spherical coordinates (1, theta, phi) and (1, theta', phi') cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi' distance = rho * arc length """ # phi = 90 - latitude phi1 = np.radians(90.0 - lat1) phi2 = np.radians(90.0 - lat2) # theta = longitude theta1 = np.radians(lon1) theta2 = np.radians(lon2) cos = np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2) cos = np.where(cos>1, 1, cos) arc = np.arccos(cos) dist = arc * (r_earth + H) # meters, radius of earth return dist
[ "def", "distance", "(", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", ",", "H", "=", "0", ")", ":", "# phi = 90 - latitude", "phi1", "=", "np", ".", "radians", "(", "90.0", "-", "lat1", ")", "phi2", "=", "np", ".", "radians", "(", "90.0", "-", "...
Compute spherical distance from spherical coordinates. For two locations in spherical coordinates (1, theta, phi) and (1, theta', phi') cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi' distance = rho * arc length
[ "Compute", "spherical", "distance", "from", "spherical", "coordinates", "." ]
python
train
wright-group/WrightTools
WrightTools/data/_data.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L1460-L1529
def rename_variables(self, *, implied=True, verbose=True, **kwargs): """Rename a set of variables. Parameters ---------- kwargs Keyword arguments of the form current:'new'. implied : boolean (optional) Toggle inclusion of other variables that start with the same name. Default is True. verbose : boolean (optional) Toggle talkback. Default is True """ # find all of the implied variables kwargs = collections.OrderedDict(kwargs) if implied: new = collections.OrderedDict() for k, v in kwargs.items(): for n in self.variable_names: if n.startswith(k): new[n] = n.replace(k, v, 1) kwargs = new # ensure that items will remain unique changed = kwargs.keys() for k, v in kwargs.items(): if v not in changed and v in self.keys(): raise wt_exceptions.NameNotUniqueError(v) # compile references to items that are changing new = {} for k, v in kwargs.items(): obj = self[k] index = self.variable_names.index(k) # rename new[v] = obj, index Group._instances.pop(obj.fullpath, None) obj.natural_name = str(v) # remove old references del self[k] # apply new references names = list(self.variable_names) for v, value in new.items(): obj, index = value self[v] = obj names[index] = v self.variable_names = names units = self.units new = list(self.axis_expressions) for i, v in enumerate(kwargs.keys()): for j, n in enumerate(new): new[j] = n.replace(v, "{%i}" % i) for i, n in enumerate(new): new[i] = n.format(*kwargs.values()) self.transform(*new) for a, u in zip(self._axes, units): a.convert(u) units = self.constant_units new = list(self.constant_expressions) for i, v in enumerate(kwargs.keys()): for j, n in enumerate(new): new[j] = n.replace(v, "{%i}" % i) for i, n in enumerate(new): new[i] = n.format(*kwargs.values()) self.set_constants(*new) for c, u in zip(self._constants, units): c.convert(u) # finish if verbose: print("{0} variable(s) renamed:".format(len(kwargs))) for k, v in kwargs.items(): print(" {0} --> {1}".format(k, v))
[ "def", "rename_variables", "(", "self", ",", "*", ",", "implied", "=", "True", ",", "verbose", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# find all of the implied variables", "kwargs", "=", "collections", ".", "OrderedDict", "(", "kwargs", ")", "if", ...
Rename a set of variables. Parameters ---------- kwargs Keyword arguments of the form current:'new'. implied : boolean (optional) Toggle inclusion of other variables that start with the same name. Default is True. verbose : boolean (optional) Toggle talkback. Default is True
[ "Rename", "a", "set", "of", "variables", "." ]
python
train
numenta/nupic
src/nupic/frameworks/opf/prediction_metrics_manager.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/prediction_metrics_manager.py#L159-L174
def getMetrics(self): """ Gets the current metric values :returns: (dict) where each key is the metric-name, and the values are it scalar value. Same as the output of :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update` """ result = {} for metricObj, label in zip(self.__metrics, self.__metricLabels): value = metricObj.getMetric() result[label] = value['value'] return result
[ "def", "getMetrics", "(", "self", ")", ":", "result", "=", "{", "}", "for", "metricObj", ",", "label", "in", "zip", "(", "self", ".", "__metrics", ",", "self", ".", "__metricLabels", ")", ":", "value", "=", "metricObj", ".", "getMetric", "(", ")", "r...
Gets the current metric values :returns: (dict) where each key is the metric-name, and the values are it scalar value. Same as the output of :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`
[ "Gets", "the", "current", "metric", "values" ]
python
valid
cjdrake/pyeda
pyeda/boolalg/expr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L422-L431
def Xnor(*xs, simplify=True): """Expression exclusive nor (XNOR) operator If *simplify* is ``True``, return a simplified expression. """ xs = [Expression.box(x).node for x in xs] y = exprnode.not_(exprnode.xor(*xs)) if simplify: y = y.simplify() return _expr(y)
[ "def", "Xnor", "(", "*", "xs", ",", "simplify", "=", "True", ")", ":", "xs", "=", "[", "Expression", ".", "box", "(", "x", ")", ".", "node", "for", "x", "in", "xs", "]", "y", "=", "exprnode", ".", "not_", "(", "exprnode", ".", "xor", "(", "*"...
Expression exclusive nor (XNOR) operator If *simplify* is ``True``, return a simplified expression.
[ "Expression", "exclusive", "nor", "(", "XNOR", ")", "operator" ]
python
train
h2oai/h2o-3
h2o-py/h2o/backend/cluster.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/backend/cluster.py#L34-L48
def from_kvs(keyvals): """ Create H2OCluster object from a list of key-value pairs. TODO: This method should be moved into the base H2OResponse class. """ obj = H2OCluster() obj._retrieved_at = time.time() for k, v in keyvals: if k in {"__meta", "_exclude_fields", "__schema"}: continue if k in _cloud_v3_valid_keys: obj._props[k] = v else: raise AttributeError("Attribute %s cannot be set on H2OCluster (= %r)" % (k, v)) return obj
[ "def", "from_kvs", "(", "keyvals", ")", ":", "obj", "=", "H2OCluster", "(", ")", "obj", ".", "_retrieved_at", "=", "time", ".", "time", "(", ")", "for", "k", ",", "v", "in", "keyvals", ":", "if", "k", "in", "{", "\"__meta\"", ",", "\"_exclude_fields\...
Create H2OCluster object from a list of key-value pairs. TODO: This method should be moved into the base H2OResponse class.
[ "Create", "H2OCluster", "object", "from", "a", "list", "of", "key", "-", "value", "pairs", "." ]
python
test
fudge-py/fudge
fudge/patcher.py
https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/patcher.py#L328-L341
def restore(self): """Restore the saved value for the attribute of the object.""" if self.proxy_object is None: if self.getter: setattr(self.getter_class, self.attr_name, self.getter) elif self.is_local: setattr(self.orig_object, self.attr_name, self.orig_value) else: # Was not a local, safe to delete: delattr(self.orig_object, self.attr_name) else: setattr(sys.modules[self.orig_object.__module__], self.orig_object.__name__, self.orig_object)
[ "def", "restore", "(", "self", ")", ":", "if", "self", ".", "proxy_object", "is", "None", ":", "if", "self", ".", "getter", ":", "setattr", "(", "self", ".", "getter_class", ",", "self", ".", "attr_name", ",", "self", ".", "getter", ")", "elif", "sel...
Restore the saved value for the attribute of the object.
[ "Restore", "the", "saved", "value", "for", "the", "attribute", "of", "the", "object", "." ]
python
train
jspricke/python-remind
remind.py
https://github.com/jspricke/python-remind/blob/dda2aa8fc20b87b9c9fcbca2b67bce73911d05d1/remind.py#L388-L418
def _gen_msg(vevent, label, tail, sep): """Generate a Remind MSG from the given vevent. Opposite of _gen_description() """ rem = ['MSG'] msg = [] if label: msg.append(label) if hasattr(vevent, 'summary') and vevent.summary.value: msg.append(Remind._rem_clean(vevent.summary.value)) else: msg.append('empty reminder') if hasattr(vevent, 'location') and vevent.location.value: msg.append('at %s' % Remind._rem_clean(vevent.location.value)) has_desc = hasattr(vevent, 'description') and vevent.description.value if tail or has_desc: rem.append('%%"%s%%"' % ' '.join(msg)) else: rem.append(' '.join(msg)) if tail: rem.append(tail) if has_desc: rem[-1] += sep + Remind._rem_clean(vevent.description.value) return ' '.join(rem)
[ "def", "_gen_msg", "(", "vevent", ",", "label", ",", "tail", ",", "sep", ")", ":", "rem", "=", "[", "'MSG'", "]", "msg", "=", "[", "]", "if", "label", ":", "msg", ".", "append", "(", "label", ")", "if", "hasattr", "(", "vevent", ",", "'summary'",...
Generate a Remind MSG from the given vevent. Opposite of _gen_description()
[ "Generate", "a", "Remind", "MSG", "from", "the", "given", "vevent", ".", "Opposite", "of", "_gen_description", "()" ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L932-L935
def p_expression_unot(self, p): 'expression : NOT expression %prec UNOT' p[0] = Unot(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_unot", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Unot", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(...
expression : NOT expression %prec UNOT
[ "expression", ":", "NOT", "expression", "%prec", "UNOT" ]
python
train
GemHQ/round-py
round/applications.py
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/applications.py#L107-L111
def get_wallets(self, fetch=False): """Return this Applications's wallets object, populating it if fetch is True.""" return Wallets( self.resource.wallets, self.client, populate=fetch, application=self)
[ "def", "get_wallets", "(", "self", ",", "fetch", "=", "False", ")", ":", "return", "Wallets", "(", "self", ".", "resource", ".", "wallets", ",", "self", ".", "client", ",", "populate", "=", "fetch", ",", "application", "=", "self", ")" ]
Return this Applications's wallets object, populating it if fetch is True.
[ "Return", "this", "Applications", "s", "wallets", "object", "populating", "it", "if", "fetch", "is", "True", "." ]
python
train
jay-johnson/spylunking
spylunking/splunk_publisher.py
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/splunk_publisher.py#L736-L742
def close( self): """close""" self.debug_log('close - start') self.shutdown() logging.Handler.close(self) self.debug_log('close - done')
[ "def", "close", "(", "self", ")", ":", "self", ".", "debug_log", "(", "'close - start'", ")", "self", ".", "shutdown", "(", ")", "logging", ".", "Handler", ".", "close", "(", "self", ")", "self", ".", "debug_log", "(", "'close - done'", ")" ]
close
[ "close" ]
python
train
kgori/treeCl
treeCl/partition.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/partition.py#L133-L152
def random(cls, alpha, size): """ Generate a random start using expected proportions, alpha. These are used to parameterise a random draw from a Dirichlet distribution. An example, to split a dataset of 20 items into 3 groups of [10, 6, 4] items: - alpha = [10, 6, 4], - alpha = [100, 60, 40], - alpha = [5, 3, 2], would all work. Variance is inversely related to sum(alpha) """ props = np.concatenate([[0], (scipy.stats.dirichlet.rvs(alpha) * size).cumsum().round().astype(int)]) indices = np.array(list(range(size))) random.shuffle(indices) x = [] for i in range(len(props)-1): ix = indices[props[i]:props[i+1]] x.append(ix) return cls.from_membership(x)
[ "def", "random", "(", "cls", ",", "alpha", ",", "size", ")", ":", "props", "=", "np", ".", "concatenate", "(", "[", "[", "0", "]", ",", "(", "scipy", ".", "stats", ".", "dirichlet", ".", "rvs", "(", "alpha", ")", "*", "size", ")", ".", "cumsum"...
Generate a random start using expected proportions, alpha. These are used to parameterise a random draw from a Dirichlet distribution. An example, to split a dataset of 20 items into 3 groups of [10, 6, 4] items: - alpha = [10, 6, 4], - alpha = [100, 60, 40], - alpha = [5, 3, 2], would all work. Variance is inversely related to sum(alpha)
[ "Generate", "a", "random", "start", "using", "expected", "proportions", "alpha", ".", "These", "are", "used", "to", "parameterise", "a", "random", "draw", "from", "a", "Dirichlet", "distribution", ".", "An", "example", "to", "split", "a", "dataset", "of", "2...
python
train
PolyJIT/benchbuild
benchbuild/utils/run.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/run.py#L338-L365
def in_builddir(sub='.'): """ Decorate a project phase with a local working directory change. Args: sub: An optional subdirectory to change into. """ from functools import wraps def wrap_in_builddir(func): """Wrap the function for the new build directory.""" @wraps(func) def wrap_in_builddir_func(self, *args, **kwargs): """The actual function inside the wrapper for the new builddir.""" p = local.path(self.builddir) / sub if not p.exists(): LOG.error("%s does not exist.", p) if p == local.cwd: LOG.debug("CWD already is %s", p) return func(self, *args, *kwargs) with local.cwd(p): return func(self, *args, **kwargs) return wrap_in_builddir_func return wrap_in_builddir
[ "def", "in_builddir", "(", "sub", "=", "'.'", ")", ":", "from", "functools", "import", "wraps", "def", "wrap_in_builddir", "(", "func", ")", ":", "\"\"\"Wrap the function for the new build directory.\"\"\"", "@", "wraps", "(", "func", ")", "def", "wrap_in_builddir_f...
Decorate a project phase with a local working directory change. Args: sub: An optional subdirectory to change into.
[ "Decorate", "a", "project", "phase", "with", "a", "local", "working", "directory", "change", "." ]
python
train
numenta/nupic
src/nupic/frameworks/opf/opf_task_driver.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/opf_task_driver.py#L428-L441
def advance(self): """ Advances the iteration; Returns: True if more iterations remain; False if this is the final iteration. """ hasMore = True try: self.__iter.next() except StopIteration: self.__iter = None hasMore = False return hasMore
[ "def", "advance", "(", "self", ")", ":", "hasMore", "=", "True", "try", ":", "self", ".", "__iter", ".", "next", "(", ")", "except", "StopIteration", ":", "self", ".", "__iter", "=", "None", "hasMore", "=", "False", "return", "hasMore" ]
Advances the iteration; Returns: True if more iterations remain; False if this is the final iteration.
[ "Advances", "the", "iteration", ";" ]
python
valid
markchil/gptools
gptools/utils.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L2616-L2752
def plot_sampler_cov( sampler, method='corr', weights=None, cutoff_weight=None, labels=None, burn=0, chain_mask=None, temp_idx=0, cbar_label=None, title='', rot_x_labels=False, figsize=None, xlabel_on_top=True ): """Make a plot of the sampler's correlation or covariance matrix. Returns the figure and axis created. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. method : {'corr', 'cov'} Whether to plot the correlation matrix ('corr') or the covariance matrix ('cov'). The covariance matrix is often not useful because different parameters have wildly different scales. Default is to plot the correlation matrix. labels : array of str, (`D`,), optional The labels for each hyperparameter. Default is to use empty strings. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). cbar_label : str, optional The label to use for the colorbar. The default is chosen based on the value of the `method` keyword. title : str, optional Title to use for the plot. rot_x_labels : bool, optional If True, the labels for the x-axis are rotated 90 degrees. Default is False (do not rotate labels). figsize : 2-tuple, optional The figure size to use. Default is to use the matplotlib default. xlabel_on_top : bool, optional If True, the x-axis labels are put on top (the way mathematicians present matrices). Default is True. 
""" try: k = sampler.flatchain.shape[-1] except AttributeError: # Assumes array input is only case where there is no "flatchain" attribute. k = sampler.shape[-1] # Process the samples: if isinstance(sampler, emcee.EnsembleSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool) flat_trace = sampler.chain[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, emcee.PTSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.nwalkers, dtype=bool) flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, scipy.ndarray): if sampler.ndim == 4: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[1], dtype=bool) flat_trace = sampler[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[temp_idx, chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 3: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[0], dtype=bool) flat_trace = sampler[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 2: flat_trace = sampler[burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[burn:] weights = weights.ravel() if cutoff_weight is not None and weights is not None: mask = weights >= cutoff_weight * weights.max() flat_trace = flat_trace[mask, :] weights = weights[mask] else: raise ValueError("Unknown sampler class: %s" % (type(sampler),)) if labels is None: labels = [''] * k if cbar_label is None: cbar_label = r'$\mathrm{cov}(p_1, p_2)$' if method == 'cov' else r'$\mathrm{corr}(p_1, p_2)$' if weights is None: if method == 'corr': cov = scipy.corrcoef(flat_trace, rowvar=0, ddof=1) else: cov = scipy.cov(flat_trace, rowvar=0, ddof=1) else: cov = scipy.cov(flat_trace, rowvar=0, aweights=weights) if method 
== 'corr': stds = scipy.sqrt(scipy.diag(cov)) STD_1, STD_2 = scipy.meshgrid(stds, stds) cov = cov / (STD_1 * STD_2) f_cov = plt.figure(figsize=figsize) a_cov = f_cov.add_subplot(1, 1, 1) a_cov.set_title(title) if method == 'cov': vmax = scipy.absolute(cov).max() else: vmax = 1.0 cax = a_cov.pcolor(cov, cmap='seismic', vmin=-1 * vmax, vmax=vmax) divider = make_axes_locatable(a_cov) a_cb = divider.append_axes("right", size="10%", pad=0.05) cbar = f_cov.colorbar(cax, cax=a_cb, label=cbar_label) a_cov.set_xlabel('parameter') a_cov.set_ylabel('parameter') a_cov.axis('square') a_cov.invert_yaxis() if xlabel_on_top: a_cov.xaxis.tick_top() a_cov.xaxis.set_label_position('top') a_cov.set_xticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float)) a_cov.set_yticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float)) a_cov.set_xticklabels(labels) if rot_x_labels: plt.setp(a_cov.xaxis.get_majorticklabels(), rotation=90) a_cov.set_yticklabels(labels) a_cov.set_xlim(0, flat_trace.shape[1]) a_cov.set_ylim(flat_trace.shape[1], 0) return f_cov, a_cov
[ "def", "plot_sampler_cov", "(", "sampler", ",", "method", "=", "'corr'", ",", "weights", "=", "None", ",", "cutoff_weight", "=", "None", ",", "labels", "=", "None", ",", "burn", "=", "0", ",", "chain_mask", "=", "None", ",", "temp_idx", "=", "0", ",", ...
Make a plot of the sampler's correlation or covariance matrix. Returns the figure and axis created. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. method : {'corr', 'cov'} Whether to plot the correlation matrix ('corr') or the covariance matrix ('cov'). The covariance matrix is often not useful because different parameters have wildly different scales. Default is to plot the correlation matrix. labels : array of str, (`D`,), optional The labels for each hyperparameter. Default is to use empty strings. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). cbar_label : str, optional The label to use for the colorbar. The default is chosen based on the value of the `method` keyword. title : str, optional Title to use for the plot. rot_x_labels : bool, optional If True, the labels for the x-axis are rotated 90 degrees. Default is False (do not rotate labels). figsize : 2-tuple, optional The figure size to use. Default is to use the matplotlib default. xlabel_on_top : bool, optional If True, the x-axis labels are put on top (the way mathematicians present matrices). Default is True.
[ "Make", "a", "plot", "of", "the", "sampler", "s", "correlation", "or", "covariance", "matrix", ".", "Returns", "the", "figure", "and", "axis", "created", ".", "Parameters", "----------", "sampler", ":", ":", "py", ":", "class", ":", "emcee", ".", "Sampler"...
python
train
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1584-L1589
def _handle_end_relation(self): """ Handle closing relation element """ self._result.append(Relation(result=self._result, **self._curr)) self._curr = {}
[ "def", "_handle_end_relation", "(", "self", ")", ":", "self", ".", "_result", ".", "append", "(", "Relation", "(", "result", "=", "self", ".", "_result", ",", "*", "*", "self", ".", "_curr", ")", ")", "self", ".", "_curr", "=", "{", "}" ]
Handle closing relation element
[ "Handle", "closing", "relation", "element" ]
python
train
scikit-tda/kepler-mapper
kmapper/jupyter.py
https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/jupyter.py#L11-L61
def display(path_html="mapper_visualization_output.html"):
    """ Displays a html file inside a Jupyter Notebook output cell.

    .. note:: Must run ``KeplerMapper.visualize`` first to generate html. This function
        will then render that output from a file saved to disk.

    .. note:: Thanks to `smartinsightsfromdata <https://github.com/smartinsightsfromdata>`_
        for the `github issue 10 <https://github.com/MLWave/kepler-mapper/issues/10>`_
        that suggested this method.

    Parameters
    ============

    path_html : str
        Path to html. Use file name for file inside current working directory.
        Use ``file://`` browser url-format for path to local file.
        Use ``https://`` urls for externally hosted resources.

    Examples
    =========

    ::

        import numpy as np
        import kmapper as km
        from kmapper.jupyter import display

        data = np.random.random((2000, 2))
        mapper = km.KeplerMapper()
        lens = km.project(data)
        graph = km.map(lens, data)
        _ = km.visualize(graph, path_html="filename.html")
        display("filename.html")

    The default filename is the same default as the ``.visualize`` method, so
    using both without arguments will show the last constructed graph:

    >>> _ = km.visualize(graph)
    >>> display()

    """
    # Quote the ``src`` attribute so paths containing spaces survive, and use
    # a single '%' sign: the original concatenated the plain string literal
    # "width=100%%", which is never %-formatted, so the rendered HTML
    # contained the invalid attribute value "100%%".
    iframe = (
        '<iframe src="'
        + path_html
        + '" width=100% height=800 frameBorder="0"></iframe>'
    )
    IPython.core.display.display(IPython.core.display.HTML(iframe))
[ "def", "display", "(", "path_html", "=", "\"mapper_visualization_output.html\"", ")", ":", "iframe", "=", "(", "\"<iframe src=\"", "+", "path_html", "+", "' width=100%% height=800 frameBorder=\"0\"></iframe>'", ")", "IPython", ".", "core", ".", "display", ".", "display"...
Displays a html file inside a Jupyter Notebook output cell. .. note:: Must run ``KeplerMapper.visualize`` first to generate html. This function will then render that output from a file saved to disk. .. note:: Thanks to `smartinsightsfromdata <https://github.com/smartinsightsfromdata>`_ for the `github issue 10 <https://github.com/MLWave/kepler-mapper/issues/10>`_ that suggested this method. Parameters ============ path_html : str Path to html. Use file name for file inside current working directory. Use ``file://`` browser url-format for path to local file. Use ``https://`` urls for externally hosted resources. Examples ========= :: import numpy as np import kmapper as km from kmapper.jupyter import display data = np.random.random((2000, 2)) mapper = km.KeplerMapper() lens = km.project(data) graph = km.map(lens, data) _ = km.visualize(graph, path_html="filename.html") display("filename.html") The default filename is the same default as the ``.visualize`` method, so using both without arguments will show the last constructed graph: >>> _ = km.visualize(graph) >>> display()
[ "Displays", "a", "html", "file", "inside", "a", "Jupyter", "Notebook", "output", "cell", ".", "..", "note", "::", "Must", "run", "KeplerMapper", ".", "visualize", "first", "to", "generate", "html", ".", "This", "function", "will", "then", "render", "that", ...
python
train
bitpay/bitpay-python
bitpay/client.py
https://github.com/bitpay/bitpay-python/blob/3f456118bef1c460adf5d4d5546f38dac1e2a5cc/bitpay/client.py#L85-L94
def verify_invoice_params(self, price, currency):
    """
    Deprecated, will be made private in 2.4

    Validate invoice parameters before they are sent to the API.

    :param price: invoice amount; must be coercible to ``float``.
    :param currency: ISO 4217 currency code (exactly three uppercase
        letters, e.g. ``"USD"``).
    :raises BitPayArgumentError: if the currency code is malformed or the
        price cannot be parsed as a float.
    """
    # ``{3}`` is equivalent to the original redundant ``{3,3}`` quantifier.
    if re.match("^[A-Z]{3}$", currency) is None:
        raise BitPayArgumentError("Currency is invalid.")
    try:
        float(price)
    except (TypeError, ValueError):
        # Catch only the conversion failures float() can raise; the original
        # bare ``except:`` also swallowed SystemExit/KeyboardInterrupt.
        raise BitPayArgumentError("Price must be formatted as a float")
[ "def", "verify_invoice_params", "(", "self", ",", "price", ",", "currency", ")", ":", "if", "re", ".", "match", "(", "\"^[A-Z]{3,3}$\"", ",", "currency", ")", "is", "None", ":", "raise", "BitPayArgumentError", "(", "\"Currency is invalid.\"", ")", "try", ":", ...
Deprecated, will be made private in 2.4
[ "Deprecated", "will", "be", "made", "private", "in", "2", ".", "4" ]
python
train
klmitch/aversion
aversion.py
https://github.com/klmitch/aversion/blob/90ca68e7d6426a77db8a926171f8d3bbeb00ee4c/aversion.py#L696-L734
def _proc_ctype_header(self, request, result): """ Process the Content-Type header rules for the request. Only the desired API version can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in. """ if result: # Result has already been fully determined return try: ctype = request.headers['content-type'] except KeyError: # No content-type header to examine return # Parse the content type ctype, params = parse_ctype(ctype) # Is it a recognized content type? if ctype not in self.types: return # Get the mapped ctype and version mapped_ctype, mapped_version = self.types[ctype](params) # Update the content type header and set the version if mapped_ctype: request.environ['aversion.request_type'] = mapped_ctype request.environ['aversion.orig_request_type'] = ctype request.environ['aversion.content_type'] = \ request.headers['content-type'] if self.overwrite_headers: request.headers['content-type'] = mapped_ctype if mapped_version: result.set_version(mapped_version)
[ "def", "_proc_ctype_header", "(", "self", ",", "request", ",", "result", ")", ":", "if", "result", ":", "# Result has already been fully determined", "return", "try", ":", "ctype", "=", "request", ".", "headers", "[", "'content-type'", "]", "except", "KeyError", ...
Process the Content-Type header rules for the request. Only the desired API version can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in.
[ "Process", "the", "Content", "-", "Type", "header", "rules", "for", "the", "request", ".", "Only", "the", "desired", "API", "version", "can", "be", "determined", "from", "those", "rules", "." ]
python
train
SBRG/ssbio
ssbio/utils.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L889-L926
def remap(x, oMin, oMax, nMin, nMax):
    """Linearly remap ``x`` from the range [oMin, oMax] to [nMin, nMax].

    Either range may be specified in descending order (min > max); the
    relative position of ``x`` within the input range is preserved, so a
    reversed range flips the mapping accordingly.

    http://stackoverflow.com/questions/929103/convert-a-number-range-to-another-range-maintaining-ratio

    :param x: value to remap.
    :param oMin: one end of the original range.
    :param oMax: other end of the original range.
    :param nMin: one end of the new range.
    :param nMax: other end of the new range.
    :return: ``x`` mapped into the new range, or ``x`` unchanged if either
        range has zero width (a warning is logged in that case).
    """
    # Degenerate ranges cannot be rescaled; return the input untouched.
    if oMin == oMax:
        log.warning("Zero input range, unable to rescale")
        return x
    if nMin == nMax:
        log.warning("Zero output range, unable to rescale")
        return x

    # Normalize both ranges to ascending order, remembering whether each
    # was given reversed.
    old_min, old_max = min(oMin, oMax), max(oMin, oMax)
    reverse_input = old_min != oMin
    new_min, new_max = min(nMin, nMax), max(nMin, nMax)
    reverse_output = new_min != nMin

    # Scaled distance of x from the relevant end of the input range.  The
    # original always computed the forward portion and then recomputed it
    # from scratch for reversed input; compute it exactly once instead.
    scale = (new_max - new_min) / (old_max - old_min)
    if reverse_input:
        portion = (old_max - x) * scale
    else:
        portion = (x - old_min) * scale

    return new_max - portion if reverse_output else portion + new_min
[ "def", "remap", "(", "x", ",", "oMin", ",", "oMax", ",", "nMin", ",", "nMax", ")", ":", "#range check", "if", "oMin", "==", "oMax", ":", "log", ".", "warning", "(", "\"Zero input range, unable to rescale\"", ")", "return", "x", "if", "nMin", "==", "nMax"...
Map to a 0 to 1 scale http://stackoverflow.com/questions/929103/convert-a-number-range-to-another-range-maintaining-ratio
[ "Map", "to", "a", "0", "to", "1", "scale", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "929103", "/", "convert", "-", "a", "-", "number", "-", "range", "-", "to", "-", "another", "-", "range", "-", "maintaining", "-", "ra...
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L28530-L28540
def on_cpu_execution_cap_change(self, execution_cap):
    """Notification when the CPU execution cap changes.

    in execution_cap of type int
        The new CPU execution cap value. (1-100)

    """
    # Validate the argument type up front, then forward the notification
    # to the underlying API call.
    if isinstance(execution_cap, baseinteger):
        self._call("onCPUExecutionCapChange", in_p=[execution_cap])
    else:
        raise TypeError("execution_cap can only be an instance of type baseinteger")
[ "def", "on_cpu_execution_cap_change", "(", "self", ",", "execution_cap", ")", ":", "if", "not", "isinstance", "(", "execution_cap", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"execution_cap can only be an instance of type baseinteger\"", ")", "self", "."...
Notification when the CPU execution cap changes. in execution_cap of type int The new CPU execution cap value. (1-100)
[ "Notification", "when", "the", "CPU", "execution", "cap", "changes", "." ]
python
train