repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3157-L3173
def reshape_by_blocks(x, x_shape, memory_block_size):
  """Reshapes input by splitting its length over blocks of memory_block_size.

  Args:
    x: a Tensor with shape [batch, heads, length, depth]
    x_shape: tf.TensorShape of x.
    memory_block_size: Integer which divides length.

  Returns:
    Tensor with shape
      [batch, heads, length // memory_block_size, memory_block_size, depth].
  """
  num_blocks = x_shape[2] // memory_block_size
  blocked_shape = [
      x_shape[0], x_shape[1], num_blocks, memory_block_size, x_shape[3]
  ]
  return tf.reshape(x, blocked_shape)
[ "def", "reshape_by_blocks", "(", "x", ",", "x_shape", ",", "memory_block_size", ")", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]", "//", "memory_bloc...
Reshapes input by splitting its length over blocks of memory_block_size. Args: x: a Tensor with shape [batch, heads, length, depth] x_shape: tf.TensorShape of x. memory_block_size: Integer which divides length. Returns: Tensor with shape [batch, heads, length // memory_block_size, memory_block_size, depth].
[ "Reshapes", "input", "by", "splitting", "its", "length", "over", "blocks", "of", "memory_block_size", "." ]
python
train
senaite/senaite.core
bika/lims/browser/fields/proxyfield.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/fields/proxyfield.py#L68-L94
def get(self, instance, **kwargs):
    """Return the value of the identically named field on the proxy object.
    """
    default_value = self.getDefault(instance)

    # Resolve the object this field proxies to. The proxy expression
    # (e.g. 'context.getSample()' on an AR) may yield nothing.
    target = self.get_proxy(instance)
    if target is None:
        logger.debug("Expression '{}' did not return a valid Proxy Object on {}"
                     .format(self.proxy, instance))
        return default_value

    # The proxied field must carry the very same name as this field.
    field_name = self.getName()
    field = target.getField(field_name)
    if field is None:
        raise KeyError("Object '{}' with id '{}' has no field named '{}'".format(
            target.portal_type, target.getId(), field_name))

    return field.get(target)
[ "def", "get", "(", "self", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "# The default value", "default", "=", "self", ".", "getDefault", "(", "instance", ")", "# Retrieve the proxy object", "proxy_object", "=", "self", ".", "get_proxy", "(", "instance"...
retrieves the value of the same named field on the proxy object
[ "retrieves", "the", "value", "of", "the", "same", "named", "field", "on", "the", "proxy", "object" ]
python
train
jspricke/python-icstask
icstask.py
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L298-L305
def append_vobject(self, vtodo, project=None):
    """Add a task from vObject to Taskwarrior

    vtodo -- the iCalendar to add
    project -- the project to add (see get_filesnames() as well)
    """
    # Reduce a project path to its final component; leave falsy values alone.
    normalized = basename(project) if project else project
    return self.to_task(vtodo.vtodo, normalized)
[ "def", "append_vobject", "(", "self", ",", "vtodo", ",", "project", "=", "None", ")", ":", "if", "project", ":", "project", "=", "basename", "(", "project", ")", "return", "self", ".", "to_task", "(", "vtodo", ".", "vtodo", ",", "project", ")" ]
Add a task from vObject to Taskwarrior vtodo -- the iCalendar to add project -- the project to add (see get_filesnames() as well)
[ "Add", "a", "task", "from", "vObject", "to", "Taskwarrior", "vtodo", "--", "the", "iCalendar", "to", "add", "project", "--", "the", "project", "to", "add", "(", "see", "get_filesnames", "()", "as", "well", ")" ]
python
train
rosenbrockc/ci
pyci/scripts/ci.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/scripts/ci.py#L404-L430
def _list_repos():
    """Lists all the installed repos as well as their last start and finish
    times from the cron's perspective.
    """
    if not args["list"]:
        return

    # Walk the repositories known to a server instance; where the db's
    # status section also knows a repo, show its saved start/end times.
    from pyci.server import Server
    server = Server(testmode=args["nolive"])

    lines = ["Repository | Started | Finished | XML File Path",
             "--------------------------------------------------------------------------"]
    statuses = db["status"] if "status" in db else {}
    rowfmt = "{0:<20} | {1:^16} | {2:^16} | {3}"
    for reponame, repo in server.repositories.items():
        if reponame in statuses:
            started = _fmt_time(statuses[reponame]["start"])
            finished = _fmt_time(statuses[reponame]["end"])
        else:
            started = "Never"
            finished = "Never"
        lines.append(rowfmt.format(reponame, started, finished, repo.filepath))
    info('\n'.join(lines))
[ "def", "_list_repos", "(", ")", ":", "if", "not", "args", "[", "\"list\"", "]", ":", "return", "#Just loop over the list of repos we have in a server instance. See if", "#they also exist in the db's status; if they do, include the start/end", "#times we have saved.", "from", "pyci"...
Lists all the installed repos as well as their last start and finish times from the cron's perspective.
[ "Lists", "all", "the", "installed", "repos", "as", "well", "as", "their", "last", "start", "and", "finish", "times", "from", "the", "cron", "s", "perspective", "." ]
python
train
gtaylor/EVE-Market-Data-Structures
emds/formats/unified/__init__.py
https://github.com/gtaylor/EVE-Market-Data-Structures/blob/77d69b24f2aada3aeff8fba3d75891bfba8fdcf3/emds/formats/unified/__init__.py#L45-L60
def encode_to_json(order_or_history):
    """
    Given an order or history entry, encode it to JSON and return.

    :type order_or_history: MarketOrderList or MarketHistoryList
    :param order_or_history: A MarketOrderList or MarketHistoryList instance
        to encode to JSON.
    :rtype: str
    :return: The encoded JSON string.
    :raises TypeError: If ``order_or_history`` is neither a MarketOrderList
        nor a MarketHistoryList.
    """
    if isinstance(order_or_history, MarketOrderList):
        return orders.encode_to_json(order_or_history)
    elif isinstance(order_or_history, MarketHistoryList):
        return history.encode_to_json(order_or_history)
    else:
        # TypeError is the correct category for a wrong argument type; it is
        # a subclass of Exception, so callers catching Exception still work.
        raise TypeError("Must be one of MarketOrderList or MarketHistoryList.")
[ "def", "encode_to_json", "(", "order_or_history", ")", ":", "if", "isinstance", "(", "order_or_history", ",", "MarketOrderList", ")", ":", "return", "orders", ".", "encode_to_json", "(", "order_or_history", ")", "elif", "isinstance", "(", "order_or_history", ",", ...
Given an order or history entry, encode it to JSON and return. :type order_or_history: MarketOrderList or MarketHistoryList :param order_or_history: A MarketOrderList or MarketHistoryList instance to encode to JSON. :rtype: str :return: The encoded JSON string.
[ "Given", "an", "order", "or", "history", "entry", "encode", "it", "to", "JSON", "and", "return", "." ]
python
train
mapbox/mapbox-cli-py
mapboxcli/scripts/uploads.py
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/uploads.py#L17-L76
def upload(ctx, tileset, datasource, name, patch):
    """Upload data to Mapbox accounts.

    Uploaded data lands at https://www.mapbox.com/data/ and can be used
    in new or existing projects. All endpoints require authentication.

    You can specify the tileset id and input file

        $ mapbox upload username.data mydata.geojson

    Or specify just the tileset id and take an input file on stdin

        $ cat mydata.geojson | mapbox upload username.data

    The --name option defines the title as it appears in Studio and
    defaults to the last part of the tileset id, e.g. "data"

    Note that the tileset must start with your username. An access
    token with upload scope is required, see `mapbox --help`.

    Your account must be flagged in order to use the patch mode feature.
    """
    # Token comes from the click context when present; None lets the
    # Uploader fall back to its own credential discovery.
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    service = mapbox.Uploader(access_token=access_token)
    if name is None:
        name = tileset.split(".")[-1]
    if datasource.startswith('https://'):
        # Skip staging. Note this only works for specific buckets.
        res = service.create(datasource, tileset, name=name, patch=patch)
    else:
        sourcefile = click.File('rb')(datasource)
        # Work out a total length for the progress bar; stdin and
        # size-less objects get a dummy length of 1.
        if hasattr(sourcefile, 'name'):
            filelen = (
                1 if sourcefile.name == '<stdin>'
                else os.stat(sourcefile.name).st_size)
        else:
            filelen = (len(sourcefile.getbuffer())
                       if hasattr(sourcefile, 'getbuffer') else 1)
        with click.progressbar(length=filelen,
                               label='Uploading data source',
                               fill_char="#",
                               empty_char='-',
                               file=sys.stderr) as bar:

            def callback(num_bytes):
                """Update the progress bar"""
                bar.update(num_bytes)

            res = service.upload(sourcefile, tileset, name, patch=patch,
                                 callback=callback)
    # 201 Created indicates the upload was accepted.
    if res.status_code == 201:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
[ "def", "upload", "(", "ctx", ",", "tileset", ",", "datasource", ",", "name", ",", "patch", ")", ":", "access_token", "=", "(", "ctx", ".", "obj", "and", "ctx", ".", "obj", ".", "get", "(", "'access_token'", ")", ")", "or", "None", "service", "=", "...
Upload data to Mapbox accounts. Uploaded data lands at https://www.mapbox.com/data/ and can be used in new or existing projects. All endpoints require authentication. You can specify the tileset id and input file $ mapbox upload username.data mydata.geojson Or specify just the tileset id and take an input file on stdin $ cat mydata.geojson | mapbox upload username.data The --name option defines the title as it appears in Studio and defaults to the last part of the tileset id, e.g. "data" Note that the tileset must start with your username. An access token with upload scope is required, see `mapbox --help`. Your account must be flagged in order to use the patch mode feature.
[ "Upload", "data", "to", "Mapbox", "accounts", "." ]
python
train
radjkarl/fancyTools
fancytools/math/line.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L22-L45
def cutToFitIntoPolygon(line, polygon):
    """
    cut line so it fits into polygon
    polygon = (( x0,y0), (x1,y1) ,...)
    """
    start_inside = pointInsidePolygon(line[0], line[1], polygon)
    end_inside = pointInsidePolygon(line[2], line[3], polygon)
    if start_inside and end_inside:
        return line

    # Walk consecutive polygon edges; each intersection clamps one of
    # the line's endpoints that lies outside the polygon.
    for (ax, ay), (bx, by) in zip(polygon[:-1], polygon[1:]):
        hit = segmentIntersection(line, (ax, ay, bx, by))
        if hit is None:
            continue
        if not start_inside:
            line = (hit[0], hit[1], line[2], line[3])
            start_inside = True
        elif not end_inside:
            line = (line[0], line[1], hit[0], hit[1])
            end_inside = True
        if start_inside and end_inside:
            break
    return line
[ "def", "cutToFitIntoPolygon", "(", "line", ",", "polygon", ")", ":", "p0_inside", "=", "pointInsidePolygon", "(", "line", "[", "0", "]", ",", "line", "[", "1", "]", ",", "polygon", ")", "p1_inside", "=", "pointInsidePolygon", "(", "line", "[", "2", "]", ...
cut line so it fits into polygon polygon = (( x0,y0), (x1,y1) ,...)
[ "cut", "line", "so", "it", "fits", "into", "polygon", "polygon", "=", "((", "x0", "y0", ")", "(", "x1", "y1", ")", "...", ")" ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/parse.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L202-L217
def _check_for_legal_children(self, name, elt, mustqualify=1):
    '''Check if all children of this node are elements or whitespace-only
    text nodes.
    '''
    inheader = name == "Header"
    for child in _children(elt):
        node_type = child.nodeType
        # Comments are always acceptable.
        if node_type == _Node.COMMENT_NODE:
            continue
        if node_type != _Node.ELEMENT_NODE:
            # Whitespace-only text nodes are tolerated; everything else
            # (non-blank text, CDATA, PIs, ...) is an error.
            if node_type == _Node.TEXT_NODE and child.nodeValue.strip() == "":
                continue
            raise ParseException("Non-element child in " + name,
                                 inheader, elt, self.dom)
        if mustqualify and not child.namespaceURI:
            raise ParseException('Unqualified element "' +
                                 child.nodeName + '" in ' + name,
                                 inheader, elt, self.dom)
[ "def", "_check_for_legal_children", "(", "self", ",", "name", ",", "elt", ",", "mustqualify", "=", "1", ")", ":", "inheader", "=", "name", "==", "\"Header\"", "for", "n", "in", "_children", "(", "elt", ")", ":", "t", "=", "n", ".", "nodeType", "if", ...
Check if all children of this node are elements or whitespace-only text nodes.
[ "Check", "if", "all", "children", "of", "this", "node", "are", "elements", "or", "whitespace", "-", "only", "text", "nodes", "." ]
python
train
galaxy-genome-annotation/python-apollo
arrow/cli.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/cli.py#L150-L161
def json_loads(data):
    """Load json data, allowing - to represent stdin."""
    # Guard clauses, most specific first: None, stdin marker, on-disk file.
    if data is None:
        return ""
    if data == "-":
        return json.load(sys.stdin)
    if os.path.exists(data):
        with open(data, 'r') as handle:
            return json.load(handle)
    # Otherwise treat the argument itself as a JSON document.
    return json.loads(data)
[ "def", "json_loads", "(", "data", ")", ":", "if", "data", "is", "None", ":", "return", "\"\"", "if", "data", "==", "\"-\"", ":", "return", "json", ".", "load", "(", "sys", ".", "stdin", ")", "elif", "os", ".", "path", ".", "exists", "(", "data", ...
Load json data, allowing - to represent stdin.
[ "Load", "json", "data", "allowing", "-", "to", "represent", "stdin", "." ]
python
train
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L133-L142
def set_tracking_id(self, data, clear=False):
    """ Add tracking-id data to the tracking_id group.

    :param data: A dictionary of key/value pairs. Keys must be strings.
        Values can be strings or numeric values.
    :param clear: If set, any existing tracking-id data will be removed.
    """
    self.assert_writeable()
    group_path = self.global_key + 'tracking_id'
    self._add_attributes(group_path, data, clear)
[ "def", "set_tracking_id", "(", "self", ",", "data", ",", "clear", "=", "False", ")", ":", "self", ".", "assert_writeable", "(", ")", "self", ".", "_add_attributes", "(", "self", ".", "global_key", "+", "'tracking_id'", ",", "data", ",", "clear", ")", "re...
Add tracking-id data to the tracking_id group. :param data: A dictionary of key/value pairs. Keys must be strings. Values can be strings or numeric values. :param clear: If set, any existing tracking-id data will be removed.
[ "Add", "tracking", "-", "id", "data", "to", "the", "tracking_id", "group", ".", ":", "param", "data", ":", "A", "dictionary", "of", "key", "/", "value", "pairs", ".", "Keys", "must", "be", "strings", ".", "Values", "can", "be", "strings", "or", "numeri...
python
train
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L59-L69
def _load_scratch_orgs(self):
    """ Creates all scratch org configs for the project in the keychain
    if a keychain org doesn't already exist """
    existing = self.list_orgs()
    scratch_configs = self.project_config.orgs__scratch
    if not scratch_configs:
        return
    for name in scratch_configs.keys():
        # Never overwrite an org that is already in the keychain.
        if name not in existing:
            self.create_scratch_org(name, name)
[ "def", "_load_scratch_orgs", "(", "self", ")", ":", "current_orgs", "=", "self", ".", "list_orgs", "(", ")", "if", "not", "self", ".", "project_config", ".", "orgs__scratch", ":", "return", "for", "config_name", "in", "self", ".", "project_config", ".", "org...
Creates all scratch org configs for the project in the keychain if a keychain org doesn't already exist
[ "Creates", "all", "scratch", "org", "configs", "for", "the", "project", "in", "the", "keychain", "if", "a", "keychain", "org", "doesn", "t", "already", "exist" ]
python
train
mwickert/scikit-dsp-comm
sk_dsp_comm/sigsys.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L657-L684
def lp_tri(f, fb):
    """
    Triangle spectral shape function used by :func:`lp_samp`.

    Parameters
    ----------
    f : ndarray containing frequency samples
    fb : the bandwidth as a float constant

    Returns
    -------
    x : ndarray of spectrum samples for a single triangle shape

    Notes
    -----
    This is a support function for the lowpass spectrum plotting function
    :func:`lp_samp`. The original element-wise Python loop is replaced by
    an equivalent vectorized numpy expression.

    Examples
    --------

    >>> x = lp_tri(f, fb)
    """
    f = np.asarray(f, dtype=float)
    absf = np.abs(f)
    # 1 - |f|/fb inside the band (|f| <= fb), exactly 0 outside.
    return np.where(absf <= fb, 1 - absf / float(fb), 0.0)
[ "def", "lp_tri", "(", "f", ",", "fb", ")", ":", "x", "=", "np", ".", "zeros", "(", "len", "(", "f", ")", ")", "for", "k", "in", "range", "(", "len", "(", "f", ")", ")", ":", "if", "abs", "(", "f", "[", "k", "]", ")", "<=", "fb", ":", ...
Triangle spectral shape function used by :func:`lp_samp`. Parameters ---------- f : ndarray containing frequency samples fb : the bandwidth as a float constant Returns ------- x : ndarray of spectrum samples for a single triangle shape Notes ----- This is a support function for the lowpass spectrum plotting function :func:`lp_samp`. Examples -------- >>> x = lp_tri(f, fb)
[ "Triangle", "spectral", "shape", "function", "used", "by", ":", "func", ":", "lp_samp", "." ]
python
valid
ryukinix/decorating
decorating/animation.py
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L235-L246
def start(self, autopush=True):
    """Start a new animation instance

    :param autopush: if True, push the current message onto the message
        stack before composing the spinner text.
    """
    if self.enabled:
        if autopush:
            self.push_message(self.message)
        # Spinner text is the joined stack of pending messages.
        self.spinner.message = ' - '.join(self.animation.messages)
        if not self.spinner.running:
            self.animation.thread = threading.Thread(target=_spinner,
                                                     args=(self.spinner,))
            # Flag is set before start() so the spinner loop sees a
            # consistent running state from its first iteration.
            self.spinner.running = True
            self.animation.thread.start()
            # NOTE(review): stdout is wrapped so normal prints don't clash
            # with the spinner's stream; assumed to belong inside this
            # "not running" branch (only redirect once) — TODO confirm.
            sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
[ "def", "start", "(", "self", ",", "autopush", "=", "True", ")", ":", "if", "self", ".", "enabled", ":", "if", "autopush", ":", "self", ".", "push_message", "(", "self", ".", "message", ")", "self", ".", "spinner", ".", "message", "=", "' - '", ".", ...
Start a new animation instance
[ "Start", "a", "new", "animation", "instance" ]
python
train
leancloud/python-sdk
leancloud/relation.py
https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/relation.py#L45-L56
def add(self, *obj_or_objs):
    """
    Add one or more new leancloud.Object instances to this Relation.

    :param obj_or_objs: the object, or list/tuple of objects, to add
    """
    # *args always yields a tuple, so the fallback wrap only triggers for
    # non-sequence values reaching this code through other call paths.
    objects = obj_or_objs if isinstance(obj_or_objs, (list, tuple)) else (obj_or_objs, )
    change = operation.Relation(objects, ())
    self.parent.set(self.key, change)
    self.target_class_name = change._target_class_name
[ "def", "add", "(", "self", ",", "*", "obj_or_objs", ")", ":", "objs", "=", "obj_or_objs", "if", "not", "isinstance", "(", "obj_or_objs", ",", "(", "list", ",", "tuple", ")", ")", ":", "objs", "=", "(", "obj_or_objs", ",", ")", "change", "=", "operati...
添加一个新的 leancloud.Object 至 Relation。 :param obj_or_objs: 需要添加的对象或对象列表
[ "添加一个新的", "leancloud", ".", "Object", "至", "Relation。" ]
python
train
venmo/business-rules
business_rules/engine.py
https://github.com/venmo/business-rules/blob/6c79036c030e2c6b8de5524a95231fd30048defa/business_rules/engine.py#L48-L55
def check_condition(condition, defined_variables):
    """ Checks a single rule condition - the condition will be made up of
    variables, values, and the comparison operator. The defined_variables
    object must have a variable defined for any variables in this condition.
    """
    name = condition['name']
    op = condition['operator']
    value = condition['value']
    operator_type = _get_variable_value(defined_variables, name)
    return _do_operator_comparison(operator_type, op, value)
[ "def", "check_condition", "(", "condition", ",", "defined_variables", ")", ":", "name", ",", "op", ",", "value", "=", "condition", "[", "'name'", "]", ",", "condition", "[", "'operator'", "]", ",", "condition", "[", "'value'", "]", "operator_type", "=", "_...
Checks a single rule condition - the condition will be made up of variables, values, and the comparison operator. The defined_variables object must have a variable defined for any variables in this condition.
[ "Checks", "a", "single", "rule", "condition", "-", "the", "condition", "will", "be", "made", "up", "of", "variables", "values", "and", "the", "comparison", "operator", ".", "The", "defined_variables", "object", "must", "have", "a", "variable", "defined", "for"...
python
train
gaqzi/django-emoji
emoji/models.py
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L125-L136
def replace(cls, replacement_string):
    """Add in valid emojis in a string where a valid emoji is between ::"""
    instance = cls()

    def _substitute(match):
        # Unknown emoji names are left untouched (full original match).
        candidate = match.group(1)
        if candidate not in instance:
            return match.group(0)
        return instance._image_string(candidate)

    return instance._pattern.sub(_substitute, replacement_string)
[ "def", "replace", "(", "cls", ",", "replacement_string", ")", ":", "e", "=", "cls", "(", ")", "def", "_replace_emoji", "(", "match", ")", ":", "val", "=", "match", ".", "group", "(", "1", ")", "if", "val", "in", "e", ":", "return", "e", ".", "_im...
Add in valid emojis in a string where a valid emoji is between ::
[ "Add", "in", "valid", "emojis", "in", "a", "string", "where", "a", "valid", "emoji", "is", "between", "::" ]
python
train
i3visio/osrframework
osrframework/api/twitter_api.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/api/twitter_api.py#L641-L667
def get_followers(self, query):
    """
    Method to get the followers of a user.

    :param query: Query to be performed.

    :return: List of ids. An empty list is returned when the API call fails.
    """
    # Connecting to the API
    api = self._connectToAPI()

    # Verifying the limits of the API
    self._rate_limit_status(api=api, mode="get_followers")

    # Making the call to the API. The previous bare ``except:`` swallowed
    # *everything* including KeyboardInterrupt/SystemExit; narrowing to
    # Exception keeps the best-effort behavior while letting
    # system-exiting exceptions propagate.
    try:
        friends_ids = api.followers_ids(query)
    except Exception:
        return []

    return friends_ids
[ "def", "get_followers", "(", "self", ",", "query", ")", ":", "# Connecting to the API", "api", "=", "self", ".", "_connectToAPI", "(", ")", "# Verifying the limits of the API", "self", ".", "_rate_limit_status", "(", "api", "=", "api", ",", "mode", "=", "\"get_f...
Method to get the followers of a user. :param query: Query to be performed. :return: List of ids.
[ "Method", "to", "get", "the", "followers", "of", "a", "user", "." ]
python
train
MultipedRobotics/pyxl320
pyxl320/Packet.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L156-L163
def makeWritePacket(ID, reg, values=None):
    """
    Creates a packet that writes a value(s) to servo ID at location reg. Make
    sure the values are in little endian (use Packet.le() if necessary) for
    16 b (word size) values.
    """
    # Thin wrapper: delegate straight to the generic packet builder with
    # the WRITE instruction code.
    return makePacket(ID, xl320.XL320_WRITE, reg, values)
[ "def", "makeWritePacket", "(", "ID", ",", "reg", ",", "values", "=", "None", ")", ":", "pkt", "=", "makePacket", "(", "ID", ",", "xl320", ".", "XL320_WRITE", ",", "reg", ",", "values", ")", "return", "pkt" ]
Creates a packet that writes a value(s) to servo ID at location reg. Make sure the values are in little endian (use Packet.le() if necessary) for 16 b (word size) values.
[ "Creates", "a", "packet", "that", "writes", "a", "value", "(", "s", ")", "to", "servo", "ID", "at", "location", "reg", ".", "Make", "sure", "the", "values", "are", "in", "little", "endian", "(", "use", "Packet", ".", "le", "()", "if", "necessary", ")...
python
train
exosite-labs/pyonep
pyonep/provision.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/provision.py#L263-L278
def content_upload(self, key, model, contentid, data, mimetype):
    """Store the given data as a result of a query for content id given the
    model. This method maps to
    https://github.com/exosite/docs/tree/master/provision#post---upload-content

    Args:
        key: The CIK or Token for the device
        model:
        contentid: The ID used to name the entity bucket
        data: The data blob to save
        mimetype: The Content-Type to use when serving the blob later
    """
    target_path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
    headers = {"Content-Type": mimetype}
    return self._request(target_path, key, data, 'POST',
                         self._manage_by_cik, headers)
[ "def", "content_upload", "(", "self", ",", "key", ",", "model", ",", "contentid", ",", "data", ",", "mimetype", ")", ":", "headers", "=", "{", "\"Content-Type\"", ":", "mimetype", "}", "path", "=", "PROVISION_MANAGE_CONTENT", "+", "model", "+", "'/'", "+",...
Store the given data as a result of a query for content id given the model. This method maps to https://github.com/exosite/docs/tree/master/provision#post---upload-content Args: key: The CIK or Token for the device model: contentid: The ID used to name the entity bucket data: The data blob to save mimetype: The Content-Type to use when serving the blob later
[ "Store", "the", "given", "data", "as", "a", "result", "of", "a", "query", "for", "content", "id", "given", "the", "model", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L927-L938
def add_semantic_data(self, path_as_list, value, key):
    """ Adds a semantic data entry.

    :param list path_as_list: The path in the vividict to enter the value
    :param value: The value of the new entry.
    :param key: The key of the new entry.
    :return: the full path of the new entry (path_as_list plus key)
    """
    assert isinstance(key, string_types)
    # Resolve the nested dict at the given path and insert the new entry.
    self.get_semantic_data(path_as_list)[key] = value
    return path_as_list + [key]
[ "def", "add_semantic_data", "(", "self", ",", "path_as_list", ",", "value", ",", "key", ")", ":", "assert", "isinstance", "(", "key", ",", "string_types", ")", "target_dict", "=", "self", ".", "get_semantic_data", "(", "path_as_list", ")", "target_dict", "[", ...
Adds a semantic data entry. :param list path_as_list: The path in the vividict to enter the value :param value: The value of the new entry. :param key: The key of the new entry. :return:
[ "Adds", "a", "semantic", "data", "entry", "." ]
python
train
zyga/json-schema-validator
json_schema_validator/schema.py
https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L56-L98
def type(self):
    """
    Type of a valid object.

    Type may be a JSON type name or a list of such names. Valid JSON
    type names are ``string``, ``number``, ``integer``, ``boolean``,
    ``object``, ``array``, ``any`` (default).
    """
    # Missing "type" defaults to "any" (matches everything).
    # NOTE(review): ``basestring`` implies this module targets Python 2.
    value = self._schema.get("type", "any")
    if not isinstance(value, (basestring, dict, list)):
        raise SchemaError(
            "type value {0!r} is not a simple type name, nested "
            "schema nor a list of those".format(value))
    if isinstance(value, list):
        type_list = value
        # Union types have to have at least two alternatives
        if len(type_list) < 2:
            raise SchemaError(
                "union type {0!r} is too short".format(value))
    else:
        # Normalize the single-name case to a one-element list so the
        # validation loop below handles both shapes uniformly.
        type_list = [value]
    # Track simple names already seen to reject duplicates in a union.
    seen = set()
    for js_type in type_list:
        if isinstance(js_type, dict):
            # no nested validation here
            pass
        elif isinstance(js_type, list):
            # no nested validation here
            pass
        else:
            if js_type in seen:
                raise SchemaError(
                    ("type value {0!r} contains duplicate element"
                     " {1!r}").format(value, js_type))
            else:
                seen.add(js_type)
            # Only the fixed set of simple JSON type names is accepted.
            if js_type not in (
                    "string", "number", "integer", "boolean", "object",
                    "array", "null", "any"):
                raise SchemaError(
                    "type value {0!r} is not a simple type "
                    "name".format(js_type))
    return value
[ "def", "type", "(", "self", ")", ":", "value", "=", "self", ".", "_schema", ".", "get", "(", "\"type\"", ",", "\"any\"", ")", "if", "not", "isinstance", "(", "value", ",", "(", "basestring", ",", "dict", ",", "list", ")", ")", ":", "raise", "Schema...
Type of a valid object. Type may be a JSON type name or a list of such names. Valid JSON type names are ``string``, ``number``, ``integer``, ``boolean``, ``object``, ``array``, ``any`` (default).
[ "Type", "of", "a", "valid", "object", "." ]
python
train
hobson/pug-dj
pug/dj/crawler/models.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/models.py#L81-L83
def import_wiki_json(path='wikipedia_crawler_data.json', model=WikiItem, batch_len=100, db_alias='default', verbosity=2):
    """Read json file and create the appropriate records according to the given database model."""
    # Pure delegation: forward every option to the generic JSON importer.
    options = dict(path=path, model=model, batch_len=batch_len,
                   db_alias=db_alias, verbosity=verbosity)
    return djdb.import_json(**options)
[ "def", "import_wiki_json", "(", "path", "=", "'wikipedia_crawler_data.json'", ",", "model", "=", "WikiItem", ",", "batch_len", "=", "100", ",", "db_alias", "=", "'default'", ",", "verbosity", "=", "2", ")", ":", "return", "djdb", ".", "import_json", "(", "pa...
Read json file and create the appropriate records according to the given database model.
[ "Read", "json", "file", "and", "create", "the", "appropriate", "records", "according", "to", "the", "given", "database", "model", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rc.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rc.py#L31-L40
def send_rc_override(self):
    '''send RC override packet'''
    # SITL path: write the 8 channels as a packed little-endian struct.
    if self.sitl_output:
        packed = struct.pack('<HHHHHHHH', *self.override)
        self.sitl_output.write(packed)
        return
    # Live path: send a MAVLink RC_CHANNELS_OVERRIDE message.
    self.master.mav.rc_channels_override_send(self.target_system,
                                              self.target_component,
                                              *self.override)
[ "def", "send_rc_override", "(", "self", ")", ":", "if", "self", ".", "sitl_output", ":", "buf", "=", "struct", ".", "pack", "(", "'<HHHHHHHH'", ",", "*", "self", ".", "override", ")", "self", ".", "sitl_output", ".", "write", "(", "buf", ")", "else", ...
send RC override packet
[ "send", "RC", "override", "packet" ]
python
train
mikedh/trimesh
trimesh/exchange/load.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/load.py#L167-L228
def load_mesh(file_obj, file_type=None, resolver=None, **kwargs): """ Load a mesh file into a Trimesh object Parameters ----------- file_obj : str or file object File name or file with mesh data file_type : str or None Which file type, e.g. 'stl' kwargs : dict Passed to Trimesh constructor Returns ---------- mesh : trimesh.Trimesh or trimesh.Scene Loaded geometry data """ # parse the file arguments into clean loadable form (file_obj, # file- like object file_type, # str, what kind of file metadata, # dict, any metadata from file name opened, # bool, did we open the file ourselves resolver # object to load referenced resources ) = parse_file_args(file_obj=file_obj, file_type=file_type, resolver=resolver) try: # make sure we keep passed kwargs to loader # but also make sure loader keys override passed keys results = mesh_loaders[file_type](file_obj, file_type=file_type, resolver=resolver, **kwargs) if util.is_file(file_obj): file_obj.close() log.debug('loaded mesh using %s', mesh_loaders[file_type].__name__) if not isinstance(results, list): results = [results] loaded = [] for result in results: kwargs.update(result) loaded.append(load_kwargs(kwargs)) loaded[-1].metadata.update(metadata) if len(loaded) == 1: loaded = loaded[0] finally: # if we failed to load close file if opened: file_obj.close() return loaded
[ "def", "load_mesh", "(", "file_obj", ",", "file_type", "=", "None", ",", "resolver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# parse the file arguments into clean loadable form", "(", "file_obj", ",", "# file- like object", "file_type", ",", "# str, what ki...
Load a mesh file into a Trimesh object Parameters ----------- file_obj : str or file object File name or file with mesh data file_type : str or None Which file type, e.g. 'stl' kwargs : dict Passed to Trimesh constructor Returns ---------- mesh : trimesh.Trimesh or trimesh.Scene Loaded geometry data
[ "Load", "a", "mesh", "file", "into", "a", "Trimesh", "object" ]
python
train
python-openxml/python-docx
docx/parts/document.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/parts/document.py#L33-L37
def add_header_part(self): """Return (header_part, rId) pair for newly-created header part.""" header_part = HeaderPart.new(self.package) rId = self.relate_to(header_part, RT.HEADER) return header_part, rId
[ "def", "add_header_part", "(", "self", ")", ":", "header_part", "=", "HeaderPart", ".", "new", "(", "self", ".", "package", ")", "rId", "=", "self", ".", "relate_to", "(", "header_part", ",", "RT", ".", "HEADER", ")", "return", "header_part", ",", "rId" ...
Return (header_part, rId) pair for newly-created header part.
[ "Return", "(", "header_part", "rId", ")", "pair", "for", "newly", "-", "created", "header", "part", "." ]
python
train
Kromey/django-simplecaptcha
simplecaptcha/widgets.py
https://github.com/Kromey/django-simplecaptcha/blob/16dd401e3317daf78143e9250f98b48c22cabd2d/simplecaptcha/widgets.py#L68-L97
def _generate_question(self): """Generate a random arithmetic question This method randomly generates a simple addition, subtraction, or multiplication question with two integers between 1 and 10, and then returns both question (formatted as a string) and answer. """ x = random.randint(1, 10) y = random.randint(1, 10) operator = random.choice(('+', '-', '*',)) if operator == '+': answer = x + y elif operator == '-': # Ensure we'll get a non-negative answer if x < y: x, y = y, x answer = x - y else: # Multiplication is hard, make it easier x = math.ceil(x/2) y = math.ceil(y/2) answer = x * y # Use a prettied-up HTML multiplication character operator = '&times;' # Format the answer nicely, then mark it as safe so Django won't escape it question = '{} {} {}'.format(x, operator, y) return mark_safe(question), answer
[ "def", "_generate_question", "(", "self", ")", ":", "x", "=", "random", ".", "randint", "(", "1", ",", "10", ")", "y", "=", "random", ".", "randint", "(", "1", ",", "10", ")", "operator", "=", "random", ".", "choice", "(", "(", "'+'", ",", "'-'",...
Generate a random arithmetic question This method randomly generates a simple addition, subtraction, or multiplication question with two integers between 1 and 10, and then returns both question (formatted as a string) and answer.
[ "Generate", "a", "random", "arithmetic", "question" ]
python
train
udragon/pybrctl
pybrctl/pybrctl.py
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L73-L76
def setpathcost(self, port, cost): """ Set port path cost value for STP protocol. """ _runshell([brctlexe, 'setpathcost', self.name, port, str(cost)], "Could not set path cost in port %s in %s." % (port, self.name))
[ "def", "setpathcost", "(", "self", ",", "port", ",", "cost", ")", ":", "_runshell", "(", "[", "brctlexe", ",", "'setpathcost'", ",", "self", ".", "name", ",", "port", ",", "str", "(", "cost", ")", "]", ",", "\"Could not set path cost in port %s in %s.\"", ...
Set port path cost value for STP protocol.
[ "Set", "port", "path", "cost", "value", "for", "STP", "protocol", "." ]
python
train
pybel/pybel
src/pybel/manager/base_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/base_manager.py#L24-L78
def build_engine_session(connection: str, echo: bool = False, autoflush: Optional[bool] = None, autocommit: Optional[bool] = None, expire_on_commit: Optional[bool] = None, scopefunc=None) -> Tuple: """Build an engine and a session. :param connection: An RFC-1738 database connection string :param echo: Turn on echoing SQL :param autoflush: Defaults to True if not specified in kwargs or configuration. :param autocommit: Defaults to False if not specified in kwargs or configuration. :param expire_on_commit: Defaults to False if not specified in kwargs or configuration. :param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session` :rtype: tuple[Engine,Session] From the Flask-SQLAlchemy documentation: An extra key ``'scopefunc'`` can be set on the ``options`` dict to specify a custom scope function. If it's not provided, Flask's app context stack identity is used. This will ensure that sessions are created and removed with the request/response cycle, and should be fine in most cases. """ if connection is None: raise ValueError('can not build engine when connection is None') engine = create_engine(connection, echo=echo) if autoflush is None: autoflush = config.get('PYBEL_MANAGER_AUTOFLUSH', False) if autocommit is None: autocommit = config.get('PYBEL_MANAGER_AUTOCOMMIT', False) if expire_on_commit is None: expire_on_commit = config.get('PYBEL_MANAGER_AUTOEXPIRE', True) log.debug('auto flush: %s, auto commit: %s, expire on commmit: %s', autoflush, autocommit, expire_on_commit) #: A SQLAlchemy session maker session_maker = sessionmaker( bind=engine, autoflush=autoflush, autocommit=autocommit, expire_on_commit=expire_on_commit, ) #: A SQLAlchemy session object session = scoped_session( session_maker, scopefunc=scopefunc, ) return engine, session
[ "def", "build_engine_session", "(", "connection", ":", "str", ",", "echo", ":", "bool", "=", "False", ",", "autoflush", ":", "Optional", "[", "bool", "]", "=", "None", ",", "autocommit", ":", "Optional", "[", "bool", "]", "=", "None", ",", "expire_on_com...
Build an engine and a session. :param connection: An RFC-1738 database connection string :param echo: Turn on echoing SQL :param autoflush: Defaults to True if not specified in kwargs or configuration. :param autocommit: Defaults to False if not specified in kwargs or configuration. :param expire_on_commit: Defaults to False if not specified in kwargs or configuration. :param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session` :rtype: tuple[Engine,Session] From the Flask-SQLAlchemy documentation: An extra key ``'scopefunc'`` can be set on the ``options`` dict to specify a custom scope function. If it's not provided, Flask's app context stack identity is used. This will ensure that sessions are created and removed with the request/response cycle, and should be fine in most cases.
[ "Build", "an", "engine", "and", "a", "session", "." ]
python
train
explosion/spaCy
spacy/cli/converters/conllu2json.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/converters/conllu2json.py#L9-L37
def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None): """ Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme """ # by @dvsrepo, via #11 explosion/spacy-dev-resources # by @katarkor docs = [] sentences = [] conll_tuples = read_conllx(input_data, use_morphology=use_morphology) checked_for_ner = False has_ner_tags = False for i, (raw_text, tokens) in enumerate(conll_tuples): sentence, brackets = tokens[0] if not checked_for_ner: has_ner_tags = is_ner(sentence[5][0]) checked_for_ner = True sentences.append(generate_sentence(sentence, has_ner_tags)) # Real-sized documents could be extracted using the comments on the # conluu document if len(sentences) % n_sents == 0: doc = create_doc(sentences, i) docs.append(doc) sentences = [] return docs
[ "def", "conllu2json", "(", "input_data", ",", "n_sents", "=", "10", ",", "use_morphology", "=", "False", ",", "lang", "=", "None", ")", ":", "# by @dvsrepo, via #11 explosion/spacy-dev-resources", "# by @katarkor", "docs", "=", "[", "]", "sentences", "=", "[", "...
Convert conllu files into JSON format for use with train cli. use_morphology parameter enables appending morphology to tags, which is useful for languages such as Spanish, where UD tags are not so rich. Extract NER tags if available and convert them so that they follow BILUO and the Wikipedia scheme
[ "Convert", "conllu", "files", "into", "JSON", "format", "for", "use", "with", "train", "cli", ".", "use_morphology", "parameter", "enables", "appending", "morphology", "to", "tags", "which", "is", "useful", "for", "languages", "such", "as", "Spanish", "where", ...
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4904-L4916
def getitem_via_sibseqs(ol,*sibseqs): ''' from elist.elist import * y = ['a',['b',["bb"]],'c'] y[1][1] getitem_via_sibseqs(y,1,1) ''' pathlist = list(sibseqs) this = ol for i in range(0,pathlist.__len__()): key = pathlist[i] this = this.__getitem__(key) return(this)
[ "def", "getitem_via_sibseqs", "(", "ol", ",", "*", "sibseqs", ")", ":", "pathlist", "=", "list", "(", "sibseqs", ")", "this", "=", "ol", "for", "i", "in", "range", "(", "0", ",", "pathlist", ".", "__len__", "(", ")", ")", ":", "key", "=", "pathlist...
from elist.elist import * y = ['a',['b',["bb"]],'c'] y[1][1] getitem_via_sibseqs(y,1,1)
[ "from", "elist", ".", "elist", "import", "*", "y", "=", "[", "a", "[", "b", "[", "bb", "]]", "c", "]", "y", "[", "1", "]", "[", "1", "]", "getitem_via_sibseqs", "(", "y", "1", "1", ")" ]
python
valid
higlass/higlass-python
higlass/server.py
https://github.com/higlass/higlass-python/blob/0a5bf2759cc0020844aefbf0df4f9e8f9137a0b7/higlass/server.py#L388-L395
def stop(self): """ Stop this server so that the calling process can exit """ # unsetup_fuse() self.fuse_process.teardown() for uuid in self.processes: self.processes[uuid].terminate()
[ "def", "stop", "(", "self", ")", ":", "# unsetup_fuse()", "self", ".", "fuse_process", ".", "teardown", "(", ")", "for", "uuid", "in", "self", ".", "processes", ":", "self", ".", "processes", "[", "uuid", "]", ".", "terminate", "(", ")" ]
Stop this server so that the calling process can exit
[ "Stop", "this", "server", "so", "that", "the", "calling", "process", "can", "exit" ]
python
train
couchbase/couchbase-python-client
couchbase/bucket.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L1185-L1192
def prepend_multi(self, keys, format=None, persist_to=0, replicate_to=0): """Prepend to multiple keys. Multi variant of :meth:`prepend` .. seealso:: :meth:`prepend`, :meth:`upsert_multi`, :meth:`upsert` """ return _Base.prepend_multi(self, keys, format=format, persist_to=persist_to, replicate_to=replicate_to)
[ "def", "prepend_multi", "(", "self", ",", "keys", ",", "format", "=", "None", ",", "persist_to", "=", "0", ",", "replicate_to", "=", "0", ")", ":", "return", "_Base", ".", "prepend_multi", "(", "self", ",", "keys", ",", "format", "=", "format", ",", ...
Prepend to multiple keys. Multi variant of :meth:`prepend` .. seealso:: :meth:`prepend`, :meth:`upsert_multi`, :meth:`upsert`
[ "Prepend", "to", "multiple", "keys", ".", "Multi", "variant", "of", ":", "meth", ":", "prepend" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_data_filter.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_data_filter.py#L75-L107
def _index_filter(index_data, filter_value, filter_operator, field_converter=None): """Post Filter Args: index_data (dictionary): The indexed data for the provided field. field (string): The field to filter on. filter_value (string | list): The value to match. filter_operator (string): The operator for comparison. field_converter (method): A method used to convert the field before comparison. Returns: (list): Matching data objects """ filtered_data = [] if filter_operator == operator.eq: if field_converter is not None: filter_value = field_converter(filter_value) # for data_obj in index_data: # yield data_obj.data filtered_data = index_data.get(filter_value) else: for field, data_obj_list in index_data.items(): if field_converter is not None: field = field_converter(field) if filter_operator(field, filter_value): # bcs enum filtered_data.extend(data_obj_list) # for data_obj in data_obj_list: # yield data_obj.data return filtered_data
[ "def", "_index_filter", "(", "index_data", ",", "filter_value", ",", "filter_operator", ",", "field_converter", "=", "None", ")", ":", "filtered_data", "=", "[", "]", "if", "filter_operator", "==", "operator", ".", "eq", ":", "if", "field_converter", "is", "no...
Post Filter Args: index_data (dictionary): The indexed data for the provided field. field (string): The field to filter on. filter_value (string | list): The value to match. filter_operator (string): The operator for comparison. field_converter (method): A method used to convert the field before comparison. Returns: (list): Matching data objects
[ "Post", "Filter" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/rl_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L77-L97
def evaluate_single_config( hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn=_eval_fn_with_learner ): """Evaluate the PPO agent in the real environment.""" tf.logging.info("Evaluating metric %s", get_metric_name( sampling_temp, max_num_noops, clipped=False )) eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params) env = setup_env( hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops, rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp) rollouts = env.current_epoch_rollouts() env.close() return tuple( compute_mean_reward(rollouts, clipped) for clipped in (True, False) )
[ "def", "evaluate_single_config", "(", "hparams", ",", "sampling_temp", ",", "max_num_noops", ",", "agent_model_dir", ",", "eval_fn", "=", "_eval_fn_with_learner", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Evaluating metric %s\"", ",", "get_metric_name", "...
Evaluate the PPO agent in the real environment.
[ "Evaluate", "the", "PPO", "agent", "in", "the", "real", "environment", "." ]
python
train
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L208-L227
def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs
[ "def", "_set_packages", "(", "self", ",", "node", ")", ":", "pkgs", "=", "etree", ".", "SubElement", "(", "node", ",", "'packages'", ")", "for", "pkg_name", ",", "pkg_version", "in", "sorted", "(", "self", ".", "_data", ".", "software", ".", "get", "("...
Set packages and collections. :param node: :return:
[ "Set", "packages", "and", "collections", "." ]
python
train
westurner/pgs
pgs/app.py
https://github.com/westurner/pgs/blob/1cc2bf2c41479d8d3ba50480f003183f1675e518/pgs/app.py#L460-L542
def git_static_file(filename, mimetype='auto', download=False, charset='UTF-8'): """ This method is derived from bottle.static_file: Open [a file] and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since`` [...]. :param filename: Name or path of the file to send. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8) """ # root = os.path.abspath(root) + os.sep # filename = os.path.abspath(pathjoin(root, filename.strip('/\\'))) filename = filename.strip('/\\') headers = dict() FS = request.app.config['pgs.FS'] # if not filename.startswith(root): # return HTTPError(403, "Access denied.") if not FS.exists(filename): return HTTPError(404, "Not found.") # if not os.access(filename, os.R_OK): # return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': if download and download is not True: mimetype, encoding = mimetypes.guess_type(download) else: mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download # stats = os.stat(filename) info = FS.getinfo(filename) headers['Content-Length'] = clen = info['size'] lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", 
time.gmtime(info['modified_time'])) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) mtime = info['modified_time'] if mtime and ims is not None and ims >= int(mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else FS.get_fileobj(filename) clen # headers["Accept-Ranges"] = "bytes" # ranges = request.environ.get('HTTP_RANGE') # if 'HTTP_RANGE' in request.environ: # ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) # if not ranges: # return HTTPError(416, "Requested Range Not Satisfiable") # offset, end = ranges[0] # headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen) # headers["Content-Length"] = str(end - offset) # if body: body = _file_iter_range(body, offset, end - offset) # return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers)
[ "def", "git_static_file", "(", "filename", ",", "mimetype", "=", "'auto'", ",", "download", "=", "False", ",", "charset", "=", "'UTF-8'", ")", ":", "# root = os.path.abspath(root) + os.sep", "# filename = os.path.abspath(pathjoin(root, filename.strip('/\\\\')))", "filename", ...
This method is derived from bottle.static_file: Open [a file] and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since`` [...]. :param filename: Name or path of the file to send. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8)
[ "This", "method", "is", "derived", "from", "bottle", ".", "static_file", ":" ]
python
valid
tensorflow/cleverhans
cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py#L37-L149
def SNNL_example(train_start=0, train_end=60000, test_start=0, test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, nb_filters=NB_FILTERS, SNNL_factor=SNNL_FACTOR, output_dir=OUTPUT_DIR): """ A simple model trained to minimize Cross Entropy and Maximize Soft Nearest Neighbor Loss at each internal layer. This outputs a TSNE of the sign of the adversarial gradients of a trained model. A model with a negative SNNL_factor will show little or no class clusters, while a model with a 0 SNNL_factor will have class clusters in the adversarial gradient direction. :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :param nb_epochs: number of epochs to train model :param batch_size: size of training batches :param learning_rate: learning rate for training :param SNNL_factor: multiplier for Soft Nearest Neighbor Loss :return: an AccuracyReport object """ # Object used to keep track of (and return) key accuracies report = AccuracyReport() # Set TF random seed to improve reproducibility tf.set_random_seed(1234) # Set logging level to see debug information set_log_level(logging.DEBUG) # Create TF session sess = tf.Session() # Get MNIST data mnist = MNIST(train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end) x_train, y_train = mnist.get_set('train') x_test, y_test = mnist.get_set('test') # Use Image Parameters img_rows, img_cols, nchannels = x_train.shape[1:4] nb_classes = y_train.shape[1] # Define input TF placeholder x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels)) y = tf.placeholder(tf.float32, shape=(None, nb_classes)) # Train an MNIST model train_params = { 'nb_epochs': nb_epochs, 'batch_size': batch_size, 'learning_rate': learning_rate } eval_params = {'batch_size': batch_size} rng = np.random.RandomState([2017, 8, 30]) def 
do_eval(preds, x_set, y_set, report_key): acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params) setattr(report, report_key, acc) print('Test accuracy on legitimate examples: %0.4f' % (acc)) model = ModelBasicCNN('model', nb_classes, nb_filters) preds = model.get_logits(x) cross_entropy_loss = CrossEntropy(model) if not SNNL_factor: loss = cross_entropy_loss else: loss = SNNLCrossEntropy(model, factor=SNNL_factor, optimize_temperature=False) def evaluate(): do_eval(preds, x_test, y_test, 'clean_train_clean_eval') train(sess, loss, x_train, y_train, evaluate=evaluate, args=train_params, rng=rng, var_list=model.get_params()) do_eval(preds, x_train, y_train, 'train_clean_train_clean_eval') def imscatter(points, images, ax=None, zoom=1, cmap="hot"): if ax is None: ax = plt.gca() artists = [] i = 0 if not isinstance(cmap, list): cmap = [cmap] * len(points) for x0, y0 in points: transformed = (images[i] - np.min(images[i])) / \ (np.max(images[i]) - np.min(images[i])) im = OffsetImage(transformed[:, :, 0], zoom=zoom, cmap=cmap[i]) ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False) artists.append(ax.add_artist(ab)) i += 1 ax.update_datalim(np.column_stack(np.transpose(points))) ax.autoscale() ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) return artists adv_grads = tf.sign(tf.gradients(cross_entropy_loss.fprop(x, y), x)) feed_dict = {x: x_test[:batch_size], y: y_test[:batch_size]} adv_grads_val = sess.run(adv_grads, feed_dict=feed_dict) adv_grads_val = np.reshape(adv_grads_val, (batch_size, img_rows * img_cols)) X_embedded = TSNE(n_components=2, verbose=0).fit_transform(adv_grads_val) plt.figure(num=None, figsize=(50, 50), dpi=40, facecolor='w', edgecolor='k') plt.title("TSNE of Sign of Adv Gradients, SNNLCrossEntropy Model, factor:" + str(FLAGS.SNNL_factor), fontsize=42) imscatter(X_embedded, x_test[:batch_size], zoom=2, cmap="Purples") plt.savefig(output_dir + 'adversarial_gradients_SNNL_factor_' + str(SNNL_factor) + '.png')
[ "def", "SNNL_example", "(", "train_start", "=", "0", ",", "train_end", "=", "60000", ",", "test_start", "=", "0", ",", "test_end", "=", "10000", ",", "nb_epochs", "=", "NB_EPOCHS", ",", "batch_size", "=", "BATCH_SIZE", ",", "learning_rate", "=", "LEARNING_RA...
A simple model trained to minimize Cross Entropy and Maximize Soft Nearest Neighbor Loss at each internal layer. This outputs a TSNE of the sign of the adversarial gradients of a trained model. A model with a negative SNNL_factor will show little or no class clusters, while a model with a 0 SNNL_factor will have class clusters in the adversarial gradient direction. :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :param nb_epochs: number of epochs to train model :param batch_size: size of training batches :param learning_rate: learning rate for training :param SNNL_factor: multiplier for Soft Nearest Neighbor Loss :return: an AccuracyReport object
[ "A", "simple", "model", "trained", "to", "minimize", "Cross", "Entropy", "and", "Maximize", "Soft", "Nearest", "Neighbor", "Loss", "at", "each", "internal", "layer", ".", "This", "outputs", "a", "TSNE", "of", "the", "sign", "of", "the", "adversarial", "gradi...
python
train
rm-hull/luma.core
luma/core/image_composition.py
https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/image_composition.py#L171-L180
def refresh(self): """ Clears the composition and renders all the images taking into account their position and offset. """ self._clear() for img in self.composed_images: self._background_image.paste(img.image(self._device.size), img.position) self._background_image.crop(box=self._device.bounding_box)
[ "def", "refresh", "(", "self", ")", ":", "self", ".", "_clear", "(", ")", "for", "img", "in", "self", ".", "composed_images", ":", "self", ".", "_background_image", ".", "paste", "(", "img", ".", "image", "(", "self", ".", "_device", ".", "size", ")"...
Clears the composition and renders all the images taking into account their position and offset.
[ "Clears", "the", "composition", "and", "renders", "all", "the", "images", "taking", "into", "account", "their", "position", "and", "offset", "." ]
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/third/amqp/serialization.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/third/amqp/serialization.py#L330-L340
def write_longstr(self, s): """Write a string up to 2**32 bytes long after encoding. If passed a unicode string, encode as UTF-8. """ self._flushbits() if isinstance(s, text_t): s = s.encode('utf-8') self.write_long(len(s)) self.out.write(s)
[ "def", "write_longstr", "(", "self", ",", "s", ")", ":", "self", ".", "_flushbits", "(", ")", "if", "isinstance", "(", "s", ",", "text_t", ")", ":", "s", "=", "s", ".", "encode", "(", "'utf-8'", ")", "self", ".", "write_long", "(", "len", "(", "s...
Write a string up to 2**32 bytes long after encoding. If passed a unicode string, encode as UTF-8.
[ "Write", "a", "string", "up", "to", "2", "**", "32", "bytes", "long", "after", "encoding", "." ]
python
train
sci-bots/pygtkhelpers
pygtkhelpers/ui/objectlist/uuid_minimal.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/objectlist/uuid_minimal.py#L296-L305
def uuid4(): """Generate a random UUID.""" # Otherwise, get randomness from urandom or the 'random' module. try: import os return UUID(bytes=os.urandom(16), version=4) except Exception: import random bytes = [chr(random.randrange(256)) for i in range(16)] return UUID(bytes=bytes, version=4)
[ "def", "uuid4", "(", ")", ":", "# Otherwise, get randomness from urandom or the 'random' module.\r", "try", ":", "import", "os", "return", "UUID", "(", "bytes", "=", "os", ".", "urandom", "(", "16", ")", ",", "version", "=", "4", ")", "except", "Exception", ":...
Generate a random UUID.
[ "Generate", "a", "random", "UUID", "." ]
python
train
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L882-L891
def get_parameter(self, index): """Returns the ValueElement corresponding to the parameter at the specified index.""" result = None if index < len(self.paramorder): key = self.paramorder[index] if key in self._parameters: result = self._parameters[key] return result
[ "def", "get_parameter", "(", "self", ",", "index", ")", ":", "result", "=", "None", "if", "index", "<", "len", "(", "self", ".", "paramorder", ")", ":", "key", "=", "self", ".", "paramorder", "[", "index", "]", "if", "key", "in", "self", ".", "_par...
Returns the ValueElement corresponding to the parameter at the specified index.
[ "Returns", "the", "ValueElement", "corresponding", "to", "the", "parameter", "at", "the", "specified", "index", "." ]
python
train
timster/peewee-validates
peewee_validates.py
https://github.com/timster/peewee-validates/blob/417f0fafb87fe9209439d65bc279d86a3d9e8028/peewee_validates.py#L976-L1000
def perform_index_validation(self, data): """ Validate any unique indexes specified on the model. This should happen after all the normal fields have been validated. This can add error messages to multiple fields. :return: None """ # Build a list of dict containing query values for each unique index. index_data = [] for columns, unique in self.instance._meta.indexes: if not unique: continue index_data.append({col: data.get(col, None) for col in columns}) # Then query for each unique index to see if the value is unique. for index in index_data: query = self.instance.filter(**index) # If we have a primary key, need to exclude the current record from the check. if self.pk_field and self.pk_value: query = query.where(~(self.pk_field == self.pk_value)) if query.count(): err = ValidationError('index', fields=str.join(', ', index.keys())) for col in index.keys(): self.add_error(col, err)
[ "def", "perform_index_validation", "(", "self", ",", "data", ")", ":", "# Build a list of dict containing query values for each unique index.", "index_data", "=", "[", "]", "for", "columns", ",", "unique", "in", "self", ".", "instance", ".", "_meta", ".", "indexes", ...
Validate any unique indexes specified on the model. This should happen after all the normal fields have been validated. This can add error messages to multiple fields. :return: None
[ "Validate", "any", "unique", "indexes", "specified", "on", "the", "model", ".", "This", "should", "happen", "after", "all", "the", "normal", "fields", "have", "been", "validated", ".", "This", "can", "add", "error", "messages", "to", "multiple", "fields", "....
python
train
versae/neo4j-rest-client
neo4jrestclient/request.py
https://github.com/versae/neo4j-rest-client/blob/b03c09c8f598fa4dbad8ea8998ffb1c885805074/neo4jrestclient/request.py#L72-L77
def put(self, url, data, headers=None): """ Perform an HTTP PUT request for a given url. Returns the response object. """ return self._request('PUT', url, data, headers=headers)
[ "def", "put", "(", "self", ",", "url", ",", "data", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "_request", "(", "'PUT'", ",", "url", ",", "data", ",", "headers", "=", "headers", ")" ]
Perform an HTTP PUT request for a given url. Returns the response object.
[ "Perform", "an", "HTTP", "PUT", "request", "for", "a", "given", "url", ".", "Returns", "the", "response", "object", "." ]
python
train
cisco-sas/kitty
kitty/model/low_level/container.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L769-L780
def get_rendered_fields(self, ctx=None): ''' :param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered ''' if ctx is None: ctx = RenderContext() ctx.push(self) current = self._fields[self._field_idx] res = current.get_rendered_fields(ctx) ctx.pop() return res
[ "def", "get_rendered_fields", "(", "self", ",", "ctx", "=", "None", ")", ":", "if", "ctx", "is", "None", ":", "ctx", "=", "RenderContext", "(", ")", "ctx", ".", "push", "(", "self", ")", "current", "=", "self", ".", "_fields", "[", "self", ".", "_f...
:param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered
[ ":", "param", "ctx", ":", "rendering", "context", "in", "which", "the", "method", "was", "called", ":", "return", ":", "ordered", "list", "of", "the", "fields", "that", "will", "be", "rendered" ]
python
train
jxtech/wechatpy
wechatpy/client/api/wxa.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/wxa.py#L337-L355
def add_template(self, template_short_id, keyword_id_list): """ 组合模板,并将其添加至账号下的模板列表里 详情请参考 https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR :param template_short_id: 模板标题ID :param keyword_id_list: 按照顺序排列的模板关键词列表,最多10个 :type keyword_id_list: list[int] :return: 模板ID """ return self._post( 'cgi-bin/wxopen/template/add', data={ 'id': template_short_id, 'keyword_id_list': keyword_id_list, }, result_processor=lambda x: x['template_id'], )
[ "def", "add_template", "(", "self", ",", "template_short_id", ",", "keyword_id_list", ")", ":", "return", "self", ".", "_post", "(", "'cgi-bin/wxopen/template/add'", ",", "data", "=", "{", "'id'", ":", "template_short_id", ",", "'keyword_id_list'", ":", "keyword_i...
组合模板,并将其添加至账号下的模板列表里 详情请参考 https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR :param template_short_id: 模板标题ID :param keyword_id_list: 按照顺序排列的模板关键词列表,最多10个 :type keyword_id_list: list[int] :return: 模板ID
[ "组合模板,并将其添加至账号下的模板列表里", "详情请参考", "https", ":", "//", "open", ".", "weixin", ".", "qq", ".", "com", "/", "cgi", "-", "bin", "/", "showdocument?action", "=", "dir_list&id", "=", "open1500465446_j4CgR" ]
python
train
gijzelaerr/python-snap7
snap7/partner.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/partner.py#L119-L129
def get_param(self, number): """ Reads an internal Partner object parameter. """ logger.debug("retreiving param number %s" % number) type_ = snap7.snap7types.param_types[number] value = type_() code = self.library.Par_GetParam(self.pointer, ctypes.c_int(number), ctypes.byref(value)) check_error(code) return value.value
[ "def", "get_param", "(", "self", ",", "number", ")", ":", "logger", ".", "debug", "(", "\"retreiving param number %s\"", "%", "number", ")", "type_", "=", "snap7", ".", "snap7types", ".", "param_types", "[", "number", "]", "value", "=", "type_", "(", ")", ...
Reads an internal Partner object parameter.
[ "Reads", "an", "internal", "Partner", "object", "parameter", "." ]
python
train
harlowja/fasteners
fasteners/process_lock.py
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L130-L171
def acquire(self, blocking=True, delay=DELAY_INCREMENT, max_delay=MAX_DELAY, timeout=None): """Attempt to acquire the given lock. :param blocking: whether to wait forever to try to acquire the lock :type blocking: bool :param delay: when blocking this is the delay time in seconds that will be added after each failed acquisition :type delay: int/float :param max_delay: the maximum delay to have (this limits the accumulated delay(s) added after each failed acquisition) :type max_delay: int/float :param timeout: an optional timeout (limits how long blocking will occur for) :type timeout: int/float :returns: whether or not the acquisition succeeded :rtype: bool """ if delay < 0: raise ValueError("Delay must be greater than or equal to zero") if timeout is not None and timeout < 0: raise ValueError("Timeout must be greater than or equal to zero") if delay >= max_delay: max_delay = delay self._do_open() watch = _utils.StopWatch(duration=timeout) r = _utils.Retry(delay, max_delay, sleep_func=self.sleep_func, watch=watch) with watch: gotten = r(self._try_acquire, blocking, watch) if not gotten: self.acquired = False return False else: self.acquired = True self.logger.log(_utils.BLATHER, "Acquired file lock `%s` after waiting %0.3fs [%s" " attempts were required]", self.path, watch.elapsed(), r.attempts) return True
[ "def", "acquire", "(", "self", ",", "blocking", "=", "True", ",", "delay", "=", "DELAY_INCREMENT", ",", "max_delay", "=", "MAX_DELAY", ",", "timeout", "=", "None", ")", ":", "if", "delay", "<", "0", ":", "raise", "ValueError", "(", "\"Delay must be greater...
Attempt to acquire the given lock. :param blocking: whether to wait forever to try to acquire the lock :type blocking: bool :param delay: when blocking this is the delay time in seconds that will be added after each failed acquisition :type delay: int/float :param max_delay: the maximum delay to have (this limits the accumulated delay(s) added after each failed acquisition) :type max_delay: int/float :param timeout: an optional timeout (limits how long blocking will occur for) :type timeout: int/float :returns: whether or not the acquisition succeeded :rtype: bool
[ "Attempt", "to", "acquire", "the", "given", "lock", "." ]
python
train
inveniosoftware/invenio-access
examples/app.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/examples/app.py#L109-L124
def index(): """Basic test view.""" identity = g.identity actions = {} for action in access.actions.values(): actions[action.value] = DynamicPermission(action).allows(identity) if current_user.is_anonymous: return render_template("invenio_access/open.html", actions=actions, identity=identity) else: return render_template("invenio_access/limited.html", message='', actions=actions, identity=identity)
[ "def", "index", "(", ")", ":", "identity", "=", "g", ".", "identity", "actions", "=", "{", "}", "for", "action", "in", "access", ".", "actions", ".", "values", "(", ")", ":", "actions", "[", "action", ".", "value", "]", "=", "DynamicPermission", "(",...
Basic test view.
[ "Basic", "test", "view", "." ]
python
train
JustinLovinger/optimal
optimal/optimize.py
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L198-L319
def optimize(self, problem, max_iterations=100, max_seconds=float('inf'), cache_encoded=True, cache_solution=False, clear_cache=True, logging_func=_print_fitnesses, n_processes=0): """Find the optimal inputs for a given fitness function. Args: problem: An instance of Problem. The problem to solve. max_iterations: The number of iterations to optimize before stopping. max_seconds: Maximum number of seconds to optimize for, before stopping. Note that condition is only checked one per iteration, meaning optimization can take more than max_seconds, especially if fitnesses take a long time to calculate. cache_encoded: bool; Whether or not to cache fitness of encoded strings. Encoded strings are produced directly by the optimizer. If an encoded string is found in cache, it will not be decoded. cache_solution: bool; Whether or not to cache fitness of decoded solutions. Decoded solution is provided by problems decode function. If problem does not provide a hash solution function, Various naive hashing methods will be attempted, including: tuple, tuple(sorted(dict.items)), str. clear_cache: bool; Whether or not to reset cache after optimization. Disable if you want to run optimize multiple times on the same problem. logging_func: func/None; Function taking: iteration, population, solutions, fitnesses, best_solution, best_fitness Called after every iteration. Use for custom logging, or set to None to disable logging. Note that best_solution and best_fitness are the best of all iterations so far. n_processes: int; Number of processes to use for multiprocessing. If <= 0, do not use multiprocessing. Returns: object; The best solution, after decoding. """ if not isinstance(problem, Problem): raise TypeError('problem must be an instance of Problem class') # Prepare pool for multiprocessing if n_processes > 0: try: pool = multiprocessing.Pool(processes=n_processes) except NameError: raise ImportError( 'pickle, dill, or multiprocessing library is not available.' 
) else: pool = None # Set first, incase optimizer uses _max_iterations in initialization self.__max_iterations = max_iterations # Initialize algorithm self._reset() best_solution = {'solution': None, 'fitness': None} population = self.initial_population() try: # Begin optimization loop start = time.clock() for self.iteration in itertools.count(1): # Infinite sequence of iterations # Evaluate potential solutions solutions, fitnesses, finished = self._get_fitnesses( problem, population, cache_encoded=cache_encoded, cache_solution=cache_solution, pool=pool) # If the best fitness from this iteration is better than # the global best best_index, best_fitness = max( enumerate(fitnesses), key=operator.itemgetter(1)) if best_fitness > best_solution['fitness']: # Store the new best solution best_solution['fitness'] = best_fitness best_solution['solution'] = solutions[best_index] if logging_func: logging_func(self.iteration, population, solutions, fitnesses, best_solution['solution'], best_solution['fitness']) # Break if solution found if finished: self.solution_found = True break # Break if out of time if time.clock() - start >= max_seconds: break # Break if out of iterations if self.iteration >= max_iterations: break # Continue optimizing population = self.next_population(population, fitnesses) # Store best internally, before returning self.best_solution = best_solution['solution'] self.best_fitness = best_solution['fitness'] finally: # Clear caches if clear_cache: # Clear caches from memory self.__encoded_cache = {} self.__solution_cache = {} # Reset encoded, and decoded key functions self._get_encoded_key = self._get_encoded_key_type self._get_solution_key = self._get_solution_key_type # Clean up multiprocesses try: pool.terminate() # Kill outstanding work pool.close() # Close child processes except AttributeError: # No pool assert pool is None return self.best_solution
[ "def", "optimize", "(", "self", ",", "problem", ",", "max_iterations", "=", "100", ",", "max_seconds", "=", "float", "(", "'inf'", ")", ",", "cache_encoded", "=", "True", ",", "cache_solution", "=", "False", ",", "clear_cache", "=", "True", ",", "logging_f...
Find the optimal inputs for a given fitness function. Args: problem: An instance of Problem. The problem to solve. max_iterations: The number of iterations to optimize before stopping. max_seconds: Maximum number of seconds to optimize for, before stopping. Note that condition is only checked one per iteration, meaning optimization can take more than max_seconds, especially if fitnesses take a long time to calculate. cache_encoded: bool; Whether or not to cache fitness of encoded strings. Encoded strings are produced directly by the optimizer. If an encoded string is found in cache, it will not be decoded. cache_solution: bool; Whether or not to cache fitness of decoded solutions. Decoded solution is provided by problems decode function. If problem does not provide a hash solution function, Various naive hashing methods will be attempted, including: tuple, tuple(sorted(dict.items)), str. clear_cache: bool; Whether or not to reset cache after optimization. Disable if you want to run optimize multiple times on the same problem. logging_func: func/None; Function taking: iteration, population, solutions, fitnesses, best_solution, best_fitness Called after every iteration. Use for custom logging, or set to None to disable logging. Note that best_solution and best_fitness are the best of all iterations so far. n_processes: int; Number of processes to use for multiprocessing. If <= 0, do not use multiprocessing. Returns: object; The best solution, after decoding.
[ "Find", "the", "optimal", "inputs", "for", "a", "given", "fitness", "function", "." ]
python
train
Accelize/pycosio
pycosio/_core/functions_shutil.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/functions_shutil.py#L76-L118
def copy(src, dst): """ Copies a source file to a destination file or directory. Equivalent to "shutil.copy". Source and destination can also be binary opened file-like objects. Args: src (path-like object or file-like object): Source file. dst (path-like object or file-like object): Destination file or directory. Raises: IOError: Destination directory not found. """ # Handles path-like objects and checks if storage src, src_is_storage = format_and_is_storage(src) dst, dst_is_storage = format_and_is_storage(dst) # Local files: Redirects to "shutil.copy" if not src_is_storage and not dst_is_storage: return shutil_copy(src, dst) with handle_os_exceptions(): # Checks destination if not hasattr(dst, 'read'): try: # If destination is directory: defines an output file inside it if isdir(dst): dst = join(dst, basename(src)) # Checks if destination dir exists elif not isdir(dirname(dst)): raise IOError("No such file or directory: '%s'" % dst) except ObjectPermissionError: # Unable to check target directory due to missing read access, # but do not raise to allow to write if possible pass # Performs copy _copy(src, dst, src_is_storage, dst_is_storage)
[ "def", "copy", "(", "src", ",", "dst", ")", ":", "# Handles path-like objects and checks if storage", "src", ",", "src_is_storage", "=", "format_and_is_storage", "(", "src", ")", "dst", ",", "dst_is_storage", "=", "format_and_is_storage", "(", "dst", ")", "# Local f...
Copies a source file to a destination file or directory. Equivalent to "shutil.copy". Source and destination can also be binary opened file-like objects. Args: src (path-like object or file-like object): Source file. dst (path-like object or file-like object): Destination file or directory. Raises: IOError: Destination directory not found.
[ "Copies", "a", "source", "file", "to", "a", "destination", "file", "or", "directory", "." ]
python
train
mikedh/trimesh
trimesh/nsphere.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/nsphere.py#L170-L186
def is_nsphere(points): """ Check if a list of points is an nsphere. Parameters ----------- points : (n, dimension) float Points in space Returns ----------- check : bool True if input points are on an nsphere """ center, radius, error = fit_nsphere(points) check = error < tol.merge return check
[ "def", "is_nsphere", "(", "points", ")", ":", "center", ",", "radius", ",", "error", "=", "fit_nsphere", "(", "points", ")", "check", "=", "error", "<", "tol", ".", "merge", "return", "check" ]
Check if a list of points is an nsphere. Parameters ----------- points : (n, dimension) float Points in space Returns ----------- check : bool True if input points are on an nsphere
[ "Check", "if", "a", "list", "of", "points", "is", "an", "nsphere", "." ]
python
train
d0c-s4vage/gramfuzz
examples/example.py
https://github.com/d0c-s4vage/gramfuzz/blob/023727ac8744ae026d1105cc97c152bdf3abb8d6/examples/example.py#L18-L37
def generate(grammar=None, num=1, output=sys.stdout, max_recursion=10, seed=None): """Load and generate ``num`` number of top-level rules from the specified grammar. :param list grammar: The grammar file to load and generate data from :param int num: The number of times to generate data :param output: The output destination (an open, writable stream-type object. default=``sys.stdout``) :param int max_recursion: The maximum reference-recursion when generating data (default=``10``) :param int seed: The seed to initialize the PRNG with. If None, will not initialize it. """ if seed is not None: gramfuzz.rand.seed(seed) fuzzer = gramfuzz.GramFuzzer() fuzzer.load_grammar(grammar) cat_group = os.path.basename(grammar).replace(".py", "") results = fuzzer.gen(cat_group=cat_group, num=num, max_recursion=max_recursion) for res in results: output.write(res)
[ "def", "generate", "(", "grammar", "=", "None", ",", "num", "=", "1", ",", "output", "=", "sys", ".", "stdout", ",", "max_recursion", "=", "10", ",", "seed", "=", "None", ")", ":", "if", "seed", "is", "not", "None", ":", "gramfuzz", ".", "rand", ...
Load and generate ``num`` number of top-level rules from the specified grammar. :param list grammar: The grammar file to load and generate data from :param int num: The number of times to generate data :param output: The output destination (an open, writable stream-type object. default=``sys.stdout``) :param int max_recursion: The maximum reference-recursion when generating data (default=``10``) :param int seed: The seed to initialize the PRNG with. If None, will not initialize it.
[ "Load", "and", "generate", "num", "number", "of", "top", "-", "level", "rules", "from", "the", "specified", "grammar", "." ]
python
valid
mrcagney/gtfstk
gtfstk/trips.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/trips.py#L86-L138
def get_trips( feed: "Feed", date: Optional[str] = None, time: Optional[str] = None ) -> DataFrame: """ Return a subset of ``feed.trips``. Parameters ---------- feed : Feed date : string YYYYMMDD date string time : string HH:MM:SS time string, possibly with HH > 23 Returns ------- DataFrame The subset of ``feed.trips`` containing trips active (starting) on the given date at the given time. If no date or time are specified, then return the entire ``feed.trips``. """ if feed.trips is None or date is None: return feed.trips f = feed.trips.copy() f["is_active"] = f["trip_id"].map( lambda trip_id: feed.is_active_trip(trip_id, date) ) f = f[f["is_active"]].copy() del f["is_active"] if time is not None: # Get trips active during given time g = pd.merge(f, feed.stop_times[["trip_id", "departure_time"]]) def F(group): d = {} start = group["departure_time"].dropna().min() end = group["departure_time"].dropna().max() try: result = start <= time <= end except TypeError: result = False d["is_active"] = result return pd.Series(d) h = g.groupby("trip_id").apply(F).reset_index() f = pd.merge(f, h[h["is_active"]]) del f["is_active"] return f
[ "def", "get_trips", "(", "feed", ":", "\"Feed\"", ",", "date", ":", "Optional", "[", "str", "]", "=", "None", ",", "time", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "DataFrame", ":", "if", "feed", ".", "trips", "is", "None", "or", ...
Return a subset of ``feed.trips``. Parameters ---------- feed : Feed date : string YYYYMMDD date string time : string HH:MM:SS time string, possibly with HH > 23 Returns ------- DataFrame The subset of ``feed.trips`` containing trips active (starting) on the given date at the given time. If no date or time are specified, then return the entire ``feed.trips``.
[ "Return", "a", "subset", "of", "feed", ".", "trips", "." ]
python
train
xapple/plumbing
plumbing/databases/sqlite_database.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/sqlite_database.py#L100-L106
def tables(self): """The complete list of SQL tables.""" self.own_connection.row_factory = sqlite3.Row self.own_cursor.execute('SELECT name from sqlite_master where type="table";') result = [x[0].encode('ascii') for x in self.own_cursor.fetchall()] self.own_connection.row_factory = self.factory return result
[ "def", "tables", "(", "self", ")", ":", "self", ".", "own_connection", ".", "row_factory", "=", "sqlite3", ".", "Row", "self", ".", "own_cursor", ".", "execute", "(", "'SELECT name from sqlite_master where type=\"table\";'", ")", "result", "=", "[", "x", "[", ...
The complete list of SQL tables.
[ "The", "complete", "list", "of", "SQL", "tables", "." ]
python
train
asciimoo/exrex
exrex.py
https://github.com/asciimoo/exrex/blob/69733409042b526da584c675907a316ad708a8d4/exrex.py#L161-L243
def _gen(d, limit=20, count=False, grouprefs=None): """docstring for _gen""" if grouprefs is None: grouprefs = {} ret = [''] strings = 0 literal = False for i in d: if i[0] == sre_parse.IN: subs = _in(i[1]) if count: strings = (strings or 1) * len(subs) ret = comb(ret, subs) elif i[0] == sre_parse.LITERAL: literal = True ret = mappend(ret, unichr(i[1])) elif i[0] == sre_parse.CATEGORY: subs = CATEGORIES.get(i[1], ['']) if count: strings = (strings or 1) * len(subs) ret = comb(ret, subs) elif i[0] == sre_parse.ANY: subs = CATEGORIES['category_any'] if count: strings = (strings or 1) * len(subs) ret = comb(ret, subs) elif i[0] == sre_parse.MAX_REPEAT or i[0] == sre_parse.MIN_REPEAT: items = list(i[1][2]) if i[1][1] + 1 - i[1][0] >= limit: r1 = i[1][0] r2 = i[1][0] + limit else: r1 = i[1][0] r2 = i[1][1] + 1 ran = range(r1, r2) if count: branch_count = 0 for p in ran: branch_count += pow(_gen(items, limit, True, grouprefs), p) strings = (strings or 1) * branch_count ret = prods(ret, ran, items, limit, grouprefs) elif i[0] == sre_parse.BRANCH: if count: for x in i[1][1]: strings += _gen(x, limit, True, grouprefs) or 1 ret = concit(ret, i[1][1], limit, grouprefs) elif i[0] == sre_parse.SUBPATTERN or i[0] == sre_parse.ASSERT: subexpr = i[1][1] if IS_PY36_OR_GREATER and i[0] == sre_parse.SUBPATTERN: subexpr = i[1][3] if count: strings = ( strings or 1) * (sum(ggen([0], _gen, subexpr, limit=limit, count=True, grouprefs=grouprefs)) or 1) ret = ggen(ret, _gen, subexpr, limit=limit, count=False, grouprefs=grouprefs, groupref=i[1][0]) # ignore ^ and $ elif i[0] == sre_parse.AT: continue elif i[0] == sre_parse.NOT_LITERAL: subs = list(CATEGORIES['category_any']) if unichr(i[1]) in subs: subs.remove(unichr(i[1])) if count: strings = (strings or 1) * len(subs) ret = comb(ret, subs) elif i[0] == sre_parse.GROUPREF: ret = dappend(ret, grouprefs, i[1]) elif i[0] == sre_parse.ASSERT_NOT: pass else: print('[!] 
cannot handle expression ' + repr(i)) if count: if strings == 0 and literal: inc = True for i in d: if i[0] not in (sre_parse.AT, sre_parse.LITERAL): inc = False if inc: strings = 1 return strings return ret
[ "def", "_gen", "(", "d", ",", "limit", "=", "20", ",", "count", "=", "False", ",", "grouprefs", "=", "None", ")", ":", "if", "grouprefs", "is", "None", ":", "grouprefs", "=", "{", "}", "ret", "=", "[", "''", "]", "strings", "=", "0", "literal", ...
docstring for _gen
[ "docstring", "for", "_gen" ]
python
valid
wiredrive/wtframework
wtframework/wtf/email.py
https://github.com/wiredrive/wtframework/blob/ef7f86c4d4cf7fb17745fd627b3cc4a41f4c0216/wtframework/wtf/email.py#L190-L219
def __search_email_by_subject(self, subject, match_recipient): "Get a list of message numbers" if match_recipient is None: _, data = self._mail.uid('search', None, '(HEADER SUBJECT "{subject}")' .format(subject=subject)) uid_list = data[0].split() return uid_list else: _, data = self._mail.uid('search', None, '(HEADER SUBJECT "{subject}" TO "{recipient}")' .format(subject=subject, recipient=match_recipient)) filtered_list = [] uid_list = data[0].split() for uid in uid_list: # Those hard coded indexes [1][0][1] is a hard reference to the message email message headers # that's burried in all those wrapper objects that's associated # with fetching a message. to_addr = re.search( "[^-]To: (.*)", self._mail.uid('fetch', uid, "(RFC822)")[1][0][1]).group(1).strip() if (to_addr == match_recipient or to_addr == "<{0}>".format(match_recipient)): # Add matching entry to the list. filtered_list.append(uid) return filtered_list
[ "def", "__search_email_by_subject", "(", "self", ",", "subject", ",", "match_recipient", ")", ":", "if", "match_recipient", "is", "None", ":", "_", ",", "data", "=", "self", ".", "_mail", ".", "uid", "(", "'search'", ",", "None", ",", "'(HEADER SUBJECT \"{su...
Get a list of message numbers
[ "Get", "a", "list", "of", "message", "numbers" ]
python
train
lepture/flask-oauthlib
flask_oauthlib/provider/oauth2.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth2.py#L837-L881
def validate_bearer_token(self, token, scopes, request): """Validate access token. :param token: A string of random characters :param scopes: A list of scopes :param request: The Request object passed by oauthlib The validation validates: 1) if the token is available 2) if the token has expired 3) if the scopes are available """ log.debug('Validate bearer token %r', token) tok = self._tokengetter(access_token=token) if not tok: msg = 'Bearer token not found.' request.error_message = msg log.debug(msg) return False # validate expires if tok.expires is not None and \ datetime.datetime.utcnow() > tok.expires: msg = 'Bearer token is expired.' request.error_message = msg log.debug(msg) return False # validate scopes if scopes and not set(tok.scopes) & set(scopes): msg = 'Bearer token scope not valid.' request.error_message = msg log.debug(msg) return False request.access_token = tok request.user = tok.user request.scopes = scopes if hasattr(tok, 'client'): request.client = tok.client elif hasattr(tok, 'client_id'): request.client = self._clientgetter(tok.client_id) return True
[ "def", "validate_bearer_token", "(", "self", ",", "token", ",", "scopes", ",", "request", ")", ":", "log", ".", "debug", "(", "'Validate bearer token %r'", ",", "token", ")", "tok", "=", "self", ".", "_tokengetter", "(", "access_token", "=", "token", ")", ...
Validate access token. :param token: A string of random characters :param scopes: A list of scopes :param request: The Request object passed by oauthlib The validation validates: 1) if the token is available 2) if the token has expired 3) if the scopes are available
[ "Validate", "access", "token", "." ]
python
test
awslabs/serverless-application-model
samtranslator/model/sam_resources.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/sam_resources.py#L644-L688
def _construct_lambda_layer(self, intrinsics_resolver): """Constructs and returns the Lambda function. :returns: a list containing the Lambda function and execution role resources :rtype: list """ # Resolve intrinsics if applicable: self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName') self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo') self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description') self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy, 'RetentionPolicy') retention_policy_value = self._get_retention_policy_value() attributes = self.get_passthrough_resource_attributes() if attributes is None: attributes = {} attributes['DeletionPolicy'] = retention_policy_value old_logical_id = self.logical_id new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen() self.logical_id = new_logical_id lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes) # Changing the LayerName property: when a layer is published, it is given an Arn # example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1 # where MyLayer is the LayerName property if it exists; otherwise, it is the # LogicalId of this resource. Since a LayerVersion is an immutable resource, when # CloudFormation updates this resource, it will ALWAYS create a new version then # delete the old version if the logical ids match. What this does is change the # logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the # LayerName property of the layer so that the Arn will still always be the same # with the exception of an incrementing version number. 
if not self.LayerName: self.LayerName = old_logical_id lambda_layer.LayerName = self.LayerName lambda_layer.Description = self.Description lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri') lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes lambda_layer.LicenseInfo = self.LicenseInfo return lambda_layer
[ "def", "_construct_lambda_layer", "(", "self", ",", "intrinsics_resolver", ")", ":", "# Resolve intrinsics if applicable:", "self", ".", "LayerName", "=", "self", ".", "_resolve_string_parameter", "(", "intrinsics_resolver", ",", "self", ".", "LayerName", ",", "'LayerNa...
Constructs and returns the Lambda function. :returns: a list containing the Lambda function and execution role resources :rtype: list
[ "Constructs", "and", "returns", "the", "Lambda", "function", "." ]
python
train
RonenNess/Fileter
fileter/filters/extension_filter.py
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/filters/extension_filter.py#L24-L34
def match(self, filepath): """ The function to check file. Should return True if match, False otherwise. """ # no extension? if filepath.find(".") == -1: return False # match extension return filepath.lower().split(".")[-1] in self.__extensions
[ "def", "match", "(", "self", ",", "filepath", ")", ":", "# no extension?", "if", "filepath", ".", "find", "(", "\".\"", ")", "==", "-", "1", ":", "return", "False", "# match extension", "return", "filepath", ".", "lower", "(", ")", ".", "split", "(", "...
The function to check file. Should return True if match, False otherwise.
[ "The", "function", "to", "check", "file", ".", "Should", "return", "True", "if", "match", "False", "otherwise", "." ]
python
train
jameslyons/pycipher
pycipher/atbash.py
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/atbash.py#L16-L32
def encipher(self,string,keep_punct=False): """Encipher string using Atbash cipher. Example:: ciphertext = Atbash().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string. """ if not keep_punct: string = self.remove_punctuation(string) ret = '' for c in string.upper(): if c.isalpha(): ret += self.key[self.a2i(c)] else: ret += c return ret
[ "def", "encipher", "(", "self", ",", "string", ",", "keep_punct", "=", "False", ")", ":", "if", "not", "keep_punct", ":", "string", "=", "self", ".", "remove_punctuation", "(", "string", ")", "ret", "=", "''", "for", "c", "in", "string", ".", "upper", ...
Encipher string using Atbash cipher. Example:: ciphertext = Atbash().encipher(plaintext) :param string: The string to encipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The enciphered string.
[ "Encipher", "string", "using", "Atbash", "cipher", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L1128-L1157
def _infer_all_output_dims(self, inputs): """Calculate the output shape for `inputs` after a deconvolution. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: output_shape: A tensor of shape (`batch_size`, `conv_output_shape`). """ # Use tensorflow shape op to manipulate inputs shape, so that unknown batch # size - which can happen when using input placeholders - is handled # correcly. batch_size = tf.expand_dims(tf.shape(inputs)[0], 0) out_channels = (self.output_channels,) # Height dim needs to be added to everything for 1D Conv # as we'll be using the 2D Conv Transpose op. if self._n == 1: out_shape = (1,) + self.output_shape else: out_shape = self.output_shape if self._data_format.startswith("NC"): out_shape_tuple = out_channels + out_shape elif self._data_format.startswith("N") and self._data_format.endswith("C"): out_shape_tuple = out_shape + out_channels output_shape = tf.concat([batch_size, out_shape_tuple], 0) return output_shape
[ "def", "_infer_all_output_dims", "(", "self", ",", "inputs", ")", ":", "# Use tensorflow shape op to manipulate inputs shape, so that unknown batch", "# size - which can happen when using input placeholders - is handled", "# correcly.", "batch_size", "=", "tf", ".", "expand_dims", "(...
Calculate the output shape for `inputs` after a deconvolution. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
[ "Calculate", "the", "output", "shape", "for", "inputs", "after", "a", "deconvolution", "." ]
python
train
rocky/python-xdis
xdis/main.py
https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/main.py#L127-L151
def disco_loop(opc, version, queue, real_out, dup_lines=False, show_bytes=False): """Disassembles a queue of code objects. If we discover another code object which will be found in co_consts, we add the new code to the list. Note that the order of code discovery is in the order of first encountered which is not amenable for the format used by a disassembler where code objects should be defined before using them in other functions. However this is not recursive and will overall lead to less memory consumption at run time. """ while len(queue) > 0: co = queue.popleft() if co.co_name not in ('<module>', '?'): real_out.write("\n" + format_code_info(co, version) + "\n") bytecode = Bytecode(co, opc, dup_lines=dup_lines) real_out.write(bytecode.dis(show_bytes=show_bytes) + "\n") for c in co.co_consts: if iscode(c): queue.append(c) pass pass
[ "def", "disco_loop", "(", "opc", ",", "version", ",", "queue", ",", "real_out", ",", "dup_lines", "=", "False", ",", "show_bytes", "=", "False", ")", ":", "while", "len", "(", "queue", ")", ">", "0", ":", "co", "=", "queue", ".", "popleft", "(", ")...
Disassembles a queue of code objects. If we discover another code object which will be found in co_consts, we add the new code to the list. Note that the order of code discovery is in the order of first encountered which is not amenable for the format used by a disassembler where code objects should be defined before using them in other functions. However this is not recursive and will overall lead to less memory consumption at run time.
[ "Disassembles", "a", "queue", "of", "code", "objects", ".", "If", "we", "discover", "another", "code", "object", "which", "will", "be", "found", "in", "co_consts", "we", "add", "the", "new", "code", "to", "the", "list", ".", "Note", "that", "the", "order...
python
train
orbingol/NURBS-Python
geomdl/helpers.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/helpers.py#L727-L745
def knot_removal_alpha_i(u, degree, knotvector, num, idx): """ Computes :math:`\\alpha_{i}` coefficient for knot removal algorithm. Please refer to Eq. 5.29 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.184 for details. :param u: knot :type u: float :param degree: degree :type degree: int :param knotvector: knot vector :type knotvector: tuple :param num: knot removal index :type num: int :param idx: iterator index :type idx: int :return: coefficient value :rtype: float """ return (u - knotvector[idx]) / (knotvector[idx + degree + 1 + num] - knotvector[idx])
[ "def", "knot_removal_alpha_i", "(", "u", ",", "degree", ",", "knotvector", ",", "num", ",", "idx", ")", ":", "return", "(", "u", "-", "knotvector", "[", "idx", "]", ")", "/", "(", "knotvector", "[", "idx", "+", "degree", "+", "1", "+", "num", "]", ...
Computes :math:`\\alpha_{i}` coefficient for knot removal algorithm. Please refer to Eq. 5.29 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.184 for details. :param u: knot :type u: float :param degree: degree :type degree: int :param knotvector: knot vector :type knotvector: tuple :param num: knot removal index :type num: int :param idx: iterator index :type idx: int :return: coefficient value :rtype: float
[ "Computes", ":", "math", ":", "\\\\", "alpha_", "{", "i", "}", "coefficient", "for", "knot", "removal", "algorithm", "." ]
python
train
pandas-dev/pandas
pandas/io/stata.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2761-L2766
def _tag(val, tag): """Surround val with <tag></tag>""" if isinstance(val, str): val = bytes(val, 'utf-8') return (bytes('<' + tag + '>', 'utf-8') + val + bytes('</' + tag + '>', 'utf-8'))
[ "def", "_tag", "(", "val", ",", "tag", ")", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "val", "=", "bytes", "(", "val", ",", "'utf-8'", ")", "return", "(", "bytes", "(", "'<'", "+", "tag", "+", "'>'", ",", "'utf-8'", ")", "+", ...
Surround val with <tag></tag>
[ "Surround", "val", "with", "<tag", ">", "<", "/", "tag", ">" ]
python
train
saltstack/salt
salt/cloud/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1161-L1316
def create(self, vm_, local_master=True): ''' Create a single VM ''' output = {} minion_dict = salt.config.get_cloud_config_value( 'minion', vm_, self.opts, default={} ) alias, driver = vm_['provider'].split(':') fun = '{0}.create'.format(driver) if fun not in self.clouds: log.error( 'Creating \'%s\' using \'%s\' as the provider ' 'cannot complete since \'%s\' is not available', vm_['name'], vm_['provider'], driver ) return deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts) make_master = salt.config.get_cloud_config_value( 'make_master', vm_, self.opts ) if deploy: if not make_master and 'master' not in minion_dict: log.warning( 'There\'s no master defined on the \'%s\' VM settings.', vm_['name'] ) if 'pub_key' not in vm_ and 'priv_key' not in vm_: log.debug('Generating minion keys for \'%s\'', vm_['name']) priv, pub = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, self.opts ) ) vm_['pub_key'] = pub vm_['priv_key'] = priv else: # Note(pabelanger): We still reference pub_key and priv_key when # deploy is disabled. 
vm_['pub_key'] = None vm_['priv_key'] = None key_id = minion_dict.get('id', vm_['name']) domain = vm_.get('domain') if vm_.get('use_fqdn') and domain: minion_dict['append_domain'] = domain if 'append_domain' in minion_dict: key_id = '.'.join([key_id, minion_dict['append_domain']]) if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_: log.debug('Generating the master keys for \'%s\'', vm_['name']) master_priv, master_pub = salt.utils.cloud.gen_keys( salt.config.get_cloud_config_value( 'keysize', vm_, self.opts ) ) vm_['master_pub'] = master_pub vm_['master_pem'] = master_priv if local_master is True and deploy is True: # Accept the key on the local master salt.utils.cloud.accept_key( self.opts['pki_dir'], vm_['pub_key'], key_id ) vm_['os'] = salt.config.get_cloud_config_value( 'script', vm_, self.opts ) try: vm_['inline_script'] = salt.config.get_cloud_config_value( 'inline_script', vm_, self.opts ) except KeyError: pass try: alias, driver = vm_['provider'].split(':') func = '{0}.create'.format(driver) with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): output = self.clouds[func](vm_) if output is not False and 'sync_after_install' in self.opts: if self.opts['sync_after_install'] not in ( 'all', 'modules', 'states', 'grains'): log.error('Bad option for sync_after_install') return output # A small pause helps the sync work more reliably time.sleep(3) start = int(time.time()) while int(time.time()) < start + 60: # We'll try every <timeout> seconds, up to a minute mopts_ = salt.config.DEFAULT_MASTER_OPTS conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1]) mopts_.update( salt.config.master_config( os.path.join(conf_path, 'master') ) ) client = salt.client.get_local_client(mopts=mopts_) ret = client.cmd( vm_['name'], 'saltutil.sync_{0}'.format(self.opts['sync_after_install']), timeout=self.opts['timeout'] ) if ret: log.info( six.u('Synchronized the following dynamic 
modules: ' ' {0}').format(ret) ) break except KeyError as exc: log.exception( 'Failed to create VM %s. Configuration value %s needs ' 'to be set', vm_['name'], exc ) # If it's a map then we need to respect the 'requires' # so we do it later try: opt_map = self.opts['map'] except KeyError: opt_map = False if self.opts['parallel'] and self.opts['start_action'] and not opt_map: log.info('Running %s on %s', self.opts['start_action'], vm_['name']) client = salt.client.get_local_client(mopts=self.opts) action_out = client.cmd( vm_['name'], self.opts['start_action'], timeout=self.opts['timeout'] * 60 ) output['ret'] = action_out return output
[ "def", "create", "(", "self", ",", "vm_", ",", "local_master", "=", "True", ")", ":", "output", "=", "{", "}", "minion_dict", "=", "salt", ".", "config", ".", "get_cloud_config_value", "(", "'minion'", ",", "vm_", ",", "self", ".", "opts", ",", "defaul...
Create a single VM
[ "Create", "a", "single", "VM" ]
python
train
priestc/giotto
giotto/contrib/auth/manifest.py
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/contrib/auth/manifest.py#L9-L89
def create_auth_manifest(**kwargs): """ Creates a basic authentication manifest for logging in, logging out and registering new accounts. """ class AuthProgram(Program): pre_input_middleware = [AuthenticationMiddleware] def register(username, password, password2): """ Decorated version of basic_register with a callback added. """ result = basic_register(username, password, password2) callback = kwargs.get('post_register_callback', None) if callback: user = User.objects.get(username=username) callback(user) return result return Manifest({ 'login': [ AuthProgram( """ Prints out the HTML form for logging in. """, name="Login (form)", input_middleware=[NotAuthenticatedOrRedirect('/')], view=BasicView( html=jinja_template('login.html'), ), ), AuthProgram( """ Matches up the username/password against the database, and adds the auth cookies. """, name="Login (post)", input_middleware=[NotAuthenticatedOrDie], controllers=['http-post', 'cmd'], model=[create_session, {'username': 'mock_user', 'session_key': 'XXXXXXXXXXXXXXX'}], view=BasicView( persist=lambda m: {'giotto_session': m['session_key']}, html=lambda m: Redirection('/'), ), ), ], 'logout': AuthProgram( """ Send the user here to log them out. Removes their cookies and deletes the auth session. """, name="Logout", view=BasicView( html=Redirection('/'), ), output_middleware=[LogoutMiddleware], ), 'register': [ AuthProgram( """ This program returns the HTML page with the form for registering a new account. HTTP-get only. """, name="Register (form)", input_middleware=[NotAuthenticatedOrRedirect('/')], view=BasicView( html=jinja_template('register.html'), ), ), AuthProgram( """ When you POST the register form, this program handles creating the new user, then redirecting you to '/' """, name="Register (post)", controllers=['http-post'], model=[register], view=BasicView( persist=lambda m: {'giotto_session': m['session_key']}, html=lambda m: Redirection('/'), ), ), ], })
[ "def", "create_auth_manifest", "(", "*", "*", "kwargs", ")", ":", "class", "AuthProgram", "(", "Program", ")", ":", "pre_input_middleware", "=", "[", "AuthenticationMiddleware", "]", "def", "register", "(", "username", ",", "password", ",", "password2", ")", "...
Creates a basic authentication manifest for logging in, logging out and registering new accounts.
[ "Creates", "a", "basic", "authentication", "manifest", "for", "logging", "in", "logging", "out", "and", "registering", "new", "accounts", "." ]
python
train
HumanBrainProject/hbp-service-client
hbp_service_client/storage_service/api.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/api.py#L717-L737
def delete_folder(self, folder): '''Delete a folder. It will recursively delete all the content. Args: folder_id (str): The UUID of the folder to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes ''' if not is_valid_uuid(folder): raise StorageArgumentException( 'Invalid UUID for folder: {0}'.format(folder)) self._authenticated_request \ .to_endpoint('folder/{}/'.format(folder)) \ .delete()
[ "def", "delete_folder", "(", "self", ",", "folder", ")", ":", "if", "not", "is_valid_uuid", "(", "folder", ")", ":", "raise", "StorageArgumentException", "(", "'Invalid UUID for folder: {0}'", ".", "format", "(", "folder", ")", ")", "self", ".", "_authenticated_...
Delete a folder. It will recursively delete all the content. Args: folder_id (str): The UUID of the folder to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes
[ "Delete", "a", "folder", ".", "It", "will", "recursively", "delete", "all", "the", "content", "." ]
python
test
textX/textX
textx/scoping/tools.py
https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/scoping/tools.py#L95-L108
def get_location(model_obj): """ Args: model_obj: the model object of interest Returns: the line, col and filename of the model element. The filename may be None. This function may be used to fill exceptions """ the_model = get_model(model_obj) line, col = the_model._tx_parser.pos_to_linecol( model_obj._tx_position) return {"line": line, "col": col, "filename": the_model._tx_filename}
[ "def", "get_location", "(", "model_obj", ")", ":", "the_model", "=", "get_model", "(", "model_obj", ")", "line", ",", "col", "=", "the_model", ".", "_tx_parser", ".", "pos_to_linecol", "(", "model_obj", ".", "_tx_position", ")", "return", "{", "\"line\"", ":...
Args: model_obj: the model object of interest Returns: the line, col and filename of the model element. The filename may be None. This function may be used to fill exceptions
[ "Args", ":", "model_obj", ":", "the", "model", "object", "of", "interest" ]
python
train
juju/python-libjuju
juju/client/_client2.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client2.py#L3931-L3944
async def ModelSet(self, config): ''' config : typing.Mapping[str, typing.Any] Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Client', request='ModelSet', version=2, params=_params) _params['config'] = config reply = await self.rpc(msg) return reply
[ "async", "def", "ModelSet", "(", "self", ",", "config", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Client'", ",", "request", "=", "'ModelSet'", ",", "version", "=", "2", ",", "param...
config : typing.Mapping[str, typing.Any] Returns -> None
[ "config", ":", "typing", ".", "Mapping", "[", "str", "typing", ".", "Any", "]", "Returns", "-", ">", "None" ]
python
train
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L550-L554
def set_ramp_rate(self, ramp_rate): """Set the X10 address for the current group/button.""" set_cmd = self._create_set_property_msg('_ramp_rate', 0x05, ramp_rate) self._send_method(set_cmd, self._property_set)
[ "def", "set_ramp_rate", "(", "self", ",", "ramp_rate", ")", ":", "set_cmd", "=", "self", ".", "_create_set_property_msg", "(", "'_ramp_rate'", ",", "0x05", ",", "ramp_rate", ")", "self", ".", "_send_method", "(", "set_cmd", ",", "self", ".", "_property_set", ...
Set the X10 address for the current group/button.
[ "Set", "the", "X10", "address", "for", "the", "current", "group", "/", "button", "." ]
python
train
tradenity/python-sdk
tradenity/resources/currency.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/currency.py#L651-L673
def list_all_currencies(cls, **kwargs): """List Currencies Return a list of Currencies This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_currencies(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Currency] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_currencies_with_http_info(**kwargs) else: (data) = cls._list_all_currencies_with_http_info(**kwargs) return data
[ "def", "list_all_currencies", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_currencies_with_http_info", "(", ...
List Currencies Return a list of Currencies This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_currencies(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Currency] If the method is called asynchronously, returns the request thread.
[ "List", "Currencies" ]
python
train
ratcashdev/mitemp
mitemp_bt/mitemp_bt_poller.py
https://github.com/ratcashdev/mitemp/blob/bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10/mitemp_bt/mitemp_bt_poller.py#L55-L73
def fill_cache(self): """Fill the cache with new data from the sensor.""" _LOGGER.debug('Filling cache with new sensor data.') try: self.firmware_version() except BluetoothBackendException: # If a sensor doesn't work, wait 5 minutes before retrying self._last_read = datetime.now() - self._cache_timeout + \ timedelta(seconds=300) raise with self._bt_interface.connect(self._mac) as connection: try: connection.wait_for_notification(_HANDLE_READ_WRITE_SENSOR_DATA, self, 10) # pylint: disable=no-member # If a sensor doesn't work, wait 5 minutes before retrying except BluetoothBackendException: self._last_read = datetime.now() - self._cache_timeout + \ timedelta(seconds=300) return
[ "def", "fill_cache", "(", "self", ")", ":", "_LOGGER", ".", "debug", "(", "'Filling cache with new sensor data.'", ")", "try", ":", "self", ".", "firmware_version", "(", ")", "except", "BluetoothBackendException", ":", "# If a sensor doesn't work, wait 5 minutes before re...
Fill the cache with new data from the sensor.
[ "Fill", "the", "cache", "with", "new", "data", "from", "the", "sensor", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L1713-L1756
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the data encoding the Credential struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the credential type or value are not defined. """ local_stream = BytearrayStream() if self._credential_type: self._credential_type.write( local_stream, kmip_version=kmip_version ) else: raise ValueError( "Credential struct missing the credential type." ) if self._credential_value: self._credential_value.write( local_stream, kmip_version=kmip_version ) else: raise ValueError( "Credential struct missing the credential value." ) self.length = local_stream.length() super(Credential, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
[ "def", "write", "(", "self", ",", "output_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "local_stream", "=", "BytearrayStream", "(", ")", "if", "self", ".", "_credential_type", ":", "self", ".", "_credential_type", ...
Write the data encoding the Credential struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the credential type or value are not defined.
[ "Write", "the", "data", "encoding", "the", "Credential", "struct", "to", "a", "stream", "." ]
python
test
maxalbert/tohu
tohu/v6/set_special_methods.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/set_special_methods.py#L16-L28
def check_that_operator_can_be_applied_to_produces_items(op, g1, g2): """ Helper function to check that the operator `op` can be applied to items produced by g1 and g2. """ g1_tmp_copy = g1.spawn() g2_tmp_copy = g2.spawn() sample_item_1 = next(g1_tmp_copy) sample_item_2 = next(g2_tmp_copy) try: op(sample_item_1, sample_item_2) except TypeError: raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} " f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)")
[ "def", "check_that_operator_can_be_applied_to_produces_items", "(", "op", ",", "g1", ",", "g2", ")", ":", "g1_tmp_copy", "=", "g1", ".", "spawn", "(", ")", "g2_tmp_copy", "=", "g2", ".", "spawn", "(", ")", "sample_item_1", "=", "next", "(", "g1_tmp_copy", ")...
Helper function to check that the operator `op` can be applied to items produced by g1 and g2.
[ "Helper", "function", "to", "check", "that", "the", "operator", "op", "can", "be", "applied", "to", "items", "produced", "by", "g1", "and", "g2", "." ]
python
train
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L179-L211
def unpack(self, unpacker): """ Unpacks the constant pool from an unpacker stream """ (count, ) = unpacker.unpack_struct(_H) # first item is never present in the actual data buffer, but # the count number acts like it would be. items = [(None, None), ] count -= 1 # Long and Double const types will "consume" an item count, # but not data hackpass = False for _i in range(0, count): if hackpass: # previous item was a long or double hackpass = False items.append((None, None)) else: item = _unpack_const_item(unpacker) items.append(item) # if this item was a long or double, skip the next # counter. if item[0] in (CONST_Long, CONST_Double): hackpass = True self.consts = items
[ "def", "unpack", "(", "self", ",", "unpacker", ")", ":", "(", "count", ",", ")", "=", "unpacker", ".", "unpack_struct", "(", "_H", ")", "# first item is never present in the actual data buffer, but", "# the count number acts like it would be.", "items", "=", "[", "(",...
Unpacks the constant pool from an unpacker stream
[ "Unpacks", "the", "constant", "pool", "from", "an", "unpacker", "stream" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L2607-L2640
def get_hardware_breakpoint(self, dwThreadId, address): """ Returns the internally used breakpoint object, for the code breakpoint defined at the given address. @warning: It's usually best to call the L{Debug} methods instead of accessing the breakpoint objects directly. @see: L{define_hardware_breakpoint}, L{has_hardware_breakpoint}, L{get_code_breakpoint}, L{enable_hardware_breakpoint}, L{enable_one_shot_hardware_breakpoint}, L{disable_hardware_breakpoint}, L{erase_hardware_breakpoint} @type dwThreadId: int @param dwThreadId: Thread global ID. @type address: int @param address: Memory address where the breakpoint is defined. @rtype: L{HardwareBreakpoint} @return: The hardware breakpoint object. """ if dwThreadId not in self.__hardwareBP: msg = "No hardware breakpoints set for thread %d" raise KeyError(msg % dwThreadId) for bp in self.__hardwareBP[dwThreadId]: if bp.is_here(address): return bp msg = "No hardware breakpoint at thread %d, address %s" raise KeyError(msg % (dwThreadId, HexDump.address(address)))
[ "def", "get_hardware_breakpoint", "(", "self", ",", "dwThreadId", ",", "address", ")", ":", "if", "dwThreadId", "not", "in", "self", ".", "__hardwareBP", ":", "msg", "=", "\"No hardware breakpoints set for thread %d\"", "raise", "KeyError", "(", "msg", "%", "dwThr...
Returns the internally used breakpoint object, for the code breakpoint defined at the given address. @warning: It's usually best to call the L{Debug} methods instead of accessing the breakpoint objects directly. @see: L{define_hardware_breakpoint}, L{has_hardware_breakpoint}, L{get_code_breakpoint}, L{enable_hardware_breakpoint}, L{enable_one_shot_hardware_breakpoint}, L{disable_hardware_breakpoint}, L{erase_hardware_breakpoint} @type dwThreadId: int @param dwThreadId: Thread global ID. @type address: int @param address: Memory address where the breakpoint is defined. @rtype: L{HardwareBreakpoint} @return: The hardware breakpoint object.
[ "Returns", "the", "internally", "used", "breakpoint", "object", "for", "the", "code", "breakpoint", "defined", "at", "the", "given", "address", "." ]
python
train
LonamiWebs/Telethon
telethon/client/users.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/users.py#L131-L138
async def is_bot(self): """ Return ``True`` if the signed-in user is a bot, ``False`` otherwise. """ if self._bot is None: self._bot = (await self.get_me()).bot return self._bot
[ "async", "def", "is_bot", "(", "self", ")", ":", "if", "self", ".", "_bot", "is", "None", ":", "self", ".", "_bot", "=", "(", "await", "self", ".", "get_me", "(", ")", ")", ".", "bot", "return", "self", ".", "_bot" ]
Return ``True`` if the signed-in user is a bot, ``False`` otherwise.
[ "Return", "True", "if", "the", "signed", "-", "in", "user", "is", "a", "bot", "False", "otherwise", "." ]
python
train
serge-sans-paille/pythran
pythran/transformations/handle_import.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/transformations/handle_import.py#L37-L41
def is_builtin_module(module_name): """Test if a module is a builtin module (numpy, math, ...).""" module_name = module_name.split(".")[0] return (module_name in MODULES or (module_name in cxx_keywords and module_name + "_" in MODULES))
[ "def", "is_builtin_module", "(", "module_name", ")", ":", "module_name", "=", "module_name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "return", "(", "module_name", "in", "MODULES", "or", "(", "module_name", "in", "cxx_keywords", "and", "module_name", ...
Test if a module is a builtin module (numpy, math, ...).
[ "Test", "if", "a", "module", "is", "a", "builtin", "module", "(", "numpy", "math", "...", ")", "." ]
python
train
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L1665-L1676
def parseline(self, line: str) -> Tuple[str, str, str]: """Parse the line into a command name and a string containing the arguments. NOTE: This is an override of a parent class method. It is only used by other parent class methods. Different from the parent class method, this ignores self.identchars. :param line: line read by readline :return: tuple containing (command, args, line) """ statement = self.statement_parser.parse_command_only(line) return statement.command, statement.args, statement.command_and_args
[ "def", "parseline", "(", "self", ",", "line", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", ",", "str", "]", ":", "statement", "=", "self", ".", "statement_parser", ".", "parse_command_only", "(", "line", ")", "return", "statement", ".", "com...
Parse the line into a command name and a string containing the arguments. NOTE: This is an override of a parent class method. It is only used by other parent class methods. Different from the parent class method, this ignores self.identchars. :param line: line read by readline :return: tuple containing (command, args, line)
[ "Parse", "the", "line", "into", "a", "command", "name", "and", "a", "string", "containing", "the", "arguments", "." ]
python
train
doconix/django-mako-plus
django_mako_plus/util/reflect.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/util/reflect.py#L4-L11
def qualified_name(obj): '''Returns the fully-qualified name of the given object''' if not hasattr(obj, '__module__'): obj = obj.__class__ module = obj.__module__ if module is None or module == str.__class__.__module__: return obj.__qualname__ return '{}.{}'.format(module, obj.__qualname__)
[ "def", "qualified_name", "(", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'__module__'", ")", ":", "obj", "=", "obj", ".", "__class__", "module", "=", "obj", ".", "__module__", "if", "module", "is", "None", "or", "module", "==", "str", ...
Returns the fully-qualified name of the given object
[ "Returns", "the", "fully", "-", "qualified", "name", "of", "the", "given", "object" ]
python
train
UCL-INGI/INGInious
inginious/client/client.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/client/client.py#L253-L311
def new_job(self, task, inputdata, callback, launcher_name="Unknown", debug=False, ssh_callback=None): """ Add a new job. Every callback will be called once and only once. :type task: Task :param inputdata: input from the student :type inputdata: Storage or dict :param callback: a function that will be called asynchronously in the client's process, with the results. it's signature must be (result, grade, problems, tests, custom, archive), where: result is itself a tuple containing the result string and the main feedback (i.e. ('success', 'You succeeded'); grade is a number between 0 and 100 indicating the grade of the users; problems is a dict of tuple, in the form {'problemid': result}; test is a dict of tests made in the container custom is a dict containing random things set in the container archive is either None or a bytes containing a tgz archive of files from the job :type callback: __builtin__.function or __builtin__.instancemethod :param launcher_name: for informational use :type launcher_name: str :param debug: Either True(outputs more info), False(default), or "ssh" (starts a remote ssh server. ssh_callback needs to be defined) :type debug: bool or string :param ssh_callback: a callback function that will be called with (host, port, password), the needed credentials to connect to the remote ssh server. May be called with host, port, password being None, meaning no session was open. 
:type ssh_callback: __builtin__.function or __builtin__.instancemethod or None :return: the new job id """ job_id = str(uuid.uuid4()) if debug == "ssh" and ssh_callback is None: self._logger.error("SSH callback not set in %s/%s", task.get_course_id(), task.get_id()) callback(("crash", "SSH callback not set."), 0.0, {}, {}, {}, None, "", "") return # wrap ssh_callback to ensure it is called at most once, and that it can always be called to simplify code ssh_callback = _callable_once(ssh_callback if ssh_callback is not None else lambda _1, _2, _3: None) environment = task.get_environment() if environment not in self._available_containers: self._logger.warning("Env %s not available for task %s/%s", environment, task.get_course_id(), task.get_id()) ssh_callback(None, None, None) # ssh_callback must be called once callback(("crash", "Environment not available."), 0.0, {}, {}, "", {}, None, "", "") return enable_network = task.allow_network_access_grading() try: limits = task.get_limits() time_limit = int(limits.get('time', 20)) hard_time_limit = int(limits.get('hard_time', 3 * time_limit)) mem_limit = int(limits.get('memory', 200)) except: self._logger.exception("Cannot retrieve limits for task %s/%s", task.get_course_id(), task.get_id()) ssh_callback(None, None, None) # ssh_callback must be called once callback(("crash", "Error while reading task limits"), 0.0, {}, {}, "", {}, None, "", "") return msg = ClientNewJob(job_id, task.get_course_id(), task.get_id(), inputdata, environment, enable_network, time_limit, hard_time_limit, mem_limit, debug, launcher_name) self._loop.call_soon_threadsafe(asyncio.ensure_future, self._create_transaction(msg, task=task, callback=callback, ssh_callback=ssh_callback)) return job_id
[ "def", "new_job", "(", "self", ",", "task", ",", "inputdata", ",", "callback", ",", "launcher_name", "=", "\"Unknown\"", ",", "debug", "=", "False", ",", "ssh_callback", "=", "None", ")", ":", "job_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ...
Add a new job. Every callback will be called once and only once. :type task: Task :param inputdata: input from the student :type inputdata: Storage or dict :param callback: a function that will be called asynchronously in the client's process, with the results. it's signature must be (result, grade, problems, tests, custom, archive), where: result is itself a tuple containing the result string and the main feedback (i.e. ('success', 'You succeeded'); grade is a number between 0 and 100 indicating the grade of the users; problems is a dict of tuple, in the form {'problemid': result}; test is a dict of tests made in the container custom is a dict containing random things set in the container archive is either None or a bytes containing a tgz archive of files from the job :type callback: __builtin__.function or __builtin__.instancemethod :param launcher_name: for informational use :type launcher_name: str :param debug: Either True(outputs more info), False(default), or "ssh" (starts a remote ssh server. ssh_callback needs to be defined) :type debug: bool or string :param ssh_callback: a callback function that will be called with (host, port, password), the needed credentials to connect to the remote ssh server. May be called with host, port, password being None, meaning no session was open. :type ssh_callback: __builtin__.function or __builtin__.instancemethod or None :return: the new job id
[ "Add", "a", "new", "job", ".", "Every", "callback", "will", "be", "called", "once", "and", "only", "once", "." ]
python
train
tamasgal/km3pipe
km3pipe/db.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L879-L884
def upi(self): """A dict of CLBs with UPI as key""" parameter = 'UPI' if parameter not in self._by: self._populate(by=parameter) return self._by[parameter]
[ "def", "upi", "(", "self", ")", ":", "parameter", "=", "'UPI'", "if", "parameter", "not", "in", "self", ".", "_by", ":", "self", ".", "_populate", "(", "by", "=", "parameter", ")", "return", "self", ".", "_by", "[", "parameter", "]" ]
A dict of CLBs with UPI as key
[ "A", "dict", "of", "CLBs", "with", "UPI", "as", "key" ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_filters.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_filters.py#L253-L267
def domain_name_cleanse(raw_string): '''extract a lower-case, no-slashes domain name from a raw string that might be a URL ''' try: parts = urlparse(raw_string) domain = parts.netloc.split(':')[0] except: domain = '' if not domain: domain = raw_string if not domain: return '' domain = re.sub('\/', '', domain.strip().lower()) return domain
[ "def", "domain_name_cleanse", "(", "raw_string", ")", ":", "try", ":", "parts", "=", "urlparse", "(", "raw_string", ")", "domain", "=", "parts", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", "except", ":", "domain", "=", "''", "if", "n...
extract a lower-case, no-slashes domain name from a raw string that might be a URL
[ "extract", "a", "lower", "-", "case", "no", "-", "slashes", "domain", "name", "from", "a", "raw", "string", "that", "might", "be", "a", "URL" ]
python
test
linnarsson-lab/loompy
loompy/graph_manager.py
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/graph_manager.py#L7-L15
def _renumber(a: np.ndarray, keys: np.ndarray, values: np.ndarray) -> np.ndarray: """ Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values' """ ordering = np.argsort(keys) keys = keys[ordering] values = keys[ordering] index = np.digitize(a.ravel(), keys, right=True) return(values[index].reshape(a.shape))
[ "def", "_renumber", "(", "a", ":", "np", ".", "ndarray", ",", "keys", ":", "np", ".", "ndarray", ",", "values", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "ordering", "=", "np", ".", "argsort", "(", "keys", ")", "keys", "=",...
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
[ "Renumber", "a", "by", "replacing", "any", "occurrence", "of", "keys", "by", "the", "corresponding", "values" ]
python
train
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/map_execute_on_keys_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/map_execute_on_keys_codec.py#L23-L34
def encode_request(name, entry_processor, keys): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(name, entry_processor, keys)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(name) client_message.append_data(entry_processor) client_message.append_int(len(keys)) for keys_item in keys: client_message.append_data(keys_item) client_message.update_frame_length() return client_message
[ "def", "encode_request", "(", "name", ",", "entry_processor", ",", "keys", ")", ":", "client_message", "=", "ClientMessage", "(", "payload_size", "=", "calculate_size", "(", "name", ",", "entry_processor", ",", "keys", ")", ")", "client_message", ".", "set_messa...
Encode request into client_message
[ "Encode", "request", "into", "client_message" ]
python
train
nicferrier/md
src/mdlib/pull.py
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/pull.py#L128-L135
def filepull(maildir, localmaildir, noop=False, verbose=False, filterfile=None): """Pull one local maildir into another. The source need not be an md folder (it need not have a store). In this case filepull is kind of an import. """ store = _Store(maildir) _pull(store, localmaildir, noop, verbose, filterfile)
[ "def", "filepull", "(", "maildir", ",", "localmaildir", ",", "noop", "=", "False", ",", "verbose", "=", "False", ",", "filterfile", "=", "None", ")", ":", "store", "=", "_Store", "(", "maildir", ")", "_pull", "(", "store", ",", "localmaildir", ",", "no...
Pull one local maildir into another. The source need not be an md folder (it need not have a store). In this case filepull is kind of an import.
[ "Pull", "one", "local", "maildir", "into", "another", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/trial.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/trial.py#L77-L89
def report_final_result(metric): """Reports final result to tuner. metric: serializable object. """ assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result' metric = json_tricks.dumps({ 'parameter_id': _params['parameter_id'], 'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID, 'type': 'FINAL', 'sequence': 0, # TODO: may be unnecessary 'value': metric }) platform.send_metric(metric)
[ "def", "report_final_result", "(", "metric", ")", ":", "assert", "_params", "is", "not", "None", ",", "'nni.get_next_parameter() needs to be called before report_final_result'", "metric", "=", "json_tricks", ".", "dumps", "(", "{", "'parameter_id'", ":", "_params", "[",...
Reports final result to tuner. metric: serializable object.
[ "Reports", "final", "result", "to", "tuner", ".", "metric", ":", "serializable", "object", "." ]
python
train
koalalorenzo/python-digitalocean
digitalocean/Domain.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Domain.py#L39-L74
def create_new_domain_record(self, *args, **kwargs): """ Create new domain record. https://developers.digitalocean.com/#create-a-new-domain-record Args: type: The record type (A, MX, CNAME, etc). name: The host name, alias, or service being defined by the record data: Variable data depending on record type. Optional Args: priority: The priority of the host port: The port that the service is accessible on weight: The weight of records with the same priority """ data = { "type": kwargs.get("type", None), "name": kwargs.get("name", None), "data": kwargs.get("data", None) } #  Optional Args if kwargs.get("priority", None): data['priority'] = kwargs.get("priority", None) if kwargs.get("port", None): data['port'] = kwargs.get("port", None) if kwargs.get("weight", None): data['weight'] = kwargs.get("weight", None) return self.get_data( "domains/%s/records" % self.name, type=POST, params=data )
[ "def", "create_new_domain_record", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "\"type\"", ":", "kwargs", ".", "get", "(", "\"type\"", ",", "None", ")", ",", "\"name\"", ":", "kwargs", ".", "get", "(", "\"name\...
Create new domain record. https://developers.digitalocean.com/#create-a-new-domain-record Args: type: The record type (A, MX, CNAME, etc). name: The host name, alias, or service being defined by the record data: Variable data depending on record type. Optional Args: priority: The priority of the host port: The port that the service is accessible on weight: The weight of records with the same priority
[ "Create", "new", "domain", "record", ".", "https", ":", "//", "developers", ".", "digitalocean", ".", "com", "/", "#create", "-", "a", "-", "new", "-", "domain", "-", "record" ]
python
valid
horazont/aioxmpp
aioxmpp/stringprep.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stringprep.py#L81-L104
def check_bidi(chars): """ Check proper bidirectionality as per stringprep. Operates on a list of unicode characters provided in `chars`. """ # the empty string is valid, as it cannot violate the RandALCat constraints if not chars: return # first_is_RorAL = unicodedata.bidirectional(chars[0]) in {"R", "AL"} # if first_is_RorAL: has_RandALCat = any(is_RandALCat(c) for c in chars) if not has_RandALCat: return has_LCat = any(is_LCat(c) for c in chars) if has_LCat: raise ValueError("L and R/AL characters must not occur in the same" " string") if not is_RandALCat(chars[0]) or not is_RandALCat(chars[-1]): raise ValueError("R/AL string must start and end with R/AL character.")
[ "def", "check_bidi", "(", "chars", ")", ":", "# the empty string is valid, as it cannot violate the RandALCat constraints", "if", "not", "chars", ":", "return", "# first_is_RorAL = unicodedata.bidirectional(chars[0]) in {\"R\", \"AL\"}", "# if first_is_RorAL:", "has_RandALCat", "=", ...
Check proper bidirectionality as per stringprep. Operates on a list of unicode characters provided in `chars`.
[ "Check", "proper", "bidirectionality", "as", "per", "stringprep", ".", "Operates", "on", "a", "list", "of", "unicode", "characters", "provided", "in", "chars", "." ]
python
train
idank/bashlex
bashlex/parser.py
https://github.com/idank/bashlex/blob/800cb7e3c634eaa3c81f8a8648fd7fd4e27050ac/bashlex/parser.py#L509-L518
def p_pipeline(p): '''pipeline : pipeline BAR newline_list pipeline | pipeline BAR_AND newline_list pipeline | command''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] p[0].append(ast.node(kind='pipe', pipe=p[2], pos=p.lexspan(2))) p[0].extend(p[len(p) - 1])
[ "def", "p_pipeline", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "p", "[", "0", "]", ".", "append", ...
pipeline : pipeline BAR newline_list pipeline | pipeline BAR_AND newline_list pipeline | command
[ "pipeline", ":", "pipeline", "BAR", "newline_list", "pipeline", "|", "pipeline", "BAR_AND", "newline_list", "pipeline", "|", "command" ]
python
train
StanfordVL/robosuite
robosuite/scripts/demo_collect_and_playback_data.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/scripts/demo_collect_and_playback_data.py#L35-L61
def playback_trajectory(env, ep_dir): """Playback data from an episode. Args: ep_dir: The path to the directory containing data for an episode. """ # first reload the model from the xml xml_path = os.path.join(ep_dir, "model.xml") with open(xml_path, "r") as f: env.reset_from_xml_string(f.read()) state_paths = os.path.join(ep_dir, "state_*.npz") # read states back, load them one by one, and render t = 0 for state_file in sorted(glob(state_paths)): print(state_file) dic = np.load(state_file) states = dic["states"] for state in states: env.sim.set_state_from_flattened(state) env.sim.forward() env.render() t += 1 if t % 100 == 0: print(t)
[ "def", "playback_trajectory", "(", "env", ",", "ep_dir", ")", ":", "# first reload the model from the xml", "xml_path", "=", "os", ".", "path", ".", "join", "(", "ep_dir", ",", "\"model.xml\"", ")", "with", "open", "(", "xml_path", ",", "\"r\"", ")", "as", "...
Playback data from an episode. Args: ep_dir: The path to the directory containing data for an episode.
[ "Playback", "data", "from", "an", "episode", "." ]
python
train
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L800-L811
def _adjust_inserted_indices(inserted_indices_list, prune_indices_list): """Adjust inserted indices, if there are pruned elements.""" # Created a copy, to preserve cached property updated_inserted = [[i for i in dim_inds] for dim_inds in inserted_indices_list] pruned_and_inserted = zip(prune_indices_list, updated_inserted) for prune_inds, inserted_inds in pruned_and_inserted: # Only prune indices if they're not H&S (inserted) prune_inds = prune_inds[~np.in1d(prune_inds, inserted_inds)] for i, ind in enumerate(inserted_inds): ind -= np.sum(prune_inds < ind) inserted_inds[i] = ind return updated_inserted
[ "def", "_adjust_inserted_indices", "(", "inserted_indices_list", ",", "prune_indices_list", ")", ":", "# Created a copy, to preserve cached property", "updated_inserted", "=", "[", "[", "i", "for", "i", "in", "dim_inds", "]", "for", "dim_inds", "in", "inserted_indices_lis...
Adjust inserted indices, if there are pruned elements.
[ "Adjust", "inserted", "indices", "if", "there", "are", "pruned", "elements", "." ]
python
train
numenta/nupic
src/nupic/frameworks/opf/helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L80-L104
def _loadDescriptionFile(descriptionPyPath): """Loads a description file and returns it as a module. descriptionPyPath: path of description.py file to load """ global g_descriptionImportCount if not os.path.isfile(descriptionPyPath): raise RuntimeError(("Experiment description file %s does not exist or " + \ "is not a file") % (descriptionPyPath,)) mod = imp.load_source("pf_description%d" % g_descriptionImportCount, descriptionPyPath) g_descriptionImportCount += 1 if not hasattr(mod, "descriptionInterface"): raise RuntimeError("Experiment description file %s does not define %s" % \ (descriptionPyPath, "descriptionInterface")) if not isinstance(mod.descriptionInterface, exp_description_api.DescriptionIface): raise RuntimeError(("Experiment description file %s defines %s but it " + \ "is not DescriptionIface-based") % \ (descriptionPyPath, name)) return mod
[ "def", "_loadDescriptionFile", "(", "descriptionPyPath", ")", ":", "global", "g_descriptionImportCount", "if", "not", "os", ".", "path", ".", "isfile", "(", "descriptionPyPath", ")", ":", "raise", "RuntimeError", "(", "(", "\"Experiment description file %s does not exis...
Loads a description file and returns it as a module. descriptionPyPath: path of description.py file to load
[ "Loads", "a", "description", "file", "and", "returns", "it", "as", "a", "module", "." ]
python
valid
Galarzaa90/tibia.py
tibiapy/utils.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L210-L239
def try_date(obj) -> Optional[datetime.date]: """Attempts to convert an object into a date. If the date format is known, it's recommended to use the corresponding function This is meant to be used in constructors. Parameters ---------- obj: :class:`str`, :class:`datetime.datetime`, :class:`datetime.date` The object to convert. Returns ------- :class:`datetime.date`, optional The represented date. """ if obj is None: return None if isinstance(obj, datetime.datetime): return obj.date() if isinstance(obj, datetime.date): return obj res = parse_tibia_date(obj) if res is not None: return res res = parse_tibia_full_date(obj) if res is not None: return res res = parse_tibiadata_date(obj) return res
[ "def", "try_date", "(", "obj", ")", "->", "Optional", "[", "datetime", ".", "date", "]", ":", "if", "obj", "is", "None", ":", "return", "None", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "obj", ".", "date", ...
Attempts to convert an object into a date. If the date format is known, it's recommended to use the corresponding function This is meant to be used in constructors. Parameters ---------- obj: :class:`str`, :class:`datetime.datetime`, :class:`datetime.date` The object to convert. Returns ------- :class:`datetime.date`, optional The represented date.
[ "Attempts", "to", "convert", "an", "object", "into", "a", "date", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/config.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/config.py#L1346-L1358
def is_subdomains_enabled(blockstack_opts): """ Can we do subdomain operations? """ if not is_atlas_enabled(blockstack_opts): log.debug("Subdomains are disabled") return False if 'subdomaindb_path' not in blockstack_opts: log.debug("Subdomains are disabled: no 'subdomaindb_path' path set") return False return True
[ "def", "is_subdomains_enabled", "(", "blockstack_opts", ")", ":", "if", "not", "is_atlas_enabled", "(", "blockstack_opts", ")", ":", "log", ".", "debug", "(", "\"Subdomains are disabled\"", ")", "return", "False", "if", "'subdomaindb_path'", "not", "in", "blockstack...
Can we do subdomain operations?
[ "Can", "we", "do", "subdomain", "operations?" ]
python
train
yjzhang/uncurl_python
uncurl/state_estimation.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/state_estimation.py#L121-L132
def initialize_weights_nn(data, means, lognorm=True): """ Initializes the weights with a nearest-neighbor approach using the means. """ # TODO genes, cells = data.shape k = means.shape[1] if lognorm: data = log1p(cell_normalize(data)) for i in range(cells): for j in range(k): pass
[ "def", "initialize_weights_nn", "(", "data", ",", "means", ",", "lognorm", "=", "True", ")", ":", "# TODO", "genes", ",", "cells", "=", "data", ".", "shape", "k", "=", "means", ".", "shape", "[", "1", "]", "if", "lognorm", ":", "data", "=", "log1p", ...
Initializes the weights with a nearest-neighbor approach using the means.
[ "Initializes", "the", "weights", "with", "a", "nearest", "-", "neighbor", "approach", "using", "the", "means", "." ]
python
train
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2256-L2268
def strip_HTML(s): """Simple, clumsy, slow HTML tag stripper""" result = '' total = 0 for c in s: if c == '<': total = 1 elif c == '>': total = 0 result += ' ' elif total == 0: result += c return result
[ "def", "strip_HTML", "(", "s", ")", ":", "result", "=", "''", "total", "=", "0", "for", "c", "in", "s", ":", "if", "c", "==", "'<'", ":", "total", "=", "1", "elif", "c", "==", "'>'", ":", "total", "=", "0", "result", "+=", "' '", "elif", "tot...
Simple, clumsy, slow HTML tag stripper
[ "Simple", "clumsy", "slow", "HTML", "tag", "stripper" ]
python
train
KelSolaar/Umbra
umbra/ui/widgets/basic_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L972-L989
def set_font_increment(self, value): """ Increments the document font size. :param value: Font size increment. :type value: int :return: Method success. :rtype: bool """ font = self.font() point_size = font.pointSize() + value if point_size < self.__minimum_font_point_size or point_size > self.__maximum_font_point_size: return False font.setPointSize(point_size) self.setFont(font) return True
[ "def", "set_font_increment", "(", "self", ",", "value", ")", ":", "font", "=", "self", ".", "font", "(", ")", "point_size", "=", "font", ".", "pointSize", "(", ")", "+", "value", "if", "point_size", "<", "self", ".", "__minimum_font_point_size", "or", "p...
Increments the document font size. :param value: Font size increment. :type value: int :return: Method success. :rtype: bool
[ "Increments", "the", "document", "font", "size", "." ]
python
train
d0c-s4vage/pfp
pfp/native/__init__.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/__init__.py#L5-L47
def native(name, ret, interp=None, send_interp=False): """Used as a decorator to add the decorated function to the pfp interpreter so that it can be used from within scripts. :param str name: The name of the function as it will be exposed in template scripts. :param pfp.fields.Field ret: The return type of the function (a class) :param pfp.interp.PfpInterp interp: The specific interpreter to add the function to :param bool send_interp: If the current interpreter should be passed to the function. Examples: The example below defines a ``Sum`` function that will return the sum of all parameters passed to the function: :: from pfp.fields import PYVAL @native(name="Sum", ret=pfp.fields.Int64) def sum_numbers(params, ctxt, scope, stream, coord): res = 0 for param in params: res += PYVAL(param) return res The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it requires that the interpreter be sent as a parameter: :: @native(name="Int3", ret=pfp.fields.Void, send_interp=True) def int3(params, ctxt, scope, stream, coord, interp): if interp._no_debug: return if interp._int3: interp.debugger = PfpDbg(interp) interp.debugger.cmdloop() """ def native_decorator(func): @functools.wraps(func) def native_wrapper(*args, **kwargs): return func(*args, **kwargs) pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp) return native_wrapper return native_decorator
[ "def", "native", "(", "name", ",", "ret", ",", "interp", "=", "None", ",", "send_interp", "=", "False", ")", ":", "def", "native_decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "native_wrapper", "(", "*", "...
Used as a decorator to add the decorated function to the pfp interpreter so that it can be used from within scripts. :param str name: The name of the function as it will be exposed in template scripts. :param pfp.fields.Field ret: The return type of the function (a class) :param pfp.interp.PfpInterp interp: The specific interpreter to add the function to :param bool send_interp: If the current interpreter should be passed to the function. Examples: The example below defines a ``Sum`` function that will return the sum of all parameters passed to the function: :: from pfp.fields import PYVAL @native(name="Sum", ret=pfp.fields.Int64) def sum_numbers(params, ctxt, scope, stream, coord): res = 0 for param in params: res += PYVAL(param) return res The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it requires that the interpreter be sent as a parameter: :: @native(name="Int3", ret=pfp.fields.Void, send_interp=True) def int3(params, ctxt, scope, stream, coord, interp): if interp._no_debug: return if interp._int3: interp.debugger = PfpDbg(interp) interp.debugger.cmdloop()
[ "Used", "as", "a", "decorator", "to", "add", "the", "decorated", "function", "to", "the", "pfp", "interpreter", "so", "that", "it", "can", "be", "used", "from", "within", "scripts", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiInterface.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiInterface.py#L197-L205
def update_channel(self, channel): """ Method to update a channel. :param channel: List containing channel's desired to be created on database. :return: Id. """ data = {'channels': channel} return super(ApiInterfaceRequest, self).put('api/v3/channel/', data)
[ "def", "update_channel", "(", "self", ",", "channel", ")", ":", "data", "=", "{", "'channels'", ":", "channel", "}", "return", "super", "(", "ApiInterfaceRequest", ",", "self", ")", ".", "put", "(", "'api/v3/channel/'", ",", "data", ")" ]
Method to update a channel. :param channel: List containing channel's desired to be created on database. :return: Id.
[ "Method", "to", "update", "a", "channel", ".", ":", "param", "channel", ":", "List", "containing", "channel", "s", "desired", "to", "be", "created", "on", "database", ".", ":", "return", ":", "Id", "." ]
python
train