repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
hearsaycorp/normalize
normalize/diff.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/diff.py#L290-L302
def normalize_object_slot(self, value=_nothing, prop=None, obj=None):
    """Wrap ``normalize_slot``, first applying any ``compare_as`` clean-up
    declared on the property.

    Unlike ``normalize_slot``, this hook also has access to the object the
    slot lives in, which ``compare_as`` may need as its bound target.
    """
    if value is not _nothing and hasattr(prop, "compare_as"):
        is_method, takes_value = getattr(prop, "compare_as_info", (False, 1))
        call_args = []
        if is_method:
            # compare_as is an unbound method: pass the owning object first.
            call_args.append(obj)
        if takes_value:
            call_args.append(value)
        value = prop.compare_as(*call_args)
    return self.normalize_slot(value, prop)
[ "def", "normalize_object_slot", "(", "self", ",", "value", "=", "_nothing", ",", "prop", "=", "None", ",", "obj", "=", "None", ")", ":", "if", "value", "is", "not", "_nothing", "and", "hasattr", "(", "prop", ",", "\"compare_as\"", ")", ":", "method", "...
This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value.
[ "This", "hook", "wraps", "normalize_slot", "and", "performs", "clean", "-", "ups", "which", "require", "access", "to", "the", "object", "the", "slot", "is", "in", "as", "well", "as", "the", "value", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py#L176-L200
def update_service_endpoint(self, endpoint, project, endpoint_id, operation=None):
    """UpdateServiceEndpoint.
    [Preview API] Update a service endpoint.

    :param :class:`<ServiceEndpoint> <azure.devops.v5_0.service_endpoint.models.ServiceEndpoint>` endpoint: Service endpoint to update.
    :param str project: Project ID or project name
    :param str endpoint_id: Id of the service endpoint to update.
    :param str operation: Operation for the service endpoint.
    :rtype: :class:`<ServiceEndpoint> <azure.devops.v5_0.service_endpoint.models.ServiceEndpoint>`
    """
    routes = {}
    if project is not None:
        routes['project'] = self._serialize.url('project', project, 'str')
    if endpoint_id is not None:
        routes['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
    params = {}
    if operation is not None:
        params['operation'] = self._serialize.query('operation', operation, 'str')
    body = self._serialize.body(endpoint, 'ServiceEndpoint')
    response = self._send(http_method='PUT',
                          location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
                          version='5.0-preview.2',
                          route_values=routes,
                          query_parameters=params,
                          content=body)
    return self._deserialize('ServiceEndpoint', response)
[ "def", "update_service_endpoint", "(", "self", ",", "endpoint", ",", "project", ",", "endpoint_id", ",", "operation", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=...
UpdateServiceEndpoint. [Preview API] Update a service endpoint. :param :class:`<ServiceEndpoint> <azure.devops.v5_0.service_endpoint.models.ServiceEndpoint>` endpoint: Service endpoint to update. :param str project: Project ID or project name :param str endpoint_id: Id of the service endpoint to update. :param str operation: Operation for the service endpoint. :rtype: :class:`<ServiceEndpoint> <azure.devops.v5_0.service_endpoint.models.ServiceEndpoint>`
[ "UpdateServiceEndpoint", ".", "[", "Preview", "API", "]", "Update", "a", "service", "endpoint", ".", ":", "param", ":", "class", ":", "<ServiceEndpoint", ">", "<azure", ".", "devops", ".", "v5_0", ".", "service_endpoint", ".", "models", ".", "ServiceEndpoint",...
python
train
bspaans/python-mingus
mingus/extra/tablature.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tablature.py#L28-L41
def begin_track(tuning, padding=2):
    """Build the first few characters of every bar, one string per course.

    Args:
        tuning: Tuning object whose ``tuning`` attribute is a sequence of
            notes providing ``to_shorthand()``.
        padding (int): Number of '-' characters appended after the '||'.

    Returns:
        list of str: One aligned prefix per string, e.g. ' E  ||--'.
    """
    names = [note.to_shorthand() for note in tuning.tuning]
    # Bug fix: the original used len(max(names)), which picks the
    # lexicographically greatest name rather than the longest one, so a
    # long shorthand (e.g. 'Bb') could overflow its column.
    basesize = max(len(name) for name in names) + 3
    res = []
    for name in names:
        prefix = ' %s' % name
        # Pad every row to the same width, then add the bar start.
        prefix += ' ' * (basesize - len(prefix)) + '||' + '-' * padding
        res.append(prefix)
    return res
[ "def", "begin_track", "(", "tuning", ",", "padding", "=", "2", ")", ":", "# find longest shorthand tuning base", "names", "=", "[", "x", ".", "to_shorthand", "(", ")", "for", "x", "in", "tuning", ".", "tuning", "]", "basesize", "=", "len", "(", "max", "(...
Helper function that builds the first few characters of every bar.
[ "Helper", "function", "that", "builds", "the", "first", "few", "characters", "of", "every", "bar", "." ]
python
train
globality-corp/flake8-logging-format
logging_format/visitor.py
https://github.com/globality-corp/flake8-logging-format/blob/3c6ce53d0ff1ec369799cff0ed6d048343252e40/logging_format/visitor.py#L228-L239
def get_except_handler_name(self, node):
    """Return the exception name bound by an ``ExceptHandler`` node.

    Handles both py2 and py3: Python 2 stores a Name AST node, Python 3
    stores the identifier as a plain string.
    """
    bound = node.name
    if not bound:
        return None
    return bound.id if version_info < (3,) else bound
[ "def", "get_except_handler_name", "(", "self", ",", "node", ")", ":", "name", "=", "node", ".", "name", "if", "not", "name", ":", "return", "None", "if", "version_info", "<", "(", "3", ",", ")", ":", "return", "name", ".", "id", "return", "name" ]
Helper to get the exception name from an ExceptHandler node in both py2 and py3.
[ "Helper", "to", "get", "the", "exception", "name", "from", "an", "ExceptHandler", "node", "in", "both", "py2", "and", "py3", "." ]
python
test
mozilla/FoxPuppet
foxpuppet/windows/browser/notifications/base.py
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L95-L99
def close(self):
    """Close the notification and wait until it is gone."""
    chrome = self.selenium.context(self.selenium.CONTEXT_CHROME)
    with chrome:
        self.find_close_button().click()
    self.window.wait_for_notification(None)
[ "def", "close", "(", "self", ")", ":", "with", "self", ".", "selenium", ".", "context", "(", "self", ".", "selenium", ".", "CONTEXT_CHROME", ")", ":", "self", ".", "find_close_button", "(", ")", ".", "click", "(", ")", "self", ".", "window", ".", "wa...
Close the notification.
[ "Close", "the", "notification", "." ]
python
train
chaoss/grimoirelab-toolkit
grimoirelab_toolkit/datetime.py
https://github.com/chaoss/grimoirelab-toolkit/blob/30f36e89f3070f1a7b9973ea3c8a31f4b792ee2b/grimoirelab_toolkit/datetime.py#L65-L94
def datetime_to_utc(ts):
    """Convert a timestamp to UTC+0 timezone.

    Returns the given datetime object converted to a date with UTC+0
    timezone. For naive datetimes, it will be assumed that they are in
    UTC+0. When the timezone is wrong, UTC+0 will be set as default
    (using `dateutil.tz.tzutc` object).

    :param ts: timestamp to convert
    :returns: a datetime object
    :raises InvalidDateError: when the given parameter is not an
        instance of datetime

    (Doc fix: the parameter was previously documented as ``dt`` although
    the signature names it ``ts``.)
    """
    if not isinstance(ts, datetime.datetime):
        msg = '<%s> object' % type(ts)
        raise InvalidDateError(date=msg)

    if not ts.tzinfo:
        # Naive datetimes are taken to already be in UTC+0.
        ts = ts.replace(tzinfo=dateutil.tz.tzutc())

    try:
        ts = ts.astimezone(dateutil.tz.tzutc())
    except ValueError:
        # Broken tzinfo (e.g. an out-of-range offset): discard it and
        # force UTC+0 instead of failing.
        logger.warning("Date %s str does not have a valid timezone", ts)
        logger.warning("Date converted to UTC removing timezone info")
        ts = ts.replace(tzinfo=dateutil.tz.tzutc()).astimezone(dateutil.tz.tzutc())

    return ts
[ "def", "datetime_to_utc", "(", "ts", ")", ":", "if", "not", "isinstance", "(", "ts", ",", "datetime", ".", "datetime", ")", ":", "msg", "=", "'<%s> object'", "%", "type", "(", "ts", ")", "raise", "InvalidDateError", "(", "date", "=", "msg", ")", "if", ...
Convert a timestamp to UTC+0 timezone. Returns the given datetime object converted to a date with UTC+0 timezone. For naive datetimes, it will be assumed that they are in UTC+0. When the timezone is wrong, UTC+0 will be set as default (using `dateutil.tz.tzutc` object). :param dt: timestamp to convert :returns: a datetime object :raises InvalidDateError: when the given parameter is not an instance of datetime
[ "Convert", "a", "timestamp", "to", "UTC", "+", "0", "timezone", "." ]
python
train
alberanid/python-iplib
iplib.py
https://github.com/alberanid/python-iplib/blob/488b56fe57ad836b27feec9e76f51883db28faa6/iplib.py#L495-L518
def convert(ip, notation=IP_DOT, inotation=IP_UNKNOWN, check=True):
    """Convert among IP address notations.

    Given an IP address, this function returns the address in another
    notation.

    @param ip: the IP address.
    @type ip: integers, strings or object with an appropriate __str()__ method.
    @param notation: the notation of the output (default: IP_DOT).
    @type notation: one of the IP_* constants, or the equivalent strings.
    @param inotation: force the input to be considered in the given notation
        (default the notation of the input is autodetected).
    @type inotation: one of the IP_* constants, or the equivalent strings.
    @param check: force the notation check on the input.
    @type check: True force the check, False force not to check and None
        do the check only if the inotation is unknown.
    @return: a string representing the IP in the selected notation.
    @raise ValueError: raised when the input is in unknown notation."""
    # _isnm=False: this entry point handles addresses, not netmasks.
    return _convert(ip, notation, inotation, _check=check, _isnm=False)
[ "def", "convert", "(", "ip", ",", "notation", "=", "IP_DOT", ",", "inotation", "=", "IP_UNKNOWN", ",", "check", "=", "True", ")", ":", "return", "_convert", "(", "ip", ",", "notation", ",", "inotation", ",", "_check", "=", "check", ",", "_isnm", "=", ...
Convert among IP address notations. Given an IP address, this function returns the address in another notation. @param ip: the IP address. @type ip: integers, strings or object with an appropriate __str()__ method. @param notation: the notation of the output (default: IP_DOT). @type notation: one of the IP_* constants, or the equivalent strings. @param inotation: force the input to be considered in the given notation (default the notation of the input is autodetected). @type inotation: one of the IP_* constants, or the equivalent strings. @param check: force the notation check on the input. @type check: True force the check, False force not to check and None do the check only if the inotation is unknown. @return: a string representing the IP in the selected notation. @raise ValueError: raised when the input is in unknown notation.
[ "Convert", "among", "IP", "address", "notations", "." ]
python
valid
ten10solutions/Geist
geist/vision.py
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L138-L201
def overlapped_convolution(bin_template, bin_image, tollerance=0.5, splits=(4, 2)):
    """
    Both inputs hold only binary values, while RFFT2 works on float64
    greyscale values, so the convolution can be made more efficient by
    breaking the image up into :splits: sections.  Each of these sections
    has its greyscale value adjusted and is then stacked.  The convolution
    is applied to this single 'stack' of images, and the resultant position
    matches are adjusted back to full-image coordinates.
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    # Nominal section size, before the template-sized overlap is added.
    h = ih // hs
    w = iw // ws
    count = numpy.count_nonzero(bin_template)
    assert count > 0
    # Every section must be at least as large as the template.
    assert h >= th
    assert w >= tw
    # Section bounds; each extends (template size - 1) past its nominal edge
    # so matches straddling a section boundary are not lost.
    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
    # image_stacks is Origin (x,y), array, z (height in stack).  The weight
    # (count + 1) ** num keeps each section's convolution response in a
    # disjoint numeric band, so the sections can be separated again (by
    # division and modulo below) after one shared convolution.
    image_stacks = [
        ((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
        for num, (x1, x2, y1, y2) in enumerate(
            (x1, x2, y1, y2)
            for (x1, x2) in xoffset
            for (y1, y2) in yoffset)]
    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)
    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2
    # Weighted sum of all sections padded to a common shape: the 'stack'.
    overlapped_image = sum_2d_images(
        pad_bin_image_to_shape(i, (pad_h, pad_w)) * num
        for _, i, num in image_stacks)
    # Calculate the convolution of the FFT's of the overlapped image &
    # template (template reversed in both axes to make this a correlation).
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1],
                               overlapped_image.shape))
    # Reverse the FFT to find the result overlapped image.
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match.
    results = set()
    # Peel sections off from the highest weight down; the modulo at the end
    # of each pass removes that section's contribution before the next.
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        # A perfect match scores exactly `count`; accept within tolerance.
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered))  # bottom right
        for (fy, fx) in match_points:
            # Skip positions where the full template cannot fit.
            if fx < (tw - 1) or fy < (th - 1):
                continue
            # Convert bottom-right hit back to a top-left, full-image coord.
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
[ "def", "overlapped_convolution", "(", "bin_template", ",", "bin_image", ",", "tollerance", "=", "0.5", ",", "splits", "=", "(", "4", ",", "2", ")", ")", ":", "th", ",", "tw", "=", "bin_template", ".", "shape", "ih", ",", "iw", "=", "bin_image", ".", ...
As each of these images holds only binary values, and RFFT2 works on float64 greyscale values, we can make the convolution more efficient by breaking the image up into :splits: sections. Each one of these sections then has its greyscale value adjusted and then stacked. We then apply the convolution to this 'stack' of images, and adjust the resultant position matches.
[ "As", "each", "of", "these", "images", "are", "hold", "only", "binary", "values", "and", "RFFT2", "works", "on", "float64", "greyscale", "values", "we", "can", "make", "the", "convolution", "more", "efficient", "by", "breaking", "the", "image", "up", "into",...
python
train
saltstack/salt
salt/runners/manage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/manage.py#L471-L494
def not_alived(subset=None, show_ip=False, show_ipv4=None):
    '''
    .. versionadded:: 2015.8.0
    .. versionchanged:: 2019.2.0
        The 'show_ipv4' argument has been renamed to 'show_ip' as it now
        includes IPv6 addresses for IPv6-connected minions.

    Print a list of all minions that are NOT up according to Salt's presence
    detection (no commands will be sent)

    subset : None
        Pass in a CIDR range to filter minions by IP address.

    show_ip : False
        Also show the IP address each minion is connecting from.

    CLI Example:

    .. code-block:: bash

        salt-run manage.not_alived
    '''
    # Honour the deprecated show_ipv4 argument if a caller still passes it.
    resolved_show_ip = _show_ip_migration(show_ip, show_ipv4)
    return list_not_state(subset=subset, show_ip=resolved_show_ip)
[ "def", "not_alived", "(", "subset", "=", "None", ",", "show_ip", "=", "False", ",", "show_ipv4", "=", "None", ")", ":", "show_ip", "=", "_show_ip_migration", "(", "show_ip", ",", "show_ipv4", ")", "return", "list_not_state", "(", "subset", "=", "subset", "...
.. versionadded:: 2015.8.0 .. versionchanged:: 2019.2.0 The 'show_ipv4' argument has been renamed to 'show_ip' as it now includes IPv6 addresses for IPv6-connected minions. Print a list of all minions that are NOT up according to Salt's presence detection (no commands will be sent) subset : None Pass in a CIDR range to filter minions by IP address. show_ip : False Also show the IP address each minion is connecting from. CLI Example: .. code-block:: bash salt-run manage.not_alived
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0", "..", "versionchanged", "::", "2019", ".", "2", ".", "0", "The", "show_ipv4", "argument", "has", "been", "renamed", "to", "show_ip", "as", "it", "now", "includes", "IPv6", "addresses", "for", "IPv6"...
python
train
ray-project/ray
python/ray/tune/registry.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/registry.py#L24-L50
def register_trainable(name, trainable):
    """Register a trainable function or class.

    Args:
        name (str): Name to register.
        trainable (obj): Function or tune.Trainable class. Functions must
            take (config, status_reporter) as arguments and will be
            automatically converted into a class during registration.
    """
    from ray.tune.trainable import Trainable
    from ray.tune.function_runner import wrap_function

    if isinstance(trainable, type):
        # Already a class; assumed to be (a subclass of) Trainable.
        logger.debug("Detected class for trainable.")
    elif callable(trainable):
        # Plain functions and other callables are wrapped into a class.
        if isinstance(trainable, FunctionType):
            logger.debug("Detected function for trainable.")
        else:
            logger.warning(
                "Detected unknown callable for trainable. Converting to class.")
        trainable = wrap_function(trainable)

    if not issubclass(trainable, Trainable):
        raise TypeError("Second argument must be convertable to Trainable",
                        trainable)
    _global_registry.register(TRAINABLE_CLASS, name, trainable)
[ "def", "register_trainable", "(", "name", ",", "trainable", ")", ":", "from", "ray", ".", "tune", ".", "trainable", "import", "Trainable", "from", "ray", ".", "tune", ".", "function_runner", "import", "wrap_function", "if", "isinstance", "(", "trainable", ",",...
Register a trainable function or class. Args: name (str): Name to register. trainable (obj): Function or tune.Trainable class. Functions must take (config, status_reporter) as arguments and will be automatically converted into a class during registration.
[ "Register", "a", "trainable", "function", "or", "class", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10175-L10190
def format_size(size, threshold=1536):
    """Return file size as string from byte size.

    >>> format_size(1234)
    '1234 B'
    >>> format_size(12345678901)
    '11.50 GiB'

    """
    if size < threshold:
        return "%i B" % size
    remaining = size
    for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
        remaining /= 1024.0
        if remaining < threshold:
            return "%.2f %s" % (remaining, unit)
    # Larger than the threshold even in PiB.
    return 'ginormous'
[ "def", "format_size", "(", "size", ",", "threshold", "=", "1536", ")", ":", "if", "size", "<", "threshold", ":", "return", "\"%i B\"", "%", "size", "for", "unit", "in", "(", "'KiB'", ",", "'MiB'", ",", "'GiB'", ",", "'TiB'", ",", "'PiB'", ")", ":", ...
Return file size as string from byte size. >>> format_size(1234) '1234 B' >>> format_size(12345678901) '11.50 GiB'
[ "Return", "file", "size", "as", "string", "from", "byte", "size", "." ]
python
train
StellarCN/py-stellar-base
stellar_base/base58.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/base58.py#L73-L77
def b58encode_check(v):
    """Encode a string using Base58 with a 4 character checksum"""
    # Checksum is the first 4 bytes of a double SHA-256 of the payload.
    checksum = sha256(sha256(v).digest()).digest()[:4]
    return b58encode(v + checksum)
[ "def", "b58encode_check", "(", "v", ")", ":", "digest", "=", "sha256", "(", "sha256", "(", "v", ")", ".", "digest", "(", ")", ")", ".", "digest", "(", ")", "return", "b58encode", "(", "v", "+", "digest", "[", ":", "4", "]", ")" ]
Encode a string using Base58 with a 4 character checksum
[ "Encode", "a", "string", "using", "Base58", "with", "a", "4", "character", "checksum" ]
python
train
scanny/python-pptx
pptx/oxml/chart/shared.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/shared.py#L121-L126
def horz_offset(self, offset):
    """
    Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor".
    """
    x_mode = self.get_or_add_xMode()
    x_mode.val = ST_LayoutMode.FACTOR
    x_elm = self.get_or_add_x()
    x_elm.val = offset
[ "def", "horz_offset", "(", "self", ",", "offset", ")", ":", "self", ".", "get_or_add_xMode", "(", ")", ".", "val", "=", "ST_LayoutMode", ".", "FACTOR", "self", ".", "get_or_add_x", "(", ")", ".", "val", "=", "offset" ]
Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor".
[ "Set", "the", "value", "of", ".", "/", "c", ":", "x" ]
python
train
CEA-COSMIC/ModOpt
modopt/opt/algorithms.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/algorithms.py#L409-L429
def update_beta(self, beta):
    r"""Update beta

    This method updates beta only in the case of safeguarding (should only
    be done in the greedy restarting strategy).

    Parameters
    ----------
    beta: float
        The beta parameter

    Returns
    -------
    float: the new value for the beta parameter

    """
    if not self._safeguard:
        # No safeguarding: beta is left untouched.
        return beta
    scaled = beta * self.xi_restart
    return max(scaled, self.min_beta)
[ "def", "update_beta", "(", "self", ",", "beta", ")", ":", "if", "self", ".", "_safeguard", ":", "beta", "*=", "self", ".", "xi_restart", "beta", "=", "max", "(", "beta", ",", "self", ".", "min_beta", ")", "return", "beta" ]
r"""Update beta This method updates beta only in the case of safeguarding (should only be done in the greedy restarting strategy). Parameters ---------- beta: float The beta parameter Returns ------- float: the new value for the beta parameter
[ "r", "Update", "beta" ]
python
train
skulumani/kinematics
kinematics/attitude.py
https://github.com/skulumani/kinematics/blob/e8cb45efb40539982025ed0f85d6561f9f10fef0/kinematics/attitude.py#L273-L295
def dcmtoquat(dcm):
    """Convert a DCM (rotation matrix) to the equivalent quaternion.

    Parameters
    ----------
    dcm : (3, 3) numpy array
        Rotation matrix defining a rotation from the b to the a frame.

    Returns
    -------
    quat : (4,) numpy array
        Quaternion stored vector-first, scalar-last ([x y z w]).  The
        vector part is related to the eigen axis and is equivalent in
        both reference frames.
    """
    quat = np.zeros(4)
    # Scalar part from the trace; vector part from the skew-symmetric
    # component of the matrix via vee_map.
    scalar = 1 / 2 * np.sqrt(np.trace(dcm) + 1)
    quat[-1] = scalar
    quat[0:3] = 1 / 4 / scalar * vee_map(dcm - dcm.T)
    return quat
[ "def", "dcmtoquat", "(", "dcm", ")", ":", "quat", "=", "np", ".", "zeros", "(", "4", ")", "quat", "[", "-", "1", "]", "=", "1", "/", "2", "*", "np", ".", "sqrt", "(", "np", ".", "trace", "(", "dcm", ")", "+", "1", ")", "quat", "[", "0", ...
Convert DCM to quaternion This function will convert a rotation matrix, also called a direction cosine matrix into the equivalent quaternion. Parameters: ---------- dcm - (3,3) numpy array Numpy rotation matrix which defines a rotation from the b to a frame Returns: -------- quat - (4,) numpy array Array defining a quaternion where the quaternion is defined in terms of a vector and a scalar part. The vector is related to the eigen axis and equivalent in both reference frames [x y z w]
[ "Convert", "DCM", "to", "quaternion", "This", "function", "will", "convert", "a", "rotation", "matrix", "also", "called", "a", "direction", "cosine", "matrix", "into", "the", "equivalent", "quaternion", "." ]
python
train
ehansis/ozelot
ozelot/cache.py
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/cache.py#L204-L222
def clear(self, url=None, xpath=None):
    """Clear cache

    Args:
        url (str): If given, clear specific item only. Otherwise remove the DB file.
        xpath (str): xpath to search (may be ``None``)
    """
    if url is None:
        # No URL given: drop the whole cache DB file.
        self.close()
        if path.exists(self.db_path):
            remove(self.db_path)
        return
    query = self._query(url, xpath)
    if query.count() == 0:
        raise KeyError("Cannot clear URL, not in cache: " + str(url) + " xpath:" + str(xpath))
    query.delete()
    self.session.commit()
[ "def", "clear", "(", "self", ",", "url", "=", "None", ",", "xpath", "=", "None", ")", ":", "if", "url", "is", "not", "None", ":", "query", "=", "self", ".", "_query", "(", "url", ",", "xpath", ")", "if", "query", ".", "count", "(", ")", ">", ...
Clear cache Args: url (str): If given, clear specific item only. Otherwise remove the DB file. xpath (str): xpath to search (may be ``None``)
[ "Clear", "cache" ]
python
train
bslatkin/dpxdt
dpxdt/server/work_queue.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L345-L377
def _query(queue_name=None, build_id=None, release_id=None, run_id=None,
           count=None):
    """Queries for work items based on their criteria.

    Args:
        queue_name: Optional queue name to restrict to.
        build_id: Optional build ID to restrict to.
        release_id: Optional release ID to restrict to.
        run_id: Optional run ID to restrict to.
        count: How many tasks to fetch. Defaults to None, which means all
            tasks are fetch that match the query.

    Returns:
        List of WorkQueue items.
    """
    assert queue_name or build_id or release_id or run_id

    q = WorkQueue.query
    # Apply whichever filters were supplied (falsy values are skipped).
    criteria = (('queue_name', queue_name),
                ('build_id', build_id),
                ('release_id', release_id),
                ('run_id', run_id))
    for column, value in criteria:
        if value:
            q = q.filter_by(**{column: value})

    q = q.order_by(WorkQueue.created.desc())
    if count is not None:
        q = q.limit(count)
    return q.all()
[ "def", "_query", "(", "queue_name", "=", "None", ",", "build_id", "=", "None", ",", "release_id", "=", "None", ",", "run_id", "=", "None", ",", "count", "=", "None", ")", ":", "assert", "queue_name", "or", "build_id", "or", "release_id", "or", "run_id", ...
Queries for work items based on their criteria. Args: queue_name: Optional queue name to restrict to. build_id: Optional build ID to restrict to. release_id: Optional release ID to restrict to. run_id: Optional run ID to restrict to. count: How many tasks to fetch. Defaults to None, which means all tasks are fetch that match the query. Returns: List of WorkQueue items.
[ "Queries", "for", "work", "items", "based", "on", "their", "criteria", "." ]
python
train
elliterate/capybara.py
capybara/session_matchers.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/session_matchers.py#L58-L73
def has_current_path(self, path, **kwargs):
    """
    Checks if the page has the given path.

    Args:
        path (str | RegexObject): The string or regex that the current "path" should match.
        **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.

    Returns:
        bool: Whether it matches.
    """
    try:
        result = self.assert_current_path(path, **kwargs)
    except ExpectationNotMet:
        # The assertion variant raises; the boolean variant returns False.
        return False
    return result
[ "def", "has_current_path", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "self", ".", "assert_current_path", "(", "path", ",", "*", "*", "kwargs", ")", "except", "ExpectationNotMet", ":", "return", "False" ]
Checks if the page has the given path. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: bool: Whether it matches.
[ "Checks", "if", "the", "page", "has", "the", "given", "path", "." ]
python
test
jesford/cluster-lensing
clusterlensing/clusters.py
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/clusters.py#L32-L47
def calc_delta_c(c200):
    """Calculate characteristic overdensity from concentration.

    Parameters
    ----------
    c200 : ndarray or float
        Cluster concentration parameter.

    Returns
    ----------
    ndarray or float
        Cluster characteristic overdensity, of same type as c200.
    """
    numerator = (200. / 3.) * c200 ** 3.
    denominator = np.log(1. + c200) - (c200 / (1. + c200))
    return numerator / denominator
[ "def", "calc_delta_c", "(", "c200", ")", ":", "top", "=", "(", "200.", "/", "3.", ")", "*", "c200", "**", "3.", "bottom", "=", "np", ".", "log", "(", "1.", "+", "c200", ")", "-", "(", "c200", "/", "(", "1.", "+", "c200", ")", ")", "return", ...
Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200.
[ "Calculate", "characteristic", "overdensity", "from", "concentration", "." ]
python
train
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L705-L717
def get_template(self, template_id):
    ''' Gets a Template which includes a list of Accounts that can access it

    Args:

        template_id (str): The id of the template to retrieve

    Returns:
        A Template object

    '''
    return self._get_request().get(self.TEMPLATE_GET_URL + template_id)
[ "def", "get_template", "(", "self", ",", "template_id", ")", ":", "request", "=", "self", ".", "_get_request", "(", ")", "return", "request", ".", "get", "(", "self", ".", "TEMPLATE_GET_URL", "+", "template_id", ")" ]
Gets a Template which includes a list of Accounts that can access it Args: template_id (str): The id of the template to retrieve Returns: A Template object
[ "Gets", "a", "Template", "which", "includes", "a", "list", "of", "Accounts", "that", "can", "access", "it" ]
python
train
genialis/django-rest-framework-reactive
src/rest_framework_reactive/views.py
https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/views.py#L7-L16
def post(self, request):
    """Handle a query observer unsubscription request."""
    params = request.query_params
    try:
        observer_id = params['observer']
        session_id = params['subscriber']
    except KeyError:
        # Both query parameters are required.
        return response.Response(status=400)

    observer.remove_subscriber(session_id, observer_id)
    return response.Response()
[ "def", "post", "(", "self", ",", "request", ")", ":", "try", ":", "observer_id", "=", "request", ".", "query_params", "[", "'observer'", "]", "session_id", "=", "request", ".", "query_params", "[", "'subscriber'", "]", "except", "KeyError", ":", "return", ...
Handle a query observer unsubscription request.
[ "Handle", "a", "query", "observer", "unsubscription", "request", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4878-L4893
def keyframe(self, keyframe):
    """Set keyframe.

    A keyframe may be assigned exactly once; re-assigning the same
    keyframe is a no-op, assigning a different one raises.
    """
    if self._keyframe == keyframe:
        # Same keyframe already set: nothing to do.
        return
    if self._keyframe is not None:
        raise RuntimeError('cannot reset keyframe')
    # The page must have one offset/count pair per keyframe data offset.
    if len(self._offsetscounts[0]) != len(keyframe.dataoffsets):
        raise RuntimeError('incompatible keyframe')
    if keyframe.is_tiled:
        # NOTE(review): deliberately a no-op in the original; tiled
        # keyframes get no special handling here — confirm upstream.
        pass
    if keyframe.is_contiguous:
        # Contiguous data: collapse to a single offset with the total
        # byte count taken from the keyframe.
        self._offsetscounts = ([self._offsetscounts[0][0]],
                               [keyframe.is_contiguous[1]])
    else:
        self._offsetscounts = clean_offsetscounts(*self._offsetscounts)
    self._keyframe = keyframe
[ "def", "keyframe", "(", "self", ",", "keyframe", ")", ":", "if", "self", ".", "_keyframe", "==", "keyframe", ":", "return", "if", "self", ".", "_keyframe", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'cannot reset keyframe'", ")", "if", "len",...
Set keyframe.
[ "Set", "keyframe", "." ]
python
train
zestyping/star-destroyer
star_destroyer.py
https://github.com/zestyping/star-destroyer/blob/e23584c85d1e8b8f098e5c75977c6a98a41f3f68/star_destroyer.py#L70-L80
def find_module(modpath):
    """Return the file path of the module with the given modpath, or None.

    Searches every root on ``sys.path`` for either a plain module file
    (``pkg/mod.py``) or a package (``pkg/mod/__init__.py``).  (Doc fix:
    the previous docstring suggested a boolean, but the found path — or
    ``None`` — is what is returned.)
    """
    module_path = modpath.replace('.', '/') + '.py'
    init_path = modpath.replace('.', '/') + '/__init__.py'
    for root_path in sys.path:
        # Module file takes precedence over a package directory, per root.
        for candidate in (module_path, init_path):
            path = os.path.join(root_path, candidate)
            if os.path.isfile(path):
                return path
    return None
[ "def", "find_module", "(", "modpath", ")", ":", "module_path", "=", "modpath", ".", "replace", "(", "'.'", ",", "'/'", ")", "+", "'.py'", "init_path", "=", "modpath", ".", "replace", "(", "'.'", ",", "'/'", ")", "+", "'/__init__.py'", "for", "root_path",...
Determines whether a module exists with the given modpath.
[ "Determines", "whether", "a", "module", "exists", "with", "the", "given", "modpath", "." ]
python
train
quantmind/pulsar
pulsar/apps/data/channels.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/data/channels.py#L135-L150
async def register(self, channel, event, callback): """Register a callback to ``channel_name`` and ``event``. A prefix will be added to the channel name if not already available or the prefix is an empty string :param channel: channel name :param event: event name :param callback: callback to execute when event on channel occurs :return: a coroutine which results in the channel where the callback was registered """ channel = self.channel(channel) event = channel.register(event, callback) await channel.connect(event.name) return channel
[ "async", "def", "register", "(", "self", ",", "channel", ",", "event", ",", "callback", ")", ":", "channel", "=", "self", ".", "channel", "(", "channel", ")", "event", "=", "channel", ".", "register", "(", "event", ",", "callback", ")", "await", "chann...
Register a callback to ``channel_name`` and ``event``. A prefix will be added to the channel name if not already available or the prefix is an empty string :param channel: channel name :param event: event name :param callback: callback to execute when event on channel occurs :return: a coroutine which results in the channel where the callback was registered
[ "Register", "a", "callback", "to", "channel_name", "and", "event", "." ]
python
train
amorison/loam
loam/manager.py
https://github.com/amorison/loam/blob/a566c943a75e068a4510099331a1ddfe5bbbdd94/loam/manager.py#L187-L200
def from_dict_(cls, conf_dict): """Use a dictionary to create a :class:`ConfigurationManager`. Args: conf_dict (dict of dict of :class:`ConfOpt`): the first level of keys should be the section names. The second level should be the option names. The values are the options metadata. Returns: :class:`ConfigurationManager`: a configuration manager with the requested sections and options. """ return cls(**{name: Section(**opts) for name, opts in conf_dict.items()})
[ "def", "from_dict_", "(", "cls", ",", "conf_dict", ")", ":", "return", "cls", "(", "*", "*", "{", "name", ":", "Section", "(", "*", "*", "opts", ")", "for", "name", ",", "opts", "in", "conf_dict", ".", "items", "(", ")", "}", ")" ]
Use a dictionary to create a :class:`ConfigurationManager`. Args: conf_dict (dict of dict of :class:`ConfOpt`): the first level of keys should be the section names. The second level should be the option names. The values are the options metadata. Returns: :class:`ConfigurationManager`: a configuration manager with the requested sections and options.
[ "Use", "a", "dictionary", "to", "create", "a", ":", "class", ":", "ConfigurationManager", "." ]
python
test
Jajcus/pyxmpp2
pyxmpp2/ext/legacyauth.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/legacyauth.py#L399-L417
def registration_form_received(self, stanza): """Handle registration form received. [client only] Call self.registration_callback with the registration form received as the argument. Use the value returned by the callback will be a filled-in form. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.iq.Iq`""" self.lock.acquire() try: self.__register = Register(stanza.get_query()) self.registration_callback(stanza, self.__register.get_form()) finally: self.lock.release()
[ "def", "registration_form_received", "(", "self", ",", "stanza", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "__register", "=", "Register", "(", "stanza", ".", "get_query", "(", ")", ")", "self", ".", "registration_c...
Handle registration form received. [client only] Call self.registration_callback with the registration form received as the argument. Use the value returned by the callback will be a filled-in form. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.iq.Iq`
[ "Handle", "registration", "form", "received", "." ]
python
valid
horazont/aioxmpp
aioxmpp/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/service.py#L1546-L1556
def is_attrsignal_handler(descriptor, signal_name, cb, *, defer=False): """ Return true if `cb` has been decorated with :func:`attrsignal` for the given signal, descriptor and connection mode. """ try: handlers = get_magic_attr(cb) except AttributeError: return False return _attrsignal_spec(descriptor, signal_name, cb, defer) in handlers
[ "def", "is_attrsignal_handler", "(", "descriptor", ",", "signal_name", ",", "cb", ",", "*", ",", "defer", "=", "False", ")", ":", "try", ":", "handlers", "=", "get_magic_attr", "(", "cb", ")", "except", "AttributeError", ":", "return", "False", "return", "...
Return true if `cb` has been decorated with :func:`attrsignal` for the given signal, descriptor and connection mode.
[ "Return", "true", "if", "cb", "has", "been", "decorated", "with", ":", "func", ":", "attrsignal", "for", "the", "given", "signal", "descriptor", "and", "connection", "mode", "." ]
python
train
saltstack/salt
salt/modules/napalm_netacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L858-L883
def get_filter_pillar(filter_name, pillar_key='acl', pillarenv=None, saltenv=None): ''' Helper that can be used inside a state SLS, in order to get the filter configuration given its name. filter_name The name of the filter. pillar_key The root key of the whole policy config. pillarenv Query the master to generate fresh pillar data on the fly, specifically from the requested pillar environment. saltenv Included only for compatibility with :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored. ''' return __salt__['capirca.get_filter_pillar'](filter_name, pillar_key=pillar_key, pillarenv=pillarenv, saltenv=saltenv)
[ "def", "get_filter_pillar", "(", "filter_name", ",", "pillar_key", "=", "'acl'", ",", "pillarenv", "=", "None", ",", "saltenv", "=", "None", ")", ":", "return", "__salt__", "[", "'capirca.get_filter_pillar'", "]", "(", "filter_name", ",", "pillar_key", "=", "p...
Helper that can be used inside a state SLS, in order to get the filter configuration given its name. filter_name The name of the filter. pillar_key The root key of the whole policy config. pillarenv Query the master to generate fresh pillar data on the fly, specifically from the requested pillar environment. saltenv Included only for compatibility with :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
[ "Helper", "that", "can", "be", "used", "inside", "a", "state", "SLS", "in", "order", "to", "get", "the", "filter", "configuration", "given", "its", "name", "." ]
python
train
ssalentin/plip
plip/modules/preparation.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L208-L213
def get_linkage(self, line): """Get the linkage information from a LINK entry PDB line.""" conf1, id1, chain1, pos1 = line[16].strip(), line[17:20].strip(), line[21].strip(), int(line[22:26]) conf2, id2, chain2, pos2 = line[46].strip(), line[47:50].strip(), line[51].strip(), int(line[52:56]) return self.covlinkage(id1=id1, chain1=chain1, pos1=pos1, conf1=conf1, id2=id2, chain2=chain2, pos2=pos2, conf2=conf2)
[ "def", "get_linkage", "(", "self", ",", "line", ")", ":", "conf1", ",", "id1", ",", "chain1", ",", "pos1", "=", "line", "[", "16", "]", ".", "strip", "(", ")", ",", "line", "[", "17", ":", "20", "]", ".", "strip", "(", ")", ",", "line", "[", ...
Get the linkage information from a LINK entry PDB line.
[ "Get", "the", "linkage", "information", "from", "a", "LINK", "entry", "PDB", "line", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/helpers.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/helpers.py#L52-L65
def _index_list(key_or_list, direction=None): """Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction. """ if direction is not None: return [(key_or_list, direction)] else: if isinstance(key_or_list, string_type): return [(key_or_list, ASCENDING)] elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, " "key_or_list must be an instance of list") return key_or_list
[ "def", "_index_list", "(", "key_or_list", ",", "direction", "=", "None", ")", ":", "if", "direction", "is", "not", "None", ":", "return", "[", "(", "key_or_list", ",", "direction", ")", "]", "else", ":", "if", "isinstance", "(", "key_or_list", ",", "stri...
Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction.
[ "Helper", "to", "generate", "a", "list", "of", "(", "key", "direction", ")", "pairs", "." ]
python
train
refindlyllc/rets
rets/session.py
https://github.com/refindlyllc/rets/blob/c615dfc272cff0825fd3b50863c46afc3e33916f/rets/session.py#L188-L195
def get_lookup_values(self, resource, lookup_name): """ Get possible lookup values for a given field :param resource: The name of the resource :param lookup_name: The name of the the field to get lookup values for :return: list """ return self._make_metadata_request(meta_id=resource + ':' + lookup_name, metadata_type='METADATA-LOOKUP_TYPE')
[ "def", "get_lookup_values", "(", "self", ",", "resource", ",", "lookup_name", ")", ":", "return", "self", ".", "_make_metadata_request", "(", "meta_id", "=", "resource", "+", "':'", "+", "lookup_name", ",", "metadata_type", "=", "'METADATA-LOOKUP_TYPE'", ")" ]
Get possible lookup values for a given field :param resource: The name of the resource :param lookup_name: The name of the the field to get lookup values for :return: list
[ "Get", "possible", "lookup", "values", "for", "a", "given", "field", ":", "param", "resource", ":", "The", "name", "of", "the", "resource", ":", "param", "lookup_name", ":", "The", "name", "of", "the", "the", "field", "to", "get", "lookup", "values", "fo...
python
train
srittau/python-json-get
jsonget/__init__.py
https://github.com/srittau/python-json-get/blob/eb21fa7a4ed7fbd324de1cb3ad73876297e49bf8/jsonget/__init__.py#L282-L304
def json_get_default(json: JsonValue, path: str, default: Any, expected_type: Any = ANY) -> Any: """Get a JSON value by path, optionally checking its type. This works exactly like json_get(), but instead of raising ValueError or IndexError when a path part is not found, return the provided default value: >>> json_get_default({}, "/foo", "I am a default value") 'I am a default value' TypeErrors will be raised as in json_get() if an expected_type is provided: >>> json_get_default({"foo": "bar"}, "/foo", 123, int) Traceback (most recent call last): ... TypeError: wrong JSON type int != str """ try: return json_get(json, path, expected_type) except (ValueError, IndexError): return default
[ "def", "json_get_default", "(", "json", ":", "JsonValue", ",", "path", ":", "str", ",", "default", ":", "Any", ",", "expected_type", ":", "Any", "=", "ANY", ")", "->", "Any", ":", "try", ":", "return", "json_get", "(", "json", ",", "path", ",", "expe...
Get a JSON value by path, optionally checking its type. This works exactly like json_get(), but instead of raising ValueError or IndexError when a path part is not found, return the provided default value: >>> json_get_default({}, "/foo", "I am a default value") 'I am a default value' TypeErrors will be raised as in json_get() if an expected_type is provided: >>> json_get_default({"foo": "bar"}, "/foo", 123, int) Traceback (most recent call last): ... TypeError: wrong JSON type int != str
[ "Get", "a", "JSON", "value", "by", "path", "optionally", "checking", "its", "type", "." ]
python
valid
yyuu/botornado
boto/mturk/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L712-L717
def get_qualification_score(self, qualification_type_id, worker_id): """TODO: Document.""" params = {'QualificationTypeId' : qualification_type_id, 'SubjectId' : worker_id} return self._process_request('GetQualificationScore', params, [('Qualification', Qualification),])
[ "def", "get_qualification_score", "(", "self", ",", "qualification_type_id", ",", "worker_id", ")", ":", "params", "=", "{", "'QualificationTypeId'", ":", "qualification_type_id", ",", "'SubjectId'", ":", "worker_id", "}", "return", "self", ".", "_process_request", ...
TODO: Document.
[ "TODO", ":", "Document", "." ]
python
train
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L702-L736
def subnet_get(name, virtual_network, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Get details about a specific subnet. :param name: The name of the subnet to query. :param virtual_network: The virtual network name containing the subnet. :param resource_group: The resource group name assigned to the virtual network. CLI Example: .. code-block:: bash salt-call azurearm_network.subnet_get testsubnet testnet testgroup ''' netconn = __utils__['azurearm.get_client']('network', **kwargs) try: subnet = netconn.subnets.get( resource_group_name=resource_group, virtual_network_name=virtual_network, subnet_name=name ) result = subnet.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "subnet_get", "(", "name", ",", "virtual_network", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "subnet", "=", ...
.. versionadded:: 2019.2.0 Get details about a specific subnet. :param name: The name of the subnet to query. :param virtual_network: The virtual network name containing the subnet. :param resource_group: The resource group name assigned to the virtual network. CLI Example: .. code-block:: bash salt-call azurearm_network.subnet_get testsubnet testnet testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
ponty/psidialogs
psidialogs/__init__.py
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/__init__.py#L48-L60
def text(text, message='', title=''): """ This function is suitable for displaying general text, which can be longer than in :func:`message` :ref:`screenshots<text>` :param text: (long) text to be displayed :param message: (short) message to be displayed. :param title: window title :rtype: None """ return backend_api.opendialog("text", dict(text=text, message=message, title=title))
[ "def", "text", "(", "text", ",", "message", "=", "''", ",", "title", "=", "''", ")", ":", "return", "backend_api", ".", "opendialog", "(", "\"text\"", ",", "dict", "(", "text", "=", "text", ",", "message", "=", "message", ",", "title", "=", "title", ...
This function is suitable for displaying general text, which can be longer than in :func:`message` :ref:`screenshots<text>` :param text: (long) text to be displayed :param message: (short) message to be displayed. :param title: window title :rtype: None
[ "This", "function", "is", "suitable", "for", "displaying", "general", "text", "which", "can", "be", "longer", "than", "in", ":", "func", ":", "message" ]
python
train
tensorlayer/tensorlayer
tensorlayer/files/utils.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/files/utils.py#L937-L974
def download_file_from_google_drive(ID, destination): """Download file from Google Drive. See ``tl.files.load_celebA_dataset`` for example. Parameters -------------- ID : str The driver ID. destination : str The destination for save file. """ def save_response_content(response, destination, chunk_size=32 * 1024): total_size = int(response.headers.get('content-length', 0)) with open(destination, "wb") as f: for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True, desc=destination): if chunk: # filter out keep-alive new chunks f.write(chunk) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={'id': ID}, stream=True) token = get_confirm_token(response) if token: params = {'id': ID, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination)
[ "def", "download_file_from_google_drive", "(", "ID", ",", "destination", ")", ":", "def", "save_response_content", "(", "response", ",", "destination", ",", "chunk_size", "=", "32", "*", "1024", ")", ":", "total_size", "=", "int", "(", "response", ".", "header...
Download file from Google Drive. See ``tl.files.load_celebA_dataset`` for example. Parameters -------------- ID : str The driver ID. destination : str The destination for save file.
[ "Download", "file", "from", "Google", "Drive", "." ]
python
valid
agschwender/pilbox
pilbox/image.py
https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/image.py#L182-L206
def resize(self, width, height, **kwargs): """Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping """ opts = Image._normalize_options(kwargs) size = self._get_size(width, height) if opts["mode"] == "adapt": self._adapt(size, opts) elif opts["mode"] == "clip": self._clip(size, opts) elif opts["mode"] == "fill": self._fill(size, opts) elif opts["mode"] == "scale": self._scale(size, opts) else: self._crop(size, opts) return self
[ "def", "resize", "(", "self", ",", "width", ",", "height", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "Image", ".", "_normalize_options", "(", "kwargs", ")", "size", "=", "self", ".", "_get_size", "(", "width", ",", "height", ")", "if", "opts", ...
Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping
[ "Resizes", "the", "image", "to", "the", "supplied", "width", "/", "height", ".", "Returns", "the", "instance", ".", "Supports", "the", "following", "optional", "keyword", "arguments", ":" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/ordering.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/ordering.py#L40-L65
def get_packages_of_type(self, package_types, mask=None): """Get packages that match a certain type. Each ordering package has a type, so return all packages that match the types we are looking for :param list package_types: List of strings representing the package type keynames we are interested in. :param string mask: Mask to specify the properties we want to retrieve """ _filter = { 'type': { 'keyName': { 'operation': 'in', 'options': [ {'name': 'data', 'value': package_types} ], }, }, } packages = self.package_svc.getAllObjects(mask=mask, filter=_filter) packages = self.filter_outlet_packages(packages) return packages
[ "def", "get_packages_of_type", "(", "self", ",", "package_types", ",", "mask", "=", "None", ")", ":", "_filter", "=", "{", "'type'", ":", "{", "'keyName'", ":", "{", "'operation'", ":", "'in'", ",", "'options'", ":", "[", "{", "'name'", ":", "'data'", ...
Get packages that match a certain type. Each ordering package has a type, so return all packages that match the types we are looking for :param list package_types: List of strings representing the package type keynames we are interested in. :param string mask: Mask to specify the properties we want to retrieve
[ "Get", "packages", "that", "match", "a", "certain", "type", "." ]
python
train
googlefonts/fontbakery
Lib/fontbakery/checkrunner.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L1626-L1659
def get_module_profile(module, name=None): """ Get or create a profile from a module and return it. If the name `module.profile` is present the value of that is returned. Otherwise, if the name `module.profile_factory` is present, a new profile is created using `module.profile_factory` and then `profile.auto_register` is called with the module namespace. If neither name is defined, the module is not considered a profile-module and None is returned. TODO: describe the `name` argument and better define the signature of `profile_factory`. The `module` argument is expected to behave like a python module. The optional `name` argument is used when `profile_factory` is called to give a name to the default section of the new profile. If name is not present `module.__name__` is the fallback. `profile_factory` is called like this: `profile = module.profile_factory(default_section=default_section)` """ try: # if profile is defined we just use it return module.profile except AttributeError: # > 'module' object has no attribute 'profile' # try to create one on the fly. # e.g. module.__name__ == "fontbakery.profiles.cmap" if 'profile_factory' not in module.__dict__: return None default_section = Section(name or module.__name__) profile = module.profile_factory(default_section=default_section) profile.auto_register(module.__dict__) return profile
[ "def", "get_module_profile", "(", "module", ",", "name", "=", "None", ")", ":", "try", ":", "# if profile is defined we just use it", "return", "module", ".", "profile", "except", "AttributeError", ":", "# > 'module' object has no attribute 'profile'", "# try to create one ...
Get or create a profile from a module and return it. If the name `module.profile` is present the value of that is returned. Otherwise, if the name `module.profile_factory` is present, a new profile is created using `module.profile_factory` and then `profile.auto_register` is called with the module namespace. If neither name is defined, the module is not considered a profile-module and None is returned. TODO: describe the `name` argument and better define the signature of `profile_factory`. The `module` argument is expected to behave like a python module. The optional `name` argument is used when `profile_factory` is called to give a name to the default section of the new profile. If name is not present `module.__name__` is the fallback. `profile_factory` is called like this: `profile = module.profile_factory(default_section=default_section)`
[ "Get", "or", "create", "a", "profile", "from", "a", "module", "and", "return", "it", "." ]
python
train
crdoconnor/strictyaml
hitch/key.py
https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/hitch/key.py#L85-L97
def regressfile(filename): """ Run all stories in filename 'filename' in python 2 and 3. """ _storybook({"rewrite": False}).in_filename(filename).with_params( **{"python version": "2.7.14"} ).filter( lambda story: not story.info.get("fails_on_python_2") ).ordered_by_name().play() _storybook({"rewrite": False}).with_params( **{"python version": "3.7.0"} ).in_filename(filename).ordered_by_name().play()
[ "def", "regressfile", "(", "filename", ")", ":", "_storybook", "(", "{", "\"rewrite\"", ":", "False", "}", ")", ".", "in_filename", "(", "filename", ")", ".", "with_params", "(", "*", "*", "{", "\"python version\"", ":", "\"2.7.14\"", "}", ")", ".", "fil...
Run all stories in filename 'filename' in python 2 and 3.
[ "Run", "all", "stories", "in", "filename", "filename", "in", "python", "2", "and", "3", "." ]
python
train
opennode/waldur-core
waldur_core/quotas/handlers.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/quotas/handlers.py#L35-L43
def init_quotas(sender, instance, created=False, **kwargs): """ Initialize new instances quotas """ if not created: return for field in sender.get_quotas_fields(): try: field.get_or_create_quota(scope=instance) except CreationConditionFailedQuotaError: pass
[ "def", "init_quotas", "(", "sender", ",", "instance", ",", "created", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "created", ":", "return", "for", "field", "in", "sender", ".", "get_quotas_fields", "(", ")", ":", "try", ":", "field", ...
Initialize new instances quotas
[ "Initialize", "new", "instances", "quotas" ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/epub.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/epub.py#L129-L160
def make_epub_base(location): """ Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built """ log.info('Making EPUB base files in {0}'.format(location)) with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file out.write('application/epub+zip') #Create EPUB and META-INF directorys os.mkdir(os.path.join(location, 'META-INF')) os.mkdir(os.path.join(location, 'EPUB')) os.mkdir(os.path.join(location, 'EPUB', 'css')) with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out: out.write('''\ <?xml version="1.0" encoding="UTF-8"?> <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container"> <rootfiles> <rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container>''') with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out: out.write(bytes(DEFAULT_CSS, 'UTF-8'))
[ "def", "make_epub_base", "(", "location", ")", ":", "log", ".", "info", "(", "'Making EPUB base files in {0}'", ".", "format", "(", "location", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'mimetype'", ")", ",", "...
Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built
[ "Creates", "the", "base", "structure", "for", "an", "EPUB", "file", "in", "a", "specified", "location", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L7447-L7506
def get_assessment_taken_form_for_create(self, assessment_offered_id, assessment_taken_record_types): """Gets the assessment taken form for creating new assessments taken. A new form should be requested for each create transaction. arg: assessment_offered_id (osid.id.Id): the ``Id`` of the related ``AssessmentOffered`` arg: assessment_taken_record_types (osid.type.Type[]): array of assessment taken record types to be included in the create operation or an empty list if none return: (osid.assessment.AssessmentTakenForm) - the assessment taken form raise: NotFound - ``assessment_offered_id`` is not found raise: NullArgument - ``assessment_offered_id`` or ``assessment_taken_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ if not isinstance(assessment_offered_id, ABCId): raise errors.InvalidArgument('argument is not a valid OSID Id') for arg in assessment_taken_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') am = self._get_provider_manager('ASSESSMENT') aols = am.get_assessment_offered_lookup_session(proxy=self._proxy) aols.use_federated_bank_view() offered = aols.get_assessment_offered(assessment_offered_id) try: deadline = offered.get_deadline() nowutc = DateTime.utcnow() if nowutc > deadline: raise errors.PermissionDenied('you are passed the deadline for the offered') except errors.IllegalState: # no deadline set pass if assessment_taken_record_types == []: # WHY are we passing bank_id = self._catalog_id below, seems redundant: obj_form = objects.AssessmentTakenForm( bank_id=self._catalog_id, assessment_offered_id=assessment_offered_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) else: obj_form = objects.AssessmentTakenForm( 
bank_id=self._catalog_id, record_types=assessment_taken_record_types, assessment_offered_id=assessment_offered_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) obj_form._for_update = False self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_assessment_taken_form_for_create", "(", "self", ",", "assessment_offered_id", ",", "assessment_taken_record_types", ")", ":", "if", "not", "isinstance", "(", "assessment_offered_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "...
Gets the assessment taken form for creating new assessments taken. A new form should be requested for each create transaction. arg: assessment_offered_id (osid.id.Id): the ``Id`` of the related ``AssessmentOffered`` arg: assessment_taken_record_types (osid.type.Type[]): array of assessment taken record types to be included in the create operation or an empty list if none return: (osid.assessment.AssessmentTakenForm) - the assessment taken form raise: NotFound - ``assessment_offered_id`` is not found raise: NullArgument - ``assessment_offered_id`` or ``assessment_taken_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "assessment", "taken", "form", "for", "creating", "new", "assessments", "taken", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/text/umap_vis.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/text/umap_vis.py#L224-L247
def make_transformer(self, umap_kwargs={}): """ Creates an internal transformer pipeline to project the data set into 2D space using UMAP. This method will reset the transformer on the class. Parameters ---------- Returns ------- transformer : Pipeline Pipelined transformer for UMAP projections """ # Create the pipeline steps steps = [] # Add the UMAP manifold steps.append(('umap', UMAP( n_components=2, random_state=self.random_state, **umap_kwargs))) # return the pipeline return Pipeline(steps)
[ "def", "make_transformer", "(", "self", ",", "umap_kwargs", "=", "{", "}", ")", ":", "# Create the pipeline steps", "steps", "=", "[", "]", "# Add the UMAP manifold", "steps", ".", "append", "(", "(", "'umap'", ",", "UMAP", "(", "n_components", "=", "2", ","...
Creates an internal transformer pipeline to project the data set into 2D space using UMAP. This method will reset the transformer on the class. Parameters ---------- Returns ------- transformer : Pipeline Pipelined transformer for UMAP projections
[ "Creates", "an", "internal", "transformer", "pipeline", "to", "project", "the", "data", "set", "into", "2D", "space", "using", "UMAP", ".", "This", "method", "will", "reset", "the", "transformer", "on", "the", "class", "." ]
python
train
uber/tchannel-python
tchannel/event.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/event.py#L133-L151
def register_hook(self, hook, event_type=None): """ If ``event_type`` is provided, then ``hook`` will be called whenever that event is fired. If no ``event_type`` is specifid, but ``hook`` implements any methods with names matching an event hook, then those will be registered with their corresponding events. This allows for more stateful, class-based event handlers. """ if event_type is not None: assert type(event_type) is int, "register hooks with int values" return self.hooks[event_type].append(hook) for event_type in EventType._fields: func = getattr(hook, event_type, None) if callable(func): event_value = getattr(EventType, event_type) self.register_hook(func, event_value)
[ "def", "register_hook", "(", "self", ",", "hook", ",", "event_type", "=", "None", ")", ":", "if", "event_type", "is", "not", "None", ":", "assert", "type", "(", "event_type", ")", "is", "int", ",", "\"register hooks with int values\"", "return", "self", ".",...
If ``event_type`` is provided, then ``hook`` will be called whenever that event is fired. If no ``event_type`` is specifid, but ``hook`` implements any methods with names matching an event hook, then those will be registered with their corresponding events. This allows for more stateful, class-based event handlers.
[ "If", "event_type", "is", "provided", "then", "hook", "will", "be", "called", "whenever", "that", "event", "is", "fired", "." ]
python
train
sdispater/pytzdata
pytzdata/__init__.py
https://github.com/sdispater/pytzdata/blob/5707a44e425c0ab57cf9d1f6be83528accc31412/pytzdata/__init__.py#L88-L114
def get_timezones(): """ Get the supported timezones. The list will be cached unless you set the "fresh" attribute to True. :param fresh: Whether to get a fresh list or not :type fresh: bool :rtype: tuple """ base_dir = _DIRECTORY zones = () for root, dirs, files in os.walk(base_dir): for basename in files: zone = os.path.join(root, basename) if os.path.isdir(zone): continue zone = os.path.relpath(zone, base_dir) with open(os.path.join(root, basename), 'rb') as fd: if fd.read(4) == b'TZif' and zone not in INVALID_ZONES: zones = zones + (zone,) return tuple(sorted(zones))
[ "def", "get_timezones", "(", ")", ":", "base_dir", "=", "_DIRECTORY", "zones", "=", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "for", "basename", "in", "files", ":", "zone", "=", "os", "....
Get the supported timezones. The list will be cached unless you set the "fresh" attribute to True. :param fresh: Whether to get a fresh list or not :type fresh: bool :rtype: tuple
[ "Get", "the", "supported", "timezones", "." ]
python
train
kristianfoerster/melodist
melodist/util/util.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L145-L221
def get_sun_times(dates, lon, lat, time_zone): """Computes the times of sunrise, solar noon, and sunset for each day. Parameters ---- dates: datetime lat : latitude in DecDeg lon : longitude in DecDeg time_zone : timezone Returns ---- DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours """ df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength']) doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) # day of year # Day angle and declination after Bourges (1985): day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346)) declination = np.deg2rad( 0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b) + 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b) - 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b) ) # Equation of time with day angle after Spencer (1971): day_angle_s = 2 * np.pi * (doy - 1) / 365. eq_time = 12. / np.pi * ( 0.000075 + 0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) - 0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s) ) # standard_meridian = time_zone * 15. delta_lat_time = (lon - standard_meridian) * 24. / 360. omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination) omega_nul = np.arccos(omega_nul_arg) sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time # as an approximation, solar noon is independent of the below mentioned # cases: sunnoon = 12. * (1.) 
- delta_lat_time - eq_time # $kf 2015-11-13: special case midnight sun and polar night # CASE 1: MIDNIGHT SUN # set sunrise and sunset to values that would yield the maximum day # length even though this a crude assumption pos = omega_nul_arg < -1 sunrise[pos] = sunnoon[pos] - 12 sunset[pos] = sunnoon[pos] + 12 # CASE 2: POLAR NIGHT # set sunrise and sunset to values that would yield the minmum day # length even though this a crude assumption pos = omega_nul_arg > 1 sunrise[pos] = sunnoon[pos] sunset[pos] = sunnoon[pos] daylength = sunset - sunrise # adjust if required sunrise[sunrise < 0] += 24 sunset[sunset > 24] -= 24 df.sunrise = sunrise df.sunnoon = sunnoon df.sunset = sunset df.daylength = daylength return df
[ "def", "get_sun_times", "(", "dates", ",", "lon", ",", "lat", ",", "time_zone", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "dates", ",", "columns", "=", "[", "'sunrise'", ",", "'sunnoon'", ",", "'sunset'", ",", "'daylength'", "]", ...
Computes the times of sunrise, solar noon, and sunset for each day. Parameters ---- dates: datetime lat : latitude in DecDeg lon : longitude in DecDeg time_zone : timezone Returns ---- DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
[ "Computes", "the", "times", "of", "sunrise", "solar", "noon", "and", "sunset", "for", "each", "day", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/queue.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/queue.py#L126-L146
def drain_to(self, list, max_size=-1): """ Transfers all available items to the given `list`_ and removes these items from this queue. If a max_size is specified, it transfers at most the given number of items. In case of a failure, an item can exist in both collections or none of them. This operation may be more efficient than polling elements repeatedly and putting into collection. :param list: (`list`_), the list where the items in this queue will be transferred. :param max_size: (int), the maximum number items to transfer (optional). :return: (int), number of transferred items. .. _list: https://docs.python.org/2/library/functions.html#list """ def drain_result(f): resp = f.result() list.extend(resp) return len(resp) return self._encode_invoke(queue_drain_to_max_size_codec, max_size=max_size).continue_with( drain_result)
[ "def", "drain_to", "(", "self", ",", "list", ",", "max_size", "=", "-", "1", ")", ":", "def", "drain_result", "(", "f", ")", ":", "resp", "=", "f", ".", "result", "(", ")", "list", ".", "extend", "(", "resp", ")", "return", "len", "(", "resp", ...
Transfers all available items to the given `list`_ and removes these items from this queue. If a max_size is specified, it transfers at most the given number of items. In case of a failure, an item can exist in both collections or none of them. This operation may be more efficient than polling elements repeatedly and putting into collection. :param list: (`list`_), the list where the items in this queue will be transferred. :param max_size: (int), the maximum number items to transfer (optional). :return: (int), number of transferred items. .. _list: https://docs.python.org/2/library/functions.html#list
[ "Transfers", "all", "available", "items", "to", "the", "given", "list", "_", "and", "removes", "these", "items", "from", "this", "queue", ".", "If", "a", "max_size", "is", "specified", "it", "transfers", "at", "most", "the", "given", "number", "of", "items...
python
train
dschien/PyExcelModelingHelper
excel_helper/__init__.py
https://github.com/dschien/PyExcelModelingHelper/blob/d00d98ae2f28ad71cfcd2a365c3045e439517df2/excel_helper/__init__.py#L241-L301
def generate_values(self, *args, **kwargs): """ Instantiate a random variable and apply annual growth factors. :return: """ assert 'ref value' in self.kwargs # 1. Generate $\mu$ start_date = self.times[0].to_pydatetime() end_date = self.times[-1].to_pydatetime() ref_date = self.ref_date mu = self.generate_mu(end_date, ref_date, start_date) # 3. Generate $\sigma$ ## Prepare array with growth values $\sigma$ if self.sample_mean_value: sigma = np.zeros((len(self.times), self.size)) else: if self.kwargs['type'] == 'interp': def get_date(record): return datetime.datetime.strptime(record[0], "%Y-%m-%d") ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date) intial_value = ref_value_[0][1] else: intial_value = self.kwargs['ref value'] variability_ = intial_value * self.kwargs['initial_value_proportional_variation'] logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}') sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size)) ## 4. Prepare growth array for $\alpha_{sigma}$ alpha_sigma = growth_coefficients(start_date, end_date, ref_date, self.kwargs['ef_growth_factor'], 1) ### 5. Prepare DataFrame iterables = [self.times, range(self.size)] index_names = ['time', 'samples'] _multi_index = pd.MultiIndex.from_product(iterables, names=index_names) df = pd.DataFrame(index=_multi_index, dtype=float) from dateutil import relativedelta r = relativedelta.relativedelta(end_date, start_date) months = r.years * 12 + r.months + 1 name = kwargs['name'] ## Apply growth to $\sigma$ and add $\sigma$ to $\mu$ df[name] = ((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel() ## test if df has sub-zero values df_sigma__dropna = df[name].where(df[name] < 0).dropna() if not df_sigma__dropna.empty: logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}") return df[name]
[ "def", "generate_values", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "'ref value'", "in", "self", ".", "kwargs", "# 1. Generate $\\mu$", "start_date", "=", "self", ".", "times", "[", "0", "]", ".", "to_pydatetime", "(", "...
Instantiate a random variable and apply annual growth factors. :return:
[ "Instantiate", "a", "random", "variable", "and", "apply", "annual", "growth", "factors", "." ]
python
train
PaulHancock/Aegean
AegeanTools/msq2.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L45-L92
def step(self, x, y): """ Move from the current location to the next Parameters ---------- x, y : int The current location """ up_left = self.solid(x - 1, y - 1) up_right = self.solid(x, y - 1) down_left = self.solid(x - 1, y) down_right = self.solid(x, y) state = 0 self.prev = self.next # which cells are filled? if up_left: state |= 1 if up_right: state |= 2 if down_left: state |= 4 if down_right: state |= 8 # what is the next step? if state in [1, 5, 13]: self.next = self.UP elif state in [2, 3, 7]: self.next = self.RIGHT elif state in [4, 12, 14]: self.next = self.LEFT elif state in [8, 10, 11]: self.next = self.DOWN elif state == 6: if self.prev == self.UP: self.next = self.LEFT else: self.next = self.RIGHT elif state == 9: if self.prev == self.RIGHT: self.next = self.UP else: self.next = self.DOWN else: self.next = self.NOWHERE return
[ "def", "step", "(", "self", ",", "x", ",", "y", ")", ":", "up_left", "=", "self", ".", "solid", "(", "x", "-", "1", ",", "y", "-", "1", ")", "up_right", "=", "self", ".", "solid", "(", "x", ",", "y", "-", "1", ")", "down_left", "=", "self",...
Move from the current location to the next Parameters ---------- x, y : int The current location
[ "Move", "from", "the", "current", "location", "to", "the", "next" ]
python
train
turicas/rows
rows/plugins/xls.py
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L83-L143
def cell_value(sheet, row, col): """Return the cell value of the table passed by argument, based in row and column.""" cell = sheet.cell(row, col) field_type = CELL_TYPES[cell.ctype] # TODO: this approach will not work if using locale value = cell.value if field_type is None: return None elif field_type is fields.TextField: if cell.ctype != xlrd.XL_CELL_BLANK: return value else: return "" elif field_type is fields.DatetimeField: if value == 0.0: return None try: time_tuple = xlrd.xldate_as_tuple(value, sheet.book.datemode) except xlrd.xldate.XLDateTooLarge: return None value = field_type.serialize(datetime.datetime(*time_tuple)) return value.split("T00:00:00")[0] elif field_type is fields.BoolField: if value == 0: return False elif value == 1: return True elif cell.xf_index is None: return value # TODO: test else: book = sheet.book xf = book.xf_list[cell.xf_index] fmt = book.format_map[xf.format_key] if fmt.format_str.endswith("%"): # TODO: we may optimize this approach: we're converting to string # and the library is detecting the type when we could just say to # the library this value is PercentField if value is not None: try: decimal_places = len(fmt.format_str[:-1].split(".")[-1]) except IndexError: decimal_places = 2 return "{}%".format(str(round(value * 100, decimal_places))) else: return None elif type(value) == float and int(value) == value: return int(value) else: return value
[ "def", "cell_value", "(", "sheet", ",", "row", ",", "col", ")", ":", "cell", "=", "sheet", ".", "cell", "(", "row", ",", "col", ")", "field_type", "=", "CELL_TYPES", "[", "cell", ".", "ctype", "]", "# TODO: this approach will not work if using locale", "valu...
Return the cell value of the table passed by argument, based in row and column.
[ "Return", "the", "cell", "value", "of", "the", "table", "passed", "by", "argument", "based", "in", "row", "and", "column", "." ]
python
train
pricingassistant/mrq
mrq/queue.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L164-L208
def all_known(cls, sources=None, prefixes=None): """ List all currently known queues """ sources = sources or ("config", "jobs", "raw_subqueues") queues = set() if "config" in sources and not prefixes: # Some queues are explicitly declared in the config (including all root raw queues) cfg = context.get_current_config() queues_from_config = [ t.get("queue") for t in (cfg.get("tasks") or {}).values() if t.get("queue") ] queues_from_config += Queue.get_queues_config().keys() queues_from_config += [ t.get("retry_queue") for t in Queue.get_queues_config().values() if t.get("retry_queue") ] queues |= set(queues_from_config) if "jobs" in sources: # This will get all queues from mongodb, including those where we have only non-queued jobs for q in context.connections.mongodb_jobs.mrq_jobs.distinct("queue"): if prefixes and not any(q.startswith(p) for p in prefixes): continue queues.add(q) if "raw_subqueues" in sources: for q in Queue.get_queues_config(): if prefixes and not any(q + "/" == p for p in prefixes): continue queue_obj = Queue(q) if queue_obj.is_raw and queue_obj.has_subqueues: # TODO: optimize this with a single SUNION on all keys queues |= queue_obj.get_known_subqueues() return queues
[ "def", "all_known", "(", "cls", ",", "sources", "=", "None", ",", "prefixes", "=", "None", ")", ":", "sources", "=", "sources", "or", "(", "\"config\"", ",", "\"jobs\"", ",", "\"raw_subqueues\"", ")", "queues", "=", "set", "(", ")", "if", "\"config\"", ...
List all currently known queues
[ "List", "all", "currently", "known", "queues" ]
python
train
apache/airflow
airflow/api/common/experimental/mark_tasks.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/mark_tasks.py#L57-L184
def set_state(task, execution_date, upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=False, session=None): """ Set the state of a task instance and if needed its relatives. Can set state for future tasks (calculated from execution_date) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will as for subdag dag runs if needed). :param task: the task from which to work. task.task.dag needs to be set :param execution_date: the execution date from which to start looking :param upstream: Mark all parents (upstream tasks) :param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags :param future: Mark all future tasks on the interval of the dag up until last execution date. :param past: Retroactively mark all tasks starting from start_date of the DAG :param state: State to which the tasks need to be set :param commit: Commit tasks to be altered to the database :param session: database session :return: list of tasks that have been created and updated """ assert timezone.is_localized(execution_date) assert task.dag is not None dag = task.dag latest_execution_date = dag.latest_execution_date assert latest_execution_date is not None # determine date range of dag runs and tasks to consider end_date = latest_execution_date if future else execution_date if 'start_date' in dag.default_args: start_date = dag.default_args['start_date'] elif dag.start_date: start_date = dag.start_date else: start_date = execution_date start_date = execution_date if not past else start_date if dag.schedule_interval == '@once': dates = [start_date] else: dates = dag.date_range(start_date=start_date, end_date=end_date) # find relatives (siblings = downstream, parents = upstream) if needed task_ids = [task.task_id] if downstream: relatives = task.get_flat_relatives(upstream=False) task_ids += [t.task_id for 
t in relatives] if upstream: relatives = task.get_flat_relatives(upstream=True) task_ids += [t.task_id for t in relatives] # verify the integrity of the dag runs in case a task was added or removed # set the confirmed execution dates as they might be different # from what was provided confirmed_dates = [] drs = DagRun.find(dag_id=dag.dag_id, execution_date=dates) for dr in drs: dr.dag = dag dr.verify_integrity() confirmed_dates.append(dr.execution_date) # go through subdagoperators and create dag runs. We will only work # within the scope of the subdag. We wont propagate to the parent dag, # but we will propagate from parent to subdag. dags = [dag] sub_dag_ids = [] while len(dags) > 0: current_dag = dags.pop() for task_id in task_ids: if not current_dag.has_task(task_id): continue current_task = current_dag.get_task(task_id) if isinstance(current_task, SubDagOperator): # this works as a kind of integrity check # it creates missing dag runs for subdagoperators, # maybe this should be moved to dagrun.verify_integrity drs = _create_dagruns(current_task.subdag, execution_dates=confirmed_dates, state=State.RUNNING, run_id_template=BackfillJob.ID_FORMAT_PREFIX) for dr in drs: dr.dag = current_task.subdag dr.verify_integrity() if commit: dr.state = state session.merge(dr) dags.append(current_task.subdag) sub_dag_ids.append(current_task.subdag.dag_id) # now look for the task instances that are affected TI = TaskInstance # get all tasks of the main dag that will be affected by a state change qry_dag = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.execution_date.in_(confirmed_dates), TI.task_id.in_(task_ids)).filter( or_(TI.state.is_(None), TI.state != state) ) # get *all* tasks of the sub dags if len(sub_dag_ids) > 0: qry_sub_dag = session.query(TI).filter( TI.dag_id.in_(sub_dag_ids), TI.execution_date.in_(confirmed_dates)).filter( or_(TI.state.is_(None), TI.state != state) ) if commit: tis_altered = qry_dag.with_for_update().all() if len(sub_dag_ids) > 0: 
tis_altered += qry_sub_dag.with_for_update().all() for ti in tis_altered: ti.state = state else: tis_altered = qry_dag.all() if len(sub_dag_ids) > 0: tis_altered += qry_sub_dag.all() return tis_altered
[ "def", "set_state", "(", "task", ",", "execution_date", ",", "upstream", "=", "False", ",", "downstream", "=", "False", ",", "future", "=", "False", ",", "past", "=", "False", ",", "state", "=", "State", ".", "SUCCESS", ",", "commit", "=", "False", ","...
Set the state of a task instance and if needed its relatives. Can set state for future tasks (calculated from execution_date) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will as for subdag dag runs if needed). :param task: the task from which to work. task.task.dag needs to be set :param execution_date: the execution date from which to start looking :param upstream: Mark all parents (upstream tasks) :param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags :param future: Mark all future tasks on the interval of the dag up until last execution date. :param past: Retroactively mark all tasks starting from start_date of the DAG :param state: State to which the tasks need to be set :param commit: Commit tasks to be altered to the database :param session: database session :return: list of tasks that have been created and updated
[ "Set", "the", "state", "of", "a", "task", "instance", "and", "if", "needed", "its", "relatives", ".", "Can", "set", "state", "for", "future", "tasks", "(", "calculated", "from", "execution_date", ")", "and", "retroactively", "for", "past", "tasks", ".", "W...
python
test
openstack/horizon
openstack_dashboard/dashboards/project/security_groups/tables.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/security_groups/tables.py#L105-L109
def filter(self, table, security_groups, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [security_group for security_group in security_groups if query in security_group.name.lower()]
[ "def", "filter", "(", "self", ",", "table", ",", "security_groups", ",", "filter_string", ")", ":", "query", "=", "filter_string", ".", "lower", "(", ")", "return", "[", "security_group", "for", "security_group", "in", "security_groups", "if", "query", "in", ...
Naive case-insensitive search.
[ "Naive", "case", "-", "insensitive", "search", "." ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pytree.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L332-L339
def insert_child(self, i, child): """ Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.insert(i, child) self.changed()
[ "def", "insert_child", "(", "self", ",", "i", ",", "child", ")", ":", "child", ".", "parent", "=", "self", "self", ".", "children", ".", "insert", "(", "i", ",", "child", ")", "self", ".", "changed", "(", ")" ]
Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately.
[ "Equivalent", "to", "node", ".", "children", ".", "insert", "(", "i", "child", ")", ".", "This", "method", "also", "sets", "the", "child", "s", "parent", "attribute", "appropriately", "." ]
python
train
cloudmesh-cmd3/cmd3
cmd3/shell.py
https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/shell.py#L312-L320
def create_file(filename): """ Creates a new file if the file name does not exists :param filename: the name of the file """ expanded_filename = os.path.expanduser(os.path.expandvars(filename)) if not os.path.exists(expanded_filename): open(expanded_filename, "a").close()
[ "def", "create_file", "(", "filename", ")", ":", "expanded_filename", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "filename", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "expanded_filename"...
Creates a new file if the file name does not exists :param filename: the name of the file
[ "Creates", "a", "new", "file", "if", "the", "file", "name", "does", "not", "exists", ":", "param", "filename", ":", "the", "name", "of", "the", "file" ]
python
train
abseil/abseil-py
absl/app.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/app.py#L182-L226
def _register_and_parse_flags_with_usage( argv=None, flags_parser=parse_flags_with_usage, ): """Registers help flags, parses arguments and shows usage if appropriate. This also calls sys.exit(0) if flag --only_check_args is True. Args: argv: [str], a non-empty list of the command line arguments including program name, sys.argv is used if None. flags_parser: Callable[[List[Text]], Any], the function used to parse flags. The return value of this function is passed to `main` untouched. It must guarantee FLAGS is parsed after this function is called. Returns: The return value of `flags_parser`. When using the default `flags_parser`, it returns the following: [str], a non-empty list of remaining command line arguments after parsing flags, including program name. Raises: Error: Raised when flags_parser is called, but FLAGS is not parsed. SystemError: Raised when it's called more than once. """ if _register_and_parse_flags_with_usage.done: raise SystemError('Flag registration can be done only once.') define_help_flags() original_argv = sys.argv if argv is None else argv args_to_main = flags_parser(original_argv) if not FLAGS.is_parsed(): raise Error('FLAGS must be parsed after flags_parser is called.') # Exit when told so. if FLAGS.only_check_args: sys.exit(0) # Immediately after flags are parsed, bump verbosity to INFO if the flag has # not been set. if FLAGS['verbosity'].using_default_value: FLAGS.verbosity = 0 _register_and_parse_flags_with_usage.done = True return args_to_main
[ "def", "_register_and_parse_flags_with_usage", "(", "argv", "=", "None", ",", "flags_parser", "=", "parse_flags_with_usage", ",", ")", ":", "if", "_register_and_parse_flags_with_usage", ".", "done", ":", "raise", "SystemError", "(", "'Flag registration can be done only once...
Registers help flags, parses arguments and shows usage if appropriate. This also calls sys.exit(0) if flag --only_check_args is True. Args: argv: [str], a non-empty list of the command line arguments including program name, sys.argv is used if None. flags_parser: Callable[[List[Text]], Any], the function used to parse flags. The return value of this function is passed to `main` untouched. It must guarantee FLAGS is parsed after this function is called. Returns: The return value of `flags_parser`. When using the default `flags_parser`, it returns the following: [str], a non-empty list of remaining command line arguments after parsing flags, including program name. Raises: Error: Raised when flags_parser is called, but FLAGS is not parsed. SystemError: Raised when it's called more than once.
[ "Registers", "help", "flags", "parses", "arguments", "and", "shows", "usage", "if", "appropriate", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/gridfs/grid_file.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/gridfs/grid_file.py#L553-L578
def seek(self, pos, whence=_SEEK_SET): """Set the current position of this file. :Parameters: - `pos`: the position (or offset if using relative positioning) to seek to - `whence` (optional): where to seek from. :attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end. """ if whence == _SEEK_SET: new_pos = pos elif whence == _SEEK_CUR: new_pos = self.__position + pos elif whence == _SEEK_END: new_pos = int(self.length) + pos else: raise IOError(22, "Invalid value for `whence`") if new_pos < 0: raise IOError(22, "Invalid value for `pos` - must be positive") self.__position = new_pos self.__buffer = EMPTY
[ "def", "seek", "(", "self", ",", "pos", ",", "whence", "=", "_SEEK_SET", ")", ":", "if", "whence", "==", "_SEEK_SET", ":", "new_pos", "=", "pos", "elif", "whence", "==", "_SEEK_CUR", ":", "new_pos", "=", "self", ".", "__position", "+", "pos", "elif", ...
Set the current position of this file. :Parameters: - `pos`: the position (or offset if using relative positioning) to seek to - `whence` (optional): where to seek from. :attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end.
[ "Set", "the", "current", "position", "of", "this", "file", "." ]
python
train
gholt/swiftly
swiftly/client/client.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/client/client.py#L114-L148
def head_account(self, headers=None, query=None, cdn=False): """ HEADs the account and returns the results. Useful headers returned are: =========================== ================================= x-account-bytes-used Object storage used for the account, in bytes. x-account-container-count The number of containers in the account. x-account-object-count The number of objects in the account. =========================== ================================= Also, any user headers beginning with x-account-meta- are returned. These values can be delayed depending the Swift cluster. :param headers: Additional headers to send with the request. :param query: Set to a dict of query values to send on the query string of the request. :param cdn: If set True, the CDN management interface will be used. :returns: A tuple of (status, reason, headers, contents). :status: is an int for the HTTP status code. :reason: is the str for the HTTP status (ex: "Ok"). :headers: is a dict with all lowercase keys of the HTTP headers; if a header has multiple values, it will be a list. :contents: is the str for the HTTP body. """ return self.request( 'HEAD', '', '', headers, query=query, cdn=cdn)
[ "def", "head_account", "(", "self", ",", "headers", "=", "None", ",", "query", "=", "None", ",", "cdn", "=", "False", ")", ":", "return", "self", ".", "request", "(", "'HEAD'", ",", "''", ",", "''", ",", "headers", ",", "query", "=", "query", ",", ...
HEADs the account and returns the results. Useful headers returned are: =========================== ================================= x-account-bytes-used Object storage used for the account, in bytes. x-account-container-count The number of containers in the account. x-account-object-count The number of objects in the account. =========================== ================================= Also, any user headers beginning with x-account-meta- are returned. These values can be delayed depending the Swift cluster. :param headers: Additional headers to send with the request. :param query: Set to a dict of query values to send on the query string of the request. :param cdn: If set True, the CDN management interface will be used. :returns: A tuple of (status, reason, headers, contents). :status: is an int for the HTTP status code. :reason: is the str for the HTTP status (ex: "Ok"). :headers: is a dict with all lowercase keys of the HTTP headers; if a header has multiple values, it will be a list. :contents: is the str for the HTTP body.
[ "HEADs", "the", "account", "and", "returns", "the", "results", ".", "Useful", "headers", "returned", "are", ":" ]
python
test
cimm-kzn/CGRtools
CGRtools/algorithms/isomorphism.py
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/isomorphism.py#L56-L62
def get_mapping(self, other): """ get self to other mapping """ m = next(self._matcher(other).isomorphisms_iter(), None) if m: return {v: k for k, v in m.items()}
[ "def", "get_mapping", "(", "self", ",", "other", ")", ":", "m", "=", "next", "(", "self", ".", "_matcher", "(", "other", ")", ".", "isomorphisms_iter", "(", ")", ",", "None", ")", "if", "m", ":", "return", "{", "v", ":", "k", "for", "k", ",", "...
get self to other mapping
[ "get", "self", "to", "other", "mapping" ]
python
train
ewels/MultiQC
multiqc/modules/theta2/theta2.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/theta2/theta2.py#L75-L99
def theta2_purities_chart (self): """ Make the plot showing alignment rates """ # Specify the order of the different possible categories keys = OrderedDict() keys['proportion_germline'] = { 'name': 'Germline' } keys['proportion_tumour_1'] = { 'name': 'Tumour Subclone 1' } keys['proportion_tumour_2'] = { 'name': 'Tumour Subclone 2' } keys['proportion_tumour_3'] = { 'name': 'Tumour Subclone 3' } keys['proportion_tumour_4'] = { 'name': 'Tumour Subclone 4' } keys['proportion_tumour_5'] = { 'name': 'Tumour Subclone 5' } keys['proportion_tumour_gt5'] = { 'name': 'Tumour Subclones > 5' } # Config for the plot pconfig = { 'id': 'theta2_purity_plot', 'title': 'THetA2: Tumour Subclone Purities', 'cpswitch': False, 'ymin': 0, 'ymax': 100, 'ylab': '% Purity', 'tt_suffix': '%' } return bargraph.plot(self.theta2_data, keys, pconfig)
[ "def", "theta2_purities_chart", "(", "self", ")", ":", "# Specify the order of the different possible categories", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'proportion_germline'", "]", "=", "{", "'name'", ":", "'Germline'", "}", "keys", "[", "'proportion_tu...
Make the plot showing alignment rates
[ "Make", "the", "plot", "showing", "alignment", "rates" ]
python
train
openstack/horizon
horizon/utils/functions.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/utils/functions.py#L96-L100
def save_config_value(request, response, key, value): """Sets value of key `key` to `value` in both session and cookies.""" request.session[key] = value response.set_cookie(key, value, expires=one_year_from_now()) return response
[ "def", "save_config_value", "(", "request", ",", "response", ",", "key", ",", "value", ")", ":", "request", ".", "session", "[", "key", "]", "=", "value", "response", ".", "set_cookie", "(", "key", ",", "value", ",", "expires", "=", "one_year_from_now", ...
Sets value of key `key` to `value` in both session and cookies.
[ "Sets", "value", "of", "key", "key", "to", "value", "in", "both", "session", "and", "cookies", "." ]
python
train
chrisjrn/registrasion
registrasion/reporting/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/reporting/views.py#L414-L432
def credit_notes(request, form): ''' Shows all of the credit notes in the system. ''' notes = commerce.CreditNote.objects.all().select_related( "creditnoterefund", "creditnoteapplication", "invoice", "invoice__user__attendee__attendeeprofilebase", ) return QuerysetReport( "Credit Notes", ["id", "invoice__user__attendee__attendeeprofilebase__invoice_recipient", "status", "value"], notes, headings=["id", "Owner", "Status", "Value"], link_view=views.credit_note, )
[ "def", "credit_notes", "(", "request", ",", "form", ")", ":", "notes", "=", "commerce", ".", "CreditNote", ".", "objects", ".", "all", "(", ")", ".", "select_related", "(", "\"creditnoterefund\"", ",", "\"creditnoteapplication\"", ",", "\"invoice\"", ",", "\"i...
Shows all of the credit notes in the system.
[ "Shows", "all", "of", "the", "credit", "notes", "in", "the", "system", "." ]
python
test
splunk/splunk-sdk-python
splunklib/client.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L3590-L3602
def update_field(self, name, value): """Changes the definition of a KV Store field. :param name: name of field to change :type name: ``string`` :param value: new field definition :type value: ``string`` :return: Result of POST request """ kwargs = {} kwargs['field.' + name] = value return self.post(**kwargs)
[ "def", "update_field", "(", "self", ",", "name", ",", "value", ")", ":", "kwargs", "=", "{", "}", "kwargs", "[", "'field.'", "+", "name", "]", "=", "value", "return", "self", ".", "post", "(", "*", "*", "kwargs", ")" ]
Changes the definition of a KV Store field. :param name: name of field to change :type name: ``string`` :param value: new field definition :type value: ``string`` :return: Result of POST request
[ "Changes", "the", "definition", "of", "a", "KV", "Store", "field", "." ]
python
train
realestate-com-au/dashmat
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/client.py#L1883-L1914
def attach(self, host=None, source=None, sourcetype=None): """Opens a stream (a writable socket) for writing events to the index. :param host: The host value for events written to the stream. :type host: ``string`` :param source: The source value for events written to the stream. :type source: ``string`` :param sourcetype: The sourcetype value for events written to the stream. :type sourcetype: ``string`` :return: A writable socket. """ args = { 'index': self.name } if host is not None: args['host'] = host if source is not None: args['source'] = source if sourcetype is not None: args['sourcetype'] = sourcetype path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.urlencode(args), skip_encode=True) # Since we need to stream to the index connection, we have to keep # the connection open and use the Splunk extension headers to note # the input mode sock = self.service.connect() headers = ["POST %s HTTP/1.1\r\n" % self.service._abspath(path), "Host: %s:%s\r\n" % (self.service.host, int(self.service.port)), "Accept-Encoding: identity\r\n", "Authorization: %s\r\n" % self.service.token, "X-Splunk-Input-Mode: Streaming\r\n", "\r\n"] for h in headers: sock.write(h) return sock
[ "def", "attach", "(", "self", ",", "host", "=", "None", ",", "source", "=", "None", ",", "sourcetype", "=", "None", ")", ":", "args", "=", "{", "'index'", ":", "self", ".", "name", "}", "if", "host", "is", "not", "None", ":", "args", "[", "'host'...
Opens a stream (a writable socket) for writing events to the index. :param host: The host value for events written to the stream. :type host: ``string`` :param source: The source value for events written to the stream. :type source: ``string`` :param sourcetype: The sourcetype value for events written to the stream. :type sourcetype: ``string`` :return: A writable socket.
[ "Opens", "a", "stream", "(", "a", "writable", "socket", ")", "for", "writing", "events", "to", "the", "index", "." ]
python
train
codelv/enaml-native
src/enamlnative/widgets/scroll_view.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/widgets/scroll_view.py#L65-L72
def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ if change['type'] in ['event', 'update'] and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value'])
[ "def", "_update_proxy", "(", "self", ",", "change", ")", ":", "if", "change", "[", "'type'", "]", "in", "[", "'event'", ",", "'update'", "]", "and", "self", ".", "proxy_is_active", ":", "handler", "=", "getattr", "(", "self", ".", "proxy", ",", "'set_'...
An observer which sends the state change to the proxy.
[ "An", "observer", "which", "sends", "the", "state", "change", "to", "the", "proxy", "." ]
python
train
gbiggs/rtctree
rtctree/manager.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L236-L248
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
[ "def", "unload_module", "(", "self", ",", "path", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_obj", ".", "unload_module", "(", "path", ")", "!=", "RTC", ".", "RTC_OK", ":", "raise", "FailedToUnloadModuleError", "(", "path", ")" ]
Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError
[ "Unload", "a", "loaded", "shared", "library", "." ]
python
train
nickoala/telepot
telepot/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L789-L792
def deleteChatPhoto(self, chat_id): """ See: https://core.telegram.org/bots/api#deletechatphoto """ p = _strip(locals()) return self._api_request('deleteChatPhoto', _rectify(p))
[ "def", "deleteChatPhoto", "(", "self", ",", "chat_id", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ")", "return", "self", ".", "_api_request", "(", "'deleteChatPhoto'", ",", "_rectify", "(", "p", ")", ")" ]
See: https://core.telegram.org/bots/api#deletechatphoto
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#deletechatphoto" ]
python
train
useblocks/groundwork
groundwork/patterns/gw_base_pattern.py
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L148-L154
def _post_activate_injection(self): """ Injects functions after the activation routine of child classes got called :return: None """ self.active = True self.app.signals.send("plugin_activate_post", self)
[ "def", "_post_activate_injection", "(", "self", ")", ":", "self", ".", "active", "=", "True", "self", ".", "app", ".", "signals", ".", "send", "(", "\"plugin_activate_post\"", ",", "self", ")" ]
Injects functions after the activation routine of child classes got called :return: None
[ "Injects", "functions", "after", "the", "activation", "routine", "of", "child", "classes", "got", "called", ":", "return", ":", "None" ]
python
train
flatangle/flatlib
flatlib/predictives/primarydirections.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/predictives/primarydirections.py#L322-L328
def bySignificator(self, ID): """ Returns all directions to a significator. """ res = [] for direction in self.table: if ID in direction[2]: res.append(direction) return res
[ "def", "bySignificator", "(", "self", ",", "ID", ")", ":", "res", "=", "[", "]", "for", "direction", "in", "self", ".", "table", ":", "if", "ID", "in", "direction", "[", "2", "]", ":", "res", ".", "append", "(", "direction", ")", "return", "res" ]
Returns all directions to a significator.
[ "Returns", "all", "directions", "to", "a", "significator", "." ]
python
train
pybel/pybel-tools
src/pybel_tools/summary/node_properties.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/node_properties.py#L80-L89
def get_causal_central_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]: """Return a set of all nodes that have both an in-degree > 0 and out-degree > 0. This means that they are an integral part of a pathway, since they are both produced and consumed. """ return { node for node in graph if node.function == func and is_causal_central(graph, node) }
[ "def", "get_causal_central_nodes", "(", "graph", ":", "BELGraph", ",", "func", ":", "str", ")", "->", "Set", "[", "BaseEntity", "]", ":", "return", "{", "node", "for", "node", "in", "graph", "if", "node", ".", "function", "==", "func", "and", "is_causal_...
Return a set of all nodes that have both an in-degree > 0 and out-degree > 0. This means that they are an integral part of a pathway, since they are both produced and consumed.
[ "Return", "a", "set", "of", "all", "nodes", "that", "have", "both", "an", "in", "-", "degree", ">", "0", "and", "out", "-", "degree", ">", "0", "." ]
python
valid
saltstack/salt
salt/config/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/config/__init__.py#L2560-L2581
def apply_sdb(opts, sdb_opts=None): ''' Recurse for sdb:// links for opts ''' # Late load of SDB to keep CLI light import salt.utils.sdb if sdb_opts is None: sdb_opts = opts if isinstance(sdb_opts, six.string_types) and sdb_opts.startswith('sdb://'): return salt.utils.sdb.sdb_get(sdb_opts, opts) elif isinstance(sdb_opts, dict): for key, value in six.iteritems(sdb_opts): if value is None: continue sdb_opts[key] = apply_sdb(opts, value) elif isinstance(sdb_opts, list): for key, value in enumerate(sdb_opts): if value is None: continue sdb_opts[key] = apply_sdb(opts, value) return sdb_opts
[ "def", "apply_sdb", "(", "opts", ",", "sdb_opts", "=", "None", ")", ":", "# Late load of SDB to keep CLI light", "import", "salt", ".", "utils", ".", "sdb", "if", "sdb_opts", "is", "None", ":", "sdb_opts", "=", "opts", "if", "isinstance", "(", "sdb_opts", ",...
Recurse for sdb:// links for opts
[ "Recurse", "for", "sdb", ":", "//", "links", "for", "opts" ]
python
train
innogames/polysh
polysh/remote_dispatcher.py
https://github.com/innogames/polysh/blob/fbea36f3bc9f47a62d72040c48dad1776124dae3/polysh/remote_dispatcher.py#L367-L376
def rename(self, name): """Send to the remote shell, its new name to be shell expanded""" if name: # defug callback add? rename1, rename2 = callbacks.add( b'rename', self.change_name, False) self.dispatch_command(b'/bin/echo "' + rename1 + b'""' + rename2 + b'"' + name + b'\n') else: self.change_name(self.hostname.encode())
[ "def", "rename", "(", "self", ",", "name", ")", ":", "if", "name", ":", "# defug callback add?", "rename1", ",", "rename2", "=", "callbacks", ".", "add", "(", "b'rename'", ",", "self", ".", "change_name", ",", "False", ")", "self", ".", "dispatch_command",...
Send to the remote shell, its new name to be shell expanded
[ "Send", "to", "the", "remote", "shell", "its", "new", "name", "to", "be", "shell", "expanded" ]
python
train
peterdemin/pip-compile-multi
pipcompilemulti/environment.py
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L103-L115
def fix_lockfile(self): """Run each line of outfile through fix_pin""" with open(self.outfile, 'rt') as fp: lines = [ self.fix_pin(line) for line in self.concatenated(fp) ] with open(self.outfile, 'wt') as fp: fp.writelines([ line + '\n' for line in lines if line is not None ])
[ "def", "fix_lockfile", "(", "self", ")", ":", "with", "open", "(", "self", ".", "outfile", ",", "'rt'", ")", "as", "fp", ":", "lines", "=", "[", "self", ".", "fix_pin", "(", "line", ")", "for", "line", "in", "self", ".", "concatenated", "(", "fp", ...
Run each line of outfile through fix_pin
[ "Run", "each", "line", "of", "outfile", "through", "fix_pin" ]
python
train
gmr/tredis
tredis/sets.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L253-L285
def srandmember(self, key, count=None): """When called with just the key argument, return a random element from the set value stored at key. Starting from Redis version 2.6, when called with the additional count argument, return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. In this case the number of returned elements is the absolute value of the specified count. When called with just the key argument, the operation is similar to :meth:`~tredis.RedisClient.spop`, however while :meth:`~tredis.RedisClient.spop` also removes the randomly selected element from the set, :meth:`~tredis.RedisClient.srandmember` will just return a random element without altering the original set in any way. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'SRANDMEMBER', key] if count: command.append(ascii(count).encode('ascii')) return self._execute(command)
[ "def", "srandmember", "(", "self", ",", "key", ",", "count", "=", "None", ")", ":", "command", "=", "[", "b'SRANDMEMBER'", ",", "key", "]", "if", "count", ":", "command", ".", "append", "(", "ascii", "(", "count", ")", ".", "encode", "(", "'ascii'", ...
When called with just the key argument, return a random element from the set value stored at key. Starting from Redis version 2.6, when called with the additional count argument, return an array of count distinct elements if count is positive. If called with a negative count the behavior changes and the command is allowed to return the same element multiple times. In this case the number of returned elements is the absolute value of the specified count. When called with just the key argument, the operation is similar to :meth:`~tredis.RedisClient.spop`, however while :meth:`~tredis.RedisClient.spop` also removes the randomly selected element from the set, :meth:`~tredis.RedisClient.srandmember` will just return a random element without altering the original set in any way. .. note:: **Time complexity**: Without the count argument ``O(1)``, otherwise ``O(N)`` where ``N`` is the absolute value of the passed count. :param key: The key to get one or more random members from :type key: :class:`str`, :class:`bytes` :param int count: The number of members to return :rtype: bytes, list :raises: :exc:`~tredis.exceptions.RedisError`
[ "When", "called", "with", "just", "the", "key", "argument", "return", "a", "random", "element", "from", "the", "set", "value", "stored", "at", "key", "." ]
python
train
chrisspen/burlap
burlap/rpi.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/rpi.py#L170-L218
def init_ubuntu_disk(self, yes=0): """ Downloads the latest Ubuntu image and writes it to a microSD card. Based on the instructions from: https://wiki.ubuntu.com/ARM/RaspberryPi For recommended SD card brands, see: http://elinux.org/RPi_SD_cards Note, if you get an error like: Kernel panic-not syncing: VFS: unable to mount root fs that means the SD card is corrupted. Try re-imaging the card or use a different card. """ self.assume_localhost() yes = int(yes) if not self.dryrun: device_question = 'SD card present at %s? ' % self.env.sd_device inp = raw_input(device_question).strip() print('inp:', inp) if not yes and inp and not inp.lower().startswith('y'): return r = self.local_renderer # Confirm SD card is present. r.local('ls {sd_device}') # Download image. r.env.ubuntu_image_fn = os.path.abspath(os.path.split(self.env.ubuntu_download_url)[-1]) r.local('[ ! -f {ubuntu_image_fn} ] && wget {ubuntu_download_url} || true') # Ensure SD card is unmounted. with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir}" ] && umount {sd_media_mount_dir}') with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir2}" ] && umount {sd_media_mount_dir2}') r.pc('Writing the image onto the card.') r.sudo('xzcat {ubuntu_image_fn} | dd bs=4M of={sd_device}') # Flush all writes to disk. r.run('sync')
[ "def", "init_ubuntu_disk", "(", "self", ",", "yes", "=", "0", ")", ":", "self", ".", "assume_localhost", "(", ")", "yes", "=", "int", "(", "yes", ")", "if", "not", "self", ".", "dryrun", ":", "device_question", "=", "'SD card present at %s? '", "%", "sel...
Downloads the latest Ubuntu image and writes it to a microSD card. Based on the instructions from: https://wiki.ubuntu.com/ARM/RaspberryPi For recommended SD card brands, see: http://elinux.org/RPi_SD_cards Note, if you get an error like: Kernel panic-not syncing: VFS: unable to mount root fs that means the SD card is corrupted. Try re-imaging the card or use a different card.
[ "Downloads", "the", "latest", "Ubuntu", "image", "and", "writes", "it", "to", "a", "microSD", "card", "." ]
python
valid
wonambi-python/wonambi
wonambi/widgets/detect_dialogs.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/detect_dialogs.py#L465-L476
def count_channels(self): """If more than one channel selected, activate merge checkbox.""" merge = self.index['merge'] if len(self.idx_chan.selectedItems()) > 1: if merge.isEnabled(): return else: merge.setEnabled(True) else: self.index['merge'].setCheckState(Qt.Unchecked) self.index['merge'].setEnabled(False)
[ "def", "count_channels", "(", "self", ")", ":", "merge", "=", "self", ".", "index", "[", "'merge'", "]", "if", "len", "(", "self", ".", "idx_chan", ".", "selectedItems", "(", ")", ")", ">", "1", ":", "if", "merge", ".", "isEnabled", "(", ")", ":", ...
If more than one channel selected, activate merge checkbox.
[ "If", "more", "than", "one", "channel", "selected", "activate", "merge", "checkbox", "." ]
python
train
nephila/python-taiga
taiga/models/models.py
https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L91-L104
def create(self, project, name, **attrs): """ Create a new :class:`CustomAttribute`. :param project: :class:`Project` id :param name: name of the custom attribute :param attrs: optional attributes of the custom attributes """ attrs.update( { 'project': project, 'name': name } ) return self._new_resource(payload=attrs)
[ "def", "create", "(", "self", ",", "project", ",", "name", ",", "*", "*", "attrs", ")", ":", "attrs", ".", "update", "(", "{", "'project'", ":", "project", ",", "'name'", ":", "name", "}", ")", "return", "self", ".", "_new_resource", "(", "payload", ...
Create a new :class:`CustomAttribute`. :param project: :class:`Project` id :param name: name of the custom attribute :param attrs: optional attributes of the custom attributes
[ "Create", "a", "new", ":", "class", ":", "CustomAttribute", "." ]
python
train
estnltk/estnltk
estnltk/mw_verbs/basic_verbchain_detection.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L133-L161
def _canFormAraPhrase( araVerb, otherVerb ): ''' Teeb kindlaks, kas etteantud 'ära' verb (araVerb) yhildub teise verbiga; Arvestab järgimisi ühilduvusi: ains 2. pööre: ära_neg.o + V_o ains 3. pööre: ära_neg.gu + V_gu mitm 1. pööre: ära_neg.me + V_me ära_neg.me + V_o ära_neg.gem + V_gem mitm 2. pööre: ära_neg.ge + V_ge mitm 3. pööre: ära_neg.gu + V_gu passiiv: ära_neg.gu + V_tagu Kui yhildub, tagastab listide listi, vastasel juhul tagastab tyhja listi. Tagastatava listi esimene liige on 'ära' verbi analüüside indeksite list (millised analüüsid vastavad 'ära' verbile) ning listi teine liige on yhilduva verbi analüüside indeksite list (millised analüüsid vastavad ühilduvale verbile); Indeksite listid on sellised, nagu neid leitakse meetodi wordtemplate.matchingAnalyseIndexes(token) abil; ''' global _verbAraAgreements for i in range(0, len(_verbAraAgreements), 2): araVerbTemplate = _verbAraAgreements[i] otherVerbTemplate = _verbAraAgreements[i+1] matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb) if matchingAraAnalyses: matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(otherVerb) if matchingVerbAnalyses: return [matchingAraAnalyses, matchingVerbAnalyses] return []
[ "def", "_canFormAraPhrase", "(", "araVerb", ",", "otherVerb", ")", ":", "global", "_verbAraAgreements", "for", "i", "in", "range", "(", "0", ",", "len", "(", "_verbAraAgreements", ")", ",", "2", ")", ":", "araVerbTemplate", "=", "_verbAraAgreements", "[", "i...
Teeb kindlaks, kas etteantud 'ära' verb (araVerb) yhildub teise verbiga; Arvestab järgimisi ühilduvusi: ains 2. pööre: ära_neg.o + V_o ains 3. pööre: ära_neg.gu + V_gu mitm 1. pööre: ära_neg.me + V_me ära_neg.me + V_o ära_neg.gem + V_gem mitm 2. pööre: ära_neg.ge + V_ge mitm 3. pööre: ära_neg.gu + V_gu passiiv: ära_neg.gu + V_tagu Kui yhildub, tagastab listide listi, vastasel juhul tagastab tyhja listi. Tagastatava listi esimene liige on 'ära' verbi analüüside indeksite list (millised analüüsid vastavad 'ära' verbile) ning listi teine liige on yhilduva verbi analüüside indeksite list (millised analüüsid vastavad ühilduvale verbile); Indeksite listid on sellised, nagu neid leitakse meetodi wordtemplate.matchingAnalyseIndexes(token) abil;
[ "Teeb", "kindlaks", "kas", "etteantud", "ära", "verb", "(", "araVerb", ")", "yhildub", "teise", "verbiga", ";", "Arvestab", "järgimisi", "ühilduvusi", ":", "ains", "2", ".", "pööre", ":", "ära_neg", ".", "o", "+", "V_o", "ains", "3", ".", "pööre", ":", ...
python
train
NASA-AMMOS/AIT-Core
ait/core/server/server.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/server.py#L240-L292
def _create_plugin(self, config): """ Creates a plugin from its config. Params: config: plugin configuration as read by ait.config Returns: plugin: a Plugin Raises: ValueError: if any of the required config values are missing """ if config is None: raise ValueError('No plugin config to create plugin from.') name = config.pop('name', None) if name is None: raise(cfg.AitConfigMissing('plugin name')) # TODO I don't think we actually care about this being unique? Left over from # previous conversations about stuff? module_name = name.rsplit('.', 1)[0] class_name = name.rsplit('.', 1)[-1] if class_name in [x.name for x in (self.outbound_streams + self.inbound_streams + self.servers + self.plugins)]: raise ValueError( 'Plugin "{}" already loaded. Only one plugin of a given name is allowed'. format(class_name) ) plugin_inputs = config.pop('inputs', None) if plugin_inputs is None: log.warn('No plugin inputs specified for {}'.format(name)) plugin_inputs = [ ] subscribers = config.pop('outputs', None) if subscribers is None: log.warn('No plugin outputs specified for {}'.format(name)) subscribers = [ ] # try to create plugin module = import_module(module_name) plugin_class = getattr(module, class_name) instance = plugin_class(plugin_inputs, subscribers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL}, **config ) return instance
[ "def", "_create_plugin", "(", "self", ",", "config", ")", ":", "if", "config", "is", "None", ":", "raise", "ValueError", "(", "'No plugin config to create plugin from.'", ")", "name", "=", "config", ".", "pop", "(", "'name'", ",", "None", ")", "if", "name", ...
Creates a plugin from its config. Params: config: plugin configuration as read by ait.config Returns: plugin: a Plugin Raises: ValueError: if any of the required config values are missing
[ "Creates", "a", "plugin", "from", "its", "config", "." ]
python
train
ska-sa/katversion
katversion/version.py
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L248-L321
def get_version(path=None, module=None): """Return the version string. This function ensures that the version string complies with PEP 440. The format of our version string is: - for RELEASE builds: <major>.<minor> e.g. 0.1 2.4 - for DEVELOPMENT builds: <major>.<minor>.dev<num_branch_commits> \ +<branch_name>.g<short_git_sha>[.dirty] e.g. 1.1.dev34+new.shiny.feature.gfa973da 0.1.dev7+master.gb91ffa6.dirty - for UNKNOWN builds: 0.0+unknown.[<scm_type>.]<date> e.g. 0.0+unknown.svn.201402031023 0.0+unknown.201602081715 The <major>.<minor> substring for development builds will be that of the NEXT (minor) release, in order to allow proper Python version ordering. Parameters ---------- path : None or string, optional A file or directory to use to find the SCM or sdist checkout path (default is the current working directory) module : None or string, optional Get version via module name (e.g. __name__ variable), which takes precedence over path if provided (ignore otherwise) Returns ------- version: string A string representation of the package version """ # Check the module option first. version = get_version_from_module(module) if version: return normalised(version) # Turn path into a valid directory (default is current directory) if path is None: path = os.getcwd() path = os.path.abspath(path) if os.path.exists(path) and not os.path.isdir(path): path = os.path.dirname(path) if not os.path.isdir(path): raise ValueError('No such package source directory: %r' % (path,)) # Check for an sdist in the process of being installed by pip. version = get_version_from_unpacked_sdist(path) if version: return normalised(version) # Check the SCM. scm, version = get_version_from_scm(path) if version: return normalised(version) # Check if there is a katversion file in the given path. version = get_version_from_file(path) if version: return normalised(version) # None of the above got a version so we will make one up based on the date. return normalised(date_version(scm))
[ "def", "get_version", "(", "path", "=", "None", ",", "module", "=", "None", ")", ":", "# Check the module option first.", "version", "=", "get_version_from_module", "(", "module", ")", "if", "version", ":", "return", "normalised", "(", "version", ")", "# Turn pa...
Return the version string. This function ensures that the version string complies with PEP 440. The format of our version string is: - for RELEASE builds: <major>.<minor> e.g. 0.1 2.4 - for DEVELOPMENT builds: <major>.<minor>.dev<num_branch_commits> \ +<branch_name>.g<short_git_sha>[.dirty] e.g. 1.1.dev34+new.shiny.feature.gfa973da 0.1.dev7+master.gb91ffa6.dirty - for UNKNOWN builds: 0.0+unknown.[<scm_type>.]<date> e.g. 0.0+unknown.svn.201402031023 0.0+unknown.201602081715 The <major>.<minor> substring for development builds will be that of the NEXT (minor) release, in order to allow proper Python version ordering. Parameters ---------- path : None or string, optional A file or directory to use to find the SCM or sdist checkout path (default is the current working directory) module : None or string, optional Get version via module name (e.g. __name__ variable), which takes precedence over path if provided (ignore otherwise) Returns ------- version: string A string representation of the package version
[ "Return", "the", "version", "string", "." ]
python
train
audreyr/jinja2_pluralize
jinja2_pluralize/__init__.py
https://github.com/audreyr/jinja2_pluralize/blob/e94770417ff65c71461980ec26c5a1b4e4212ca5/jinja2_pluralize/__init__.py#L18-L62
def pluralize_dj(value, arg='s'): """ Adapted from django.template.defaultfilters: https://github.com/django/django/blob/master/django/template/defaultfilters.py Returns a plural suffix if the value is not 1. By default, 's' is used as the suffix: * If value is 0, vote{{ value|pluralize }} displays "0 votes". * If value is 1, vote{{ value|pluralize }} displays "1 vote". * If value is 2, vote{{ value|pluralize }} displays "2 votes". If an argument is provided, that string is used instead: * If value is 0, class{{ value|pluralize:"es" }} displays "0 classes". * If value is 1, class{{ value|pluralize:"es" }} displays "1 class". * If value is 2, class{{ value|pluralize:"es" }} displays "2 classes". If the provided argument contains a comma, the text before the comma is used for the singular case and the text after the comma is used for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies". * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy". * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies". """ if ',' not in arg: arg = ',' + arg bits = arg.split(',') if len(bits) > 2: return '' singular_suffix, plural_suffix = bits[:2] try: if int(value) != 1: return plural_suffix except ValueError: # Invalid string that's not a number. pass except TypeError: # Value isn't a string or a number; maybe it's a list? try: if len(value) != 1: return plural_suffix except TypeError: # len() of unsized object. pass return singular_suffix
[ "def", "pluralize_dj", "(", "value", ",", "arg", "=", "'s'", ")", ":", "if", "','", "not", "in", "arg", ":", "arg", "=", "','", "+", "arg", "bits", "=", "arg", ".", "split", "(", "','", ")", "if", "len", "(", "bits", ")", ">", "2", ":", "retu...
Adapted from django.template.defaultfilters: https://github.com/django/django/blob/master/django/template/defaultfilters.py Returns a plural suffix if the value is not 1. By default, 's' is used as the suffix: * If value is 0, vote{{ value|pluralize }} displays "0 votes". * If value is 1, vote{{ value|pluralize }} displays "1 vote". * If value is 2, vote{{ value|pluralize }} displays "2 votes". If an argument is provided, that string is used instead: * If value is 0, class{{ value|pluralize:"es" }} displays "0 classes". * If value is 1, class{{ value|pluralize:"es" }} displays "1 class". * If value is 2, class{{ value|pluralize:"es" }} displays "2 classes". If the provided argument contains a comma, the text before the comma is used for the singular case and the text after the comma is used for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies". * If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy". * If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
[ "Adapted", "from", "django", ".", "template", ".", "defaultfilters", ":", "https", ":", "//", "github", ".", "com", "/", "django", "/", "django", "/", "blob", "/", "master", "/", "django", "/", "template", "/", "defaultfilters", ".", "py" ]
python
train
PeerAssets/pypeerassets
pypeerassets/protocol.py
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L147-L152
def to_json(self) -> dict: '''export the Deck object to json-ready format''' d = self.__dict__ d['p2th_wif'] = self.p2th_wif return d
[ "def", "to_json", "(", "self", ")", "->", "dict", ":", "d", "=", "self", ".", "__dict__", "d", "[", "'p2th_wif'", "]", "=", "self", ".", "p2th_wif", "return", "d" ]
export the Deck object to json-ready format
[ "export", "the", "Deck", "object", "to", "json", "-", "ready", "format" ]
python
train
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L239-L259
def get_non_compulsory_fields(layer_purpose, layer_subcategory=None): """Get non compulsory field based on layer_purpose and layer_subcategory. Used for get field in InaSAFE Fields step in wizard. :param layer_purpose: The layer purpose. :type layer_purpose: str :param layer_subcategory: Exposure or hazard value. :type layer_subcategory: str :returns: Compulsory fields :rtype: list """ all_fields = get_fields( layer_purpose, layer_subcategory, replace_null=False) compulsory_field = get_compulsory_fields( layer_purpose, layer_subcategory) if compulsory_field in all_fields: all_fields.remove(compulsory_field) return all_fields
[ "def", "get_non_compulsory_fields", "(", "layer_purpose", ",", "layer_subcategory", "=", "None", ")", ":", "all_fields", "=", "get_fields", "(", "layer_purpose", ",", "layer_subcategory", ",", "replace_null", "=", "False", ")", "compulsory_field", "=", "get_compulsory...
Get non compulsory field based on layer_purpose and layer_subcategory. Used for get field in InaSAFE Fields step in wizard. :param layer_purpose: The layer purpose. :type layer_purpose: str :param layer_subcategory: Exposure or hazard value. :type layer_subcategory: str :returns: Compulsory fields :rtype: list
[ "Get", "non", "compulsory", "field", "based", "on", "layer_purpose", "and", "layer_subcategory", "." ]
python
train
gwastro/pycbc
pycbc/psd/estimate.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/estimate.py#L57-L185
def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann', avg_method='median', num_segments=None, require_exact_data_fit=False): """PSD estimator based on Welch's method. Parameters ---------- timeseries : TimeSeries Time series for which the PSD is to be estimated. seg_len : int Segment length in samples. seg_stride : int Separation between consecutive segments, in samples. window : {'hann', numpy.ndarray} Function used to window segments before Fourier transforming, or a `numpy.ndarray` that specifies the window. avg_method : {'median', 'mean', 'median-mean'} Method used for averaging individual segment PSDs. Returns ------- psd : FrequencySeries Frequency series containing the estimated PSD. Raises ------ ValueError For invalid choices of `seg_len`, `seg_stride` `window` and `avg_method` and for inconsistent combinations of len(`timeseries`), `seg_len` and `seg_stride`. Notes ----- See arXiv:gr-qc/0509116 for details. """ window_map = { 'hann': numpy.hanning } # sanity checks if isinstance(window, numpy.ndarray) and window.size != seg_len: raise ValueError('Invalid window: incorrect window length') if not isinstance(window, numpy.ndarray) and window not in window_map: raise ValueError('Invalid window: unknown window {!r}'.format(window)) if avg_method not in ('mean', 'median', 'median-mean'): raise ValueError('Invalid averaging method') if type(seg_len) is not int or type(seg_stride) is not int \ or seg_len <= 0 or seg_stride <= 0: raise ValueError('Segment length and stride must be positive integers') if timeseries.precision == 'single': fs_dtype = numpy.complex64 elif timeseries.precision == 'double': fs_dtype = numpy.complex128 num_samples = len(timeseries) if num_segments is None: num_segments = int(num_samples // seg_stride) # NOTE: Is this not always true? 
if (num_segments - 1) * seg_stride + seg_len > num_samples: num_segments -= 1 if not require_exact_data_fit: data_len = (num_segments - 1) * seg_stride + seg_len # Get the correct amount of data if data_len < num_samples: diff = num_samples - data_len start = diff // 2 end = num_samples - diff // 2 # Want this to be integers so if diff is odd, catch it here. if diff % 2: start = start + 1 timeseries = timeseries[start:end] num_samples = len(timeseries) if data_len > num_samples: err_msg = "I was asked to estimate a PSD on %d " %(data_len) err_msg += "data samples. However the data provided only contains " err_msg += "%d data samples." %(num_samples) if num_samples != (num_segments - 1) * seg_stride + seg_len: raise ValueError('Incorrect choice of segmentation parameters') if not isinstance(window, numpy.ndarray): window = window_map[window](seg_len) w = Array(window.astype(timeseries.dtype)) # calculate psd of each segment delta_f = 1. / timeseries.delta_t / seg_len segment_tilde = FrequencySeries( numpy.zeros(int(seg_len / 2 + 1)), delta_f=delta_f, dtype=fs_dtype, ) segment_psds = [] for i in range(num_segments): segment_start = i * seg_stride segment_end = segment_start + seg_len segment = timeseries[segment_start:segment_end] assert len(segment) == seg_len fft(segment * w, segment_tilde) seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy() #halve the DC and Nyquist components to be consistent with TO10095 seg_psd[0] /= 2 seg_psd[-1] /= 2 segment_psds.append(seg_psd) segment_psds = numpy.array(segment_psds) if avg_method == 'mean': psd = numpy.mean(segment_psds, axis=0) elif avg_method == 'median': psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments) elif avg_method == 'median-mean': odd_psds = segment_psds[::2] even_psds = segment_psds[1::2] odd_median = numpy.median(odd_psds, axis=0) / \ median_bias(len(odd_psds)) even_median = numpy.median(even_psds, axis=0) / \ median_bias(len(even_psds)) psd = (odd_median + even_median) / 2 psd *= 2 * 
delta_f * seg_len / (w*w).sum() return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype, epoch=timeseries.start_time)
[ "def", "welch", "(", "timeseries", ",", "seg_len", "=", "4096", ",", "seg_stride", "=", "2048", ",", "window", "=", "'hann'", ",", "avg_method", "=", "'median'", ",", "num_segments", "=", "None", ",", "require_exact_data_fit", "=", "False", ")", ":", "wind...
PSD estimator based on Welch's method. Parameters ---------- timeseries : TimeSeries Time series for which the PSD is to be estimated. seg_len : int Segment length in samples. seg_stride : int Separation between consecutive segments, in samples. window : {'hann', numpy.ndarray} Function used to window segments before Fourier transforming, or a `numpy.ndarray` that specifies the window. avg_method : {'median', 'mean', 'median-mean'} Method used for averaging individual segment PSDs. Returns ------- psd : FrequencySeries Frequency series containing the estimated PSD. Raises ------ ValueError For invalid choices of `seg_len`, `seg_stride` `window` and `avg_method` and for inconsistent combinations of len(`timeseries`), `seg_len` and `seg_stride`. Notes ----- See arXiv:gr-qc/0509116 for details.
[ "PSD", "estimator", "based", "on", "Welch", "s", "method", "." ]
python
train
wheeler-microfluidics/dmf-control-board-firmware
dmf_control_board_firmware/__init__.py
https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L1504-L1520
def persistent_write(self, address, byte, refresh_config=False): ''' Write a single byte to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). byte : int Value to write to address. refresh_config : bool, optional Is ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings. ''' self._persistent_write(address, byte) if refresh_config: self.load_config(False)
[ "def", "persistent_write", "(", "self", ",", "address", ",", "byte", ",", "refresh_config", "=", "False", ")", ":", "self", ".", "_persistent_write", "(", "address", ",", "byte", ")", "if", "refresh_config", ":", "self", ".", "load_config", "(", "False", "...
Write a single byte to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). byte : int Value to write to address. refresh_config : bool, optional Is ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings.
[ "Write", "a", "single", "byte", "to", "an", "address", "in", "persistent", "memory", "." ]
python
train
JosuaKrause/quick_server
quick_server/quick_server.py
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2787-L2794
def handle_error(self, request, client_address): """Handle an error gracefully. """ if self.can_ignore_error(): return thread = threading.current_thread() msg("Error in request ({0}): {1} in {2}\n{3}", client_address, repr(request), thread.name, traceback.format_exc())
[ "def", "handle_error", "(", "self", ",", "request", ",", "client_address", ")", ":", "if", "self", ".", "can_ignore_error", "(", ")", ":", "return", "thread", "=", "threading", ".", "current_thread", "(", ")", "msg", "(", "\"Error in request ({0}): {1} in {2}\\n...
Handle an error gracefully.
[ "Handle", "an", "error", "gracefully", "." ]
python
train
Scifabric/pbs
helpers.py
https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L103-L138
def _update_project(config, task_presenter, results, long_description, tutorial): """Update a project.""" try: # Get project project = find_project_by_short_name(config.project['short_name'], config.pbclient, config.all) # Update attributes project.name = config.project['name'] project.short_name = config.project['short_name'] project.description = config.project['description'] # Update long_description with open(long_description, 'r') as f: project.long_description = f.read() # Update task presenter with open(task_presenter, 'r') as f: project.info['task_presenter'] = f.read() _update_task_presenter_bundle_js(project) # Update results with open(results, 'r') as f: project.info['results'] = f.read() # Update tutorial with open(tutorial, 'r') as f: project.info['tutorial'] = f.read() response = config.pbclient.update_project(project) check_api_error(response) return ("Project %s updated!" % config.project['short_name']) except exceptions.ConnectionError: return ("Connection Error! The server %s is not responding" % config.server) except ProjectNotFound: return ("Project not found! The project: %s is missing." \ " Use the flag --all=1 to search in all the server " \ % config.project['short_name']) except TaskNotFound: raise
[ "def", "_update_project", "(", "config", ",", "task_presenter", ",", "results", ",", "long_description", ",", "tutorial", ")", ":", "try", ":", "# Get project", "project", "=", "find_project_by_short_name", "(", "config", ".", "project", "[", "'short_name'", "]", ...
Update a project.
[ "Update", "a", "project", "." ]
python
train
upsight/doctor
doctor/flask.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/flask.py#L97-L246
def handle_http(handler: Resource, args: Tuple, kwargs: Dict, logic: Callable): """Handle a Flask HTTP request :param handler: flask_restful.Resource: An instance of a Flask Restful resource class. :param tuple args: Any positional arguments passed to the wrapper method. :param dict kwargs: Any keyword arguments passed to the wrapper method. :param callable logic: The callable to invoke to actually perform the business logic for this request. """ try: # We are checking mimetype here instead of content_type because # mimetype is just the content-type, where as content_type can # contain encoding, charset, and language information. e.g. # `Content-Type: application/json; charset=UTF8` if (request.mimetype == 'application/json' and request.method in HTTP_METHODS_WITH_JSON_BODY): # This is a proper typed JSON request. The parameters will be # encoded into the request body as a JSON blob. if not logic._doctor_req_obj_type: request_params = map_param_names( request.json, logic._doctor_signature.parameters) else: request_params = request.json else: # Try to parse things from normal HTTP parameters request_params = parse_form_and_query_params( request.values, logic._doctor_signature.parameters) params = request_params # Only filter out additional params if a req_obj_type was not specified. if not logic._doctor_req_obj_type: # Filter out any params not part of the logic signature. all_params = logic._doctor_params.all params = {k: v for k, v in params.items() if k in all_params} params.update(**kwargs) # Check for required params missing = [] for required in logic._doctor_params.required: if required not in params: missing.append(required) if missing: verb = 'are' if len(missing) == 1: verb = 'is' missing = missing[0] error = '{} {} required.'.format(missing, verb) raise InvalidValueError(error) # Validate and coerce parameters to the appropriate types. 
errors = {} sig = logic._doctor_signature # If a `req_obj_type` was defined for the route, pass all request # params to that type for validation/coercion if logic._doctor_req_obj_type: annotation = logic._doctor_req_obj_type try: # NOTE: We calculate the value before applying native type in # order to support UnionType types which dynamically modifies # the native_type property based on the initialized value. value = annotation(params) params = annotation.native_type(value) except TypeError: logging.exception( 'Error casting and validating params with value `%s`.', params) raise except TypeSystemError as e: errors['__all__'] = e.detail else: for name, value in params.items(): annotation = sig.parameters[name].annotation if annotation.nullable and value is None: continue try: # NOTE: We calculate the value before applying native type # in order to support UnionType types which dynamically # modifies the native_type property based on the initialized # value. value = annotation(value) params[name] = annotation.native_type(value) except TypeSystemError as e: errors[name] = e.detail if errors: raise TypeSystemError(errors, errors=errors) if logic._doctor_req_obj_type: # Pass any positional arguments followed by the coerced request # parameters to the logic function. response = logic(*args, params) else: # Only pass request parameters defined by the logic signature. logic_params = {k: v for k, v in params.items() if k in logic._doctor_params.logic} response = logic(*args, **logic_params) # response validation if sig.return_annotation != sig.empty: return_annotation = sig.return_annotation _response = response if isinstance(response, Response): _response = response.content # Check if our return annotation is a Response that supplied a # type to validate against. If so, use that type for validation # e.g. 
def logic() -> Response[MyType] if (issubclass(return_annotation, Response) and return_annotation.__args__ is not None): return_annotation = return_annotation.__args__[0] try: return_annotation(_response) except TypeSystemError as e: response_str = str(_response) logging.warning('Response to %s %s does not validate: %s.', request.method, request.path, response_str, exc_info=e) if should_raise_response_validation_errors(): error = ('Response to {method} {path} `{response}` does not' ' validate: {error}'.format( method=request.method, path=request.path, response=response, error=e.detail)) raise TypeSystemError(error) if isinstance(response, Response): status_code = response.status_code if status_code is None: status_code = STATUS_CODE_MAP.get(request.method, 200) return (response.content, status_code, response.headers) return response, STATUS_CODE_MAP.get(request.method, 200) except (InvalidValueError, TypeSystemError) as e: errors = getattr(e, 'errors', None) raise HTTP400Exception(e, errors=errors) except UnauthorizedError as e: raise HTTP401Exception(e) except ForbiddenError as e: raise HTTP403Exception(e) except NotFoundError as e: raise HTTP404Exception(e) except ImmutableError as e: raise HTTP409Exception(e) except Exception as e: # Always re-raise exceptions when DEBUG is enabled for development. if current_app.config.get('DEBUG', False): raise allowed_exceptions = logic._doctor_allowed_exceptions if allowed_exceptions and any(isinstance(e, cls) for cls in allowed_exceptions): raise logging.exception(e) raise HTTP500Exception('Uncaught error in logic function')
[ "def", "handle_http", "(", "handler", ":", "Resource", ",", "args", ":", "Tuple", ",", "kwargs", ":", "Dict", ",", "logic", ":", "Callable", ")", ":", "try", ":", "# We are checking mimetype here instead of content_type because", "# mimetype is just the content-type, wh...
Handle a Flask HTTP request :param handler: flask_restful.Resource: An instance of a Flask Restful resource class. :param tuple args: Any positional arguments passed to the wrapper method. :param dict kwargs: Any keyword arguments passed to the wrapper method. :param callable logic: The callable to invoke to actually perform the business logic for this request.
[ "Handle", "a", "Flask", "HTTP", "request" ]
python
train
gccxml/pygccxml
pygccxml/declarations/scopedef.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/scopedef.py#L582-L603
def variable( self, name=None, function=None, decl_type=None, header_dir=None, header_file=None, recursive=None): """returns reference to variable declaration, that is matched defined criteria""" return ( self._find_single( self._impl_matchers[ scopedef_t.variable], name=name, function=function, decl_type=decl_type, header_dir=header_dir, header_file=header_file, recursive=recursive) )
[ "def", "variable", "(", "self", ",", "name", "=", "None", ",", "function", "=", "None", ",", "decl_type", "=", "None", ",", "header_dir", "=", "None", ",", "header_file", "=", "None", ",", "recursive", "=", "None", ")", ":", "return", "(", "self", "....
returns reference to variable declaration, that is matched defined criteria
[ "returns", "reference", "to", "variable", "declaration", "that", "is", "matched", "defined", "criteria" ]
python
train
StanfordVL/robosuite
robosuite/environments/sawyer.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/sawyer.py#L106-L116
def _reset_internal(self): """ Sets initial pose of arm and grippers. """ super()._reset_internal() self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos if self.has_gripper: self.sim.data.qpos[ self._ref_joint_gripper_actuator_indexes ] = self.gripper.init_qpos
[ "def", "_reset_internal", "(", "self", ")", ":", "super", "(", ")", ".", "_reset_internal", "(", ")", "self", ".", "sim", ".", "data", ".", "qpos", "[", "self", ".", "_ref_joint_pos_indexes", "]", "=", "self", ".", "mujoco_robot", ".", "init_qpos", "if",...
Sets initial pose of arm and grippers.
[ "Sets", "initial", "pose", "of", "arm", "and", "grippers", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L1263-L1276
def __write_funding(self): """ Write funding section. There are likely multiple entries. :param dict d: :return none: """ self.__reorganize_funding() # if funding is empty, insert a blank entry so that it'll still write the empty section on the template. if not self.noaa_data_sorted["Funding_Agency"]: self.noaa_data_sorted["Funding_Agency"].append({"grant": "", "agency": ""}) for idx, entry in enumerate(self.noaa_data_sorted["Funding_Agency"]): logger_lpd_noaa.info("funding: {}".format(idx)) self.__write_generic('Funding_Agency', entry) return
[ "def", "__write_funding", "(", "self", ")", ":", "self", ".", "__reorganize_funding", "(", ")", "# if funding is empty, insert a blank entry so that it'll still write the empty section on the template.", "if", "not", "self", ".", "noaa_data_sorted", "[", "\"Funding_Agency\"", "...
Write funding section. There are likely multiple entries. :param dict d: :return none:
[ "Write", "funding", "section", ".", "There", "are", "likely", "multiple", "entries", ".", ":", "param", "dict", "d", ":", ":", "return", "none", ":" ]
python
train
spacetelescope/pysynphot
pysynphot/spectrum.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/spectrum.py#L1818-L1856
def pivot(self, binned=False): """Calculate :ref:`pysynphot-formula-pivwv`. Parameters ---------- binned : bool This is reserved for use by `~pysynphot.observation.Observation`. If `True`, binned wavelength set is used. Default is `False`. Returns ------- ans : float Pivot wavelength. Raises ------ AttributeError Binned wavelength set requested but not found. """ if binned: try: wave = self.binwave except AttributeError: raise AttributeError('Class ' + str(type(self)) + ' does not support binning.') else: wave = self.wave countmulwave = self(wave)*wave countdivwave = self(wave)/wave num = self.trapezoidIntegration(wave, countmulwave) den = self.trapezoidIntegration(wave, countdivwave) if num == 0.0 or den == 0.0: return 0.0 return math.sqrt(num/den)
[ "def", "pivot", "(", "self", ",", "binned", "=", "False", ")", ":", "if", "binned", ":", "try", ":", "wave", "=", "self", ".", "binwave", "except", "AttributeError", ":", "raise", "AttributeError", "(", "'Class '", "+", "str", "(", "type", "(", "self",...
Calculate :ref:`pysynphot-formula-pivwv`. Parameters ---------- binned : bool This is reserved for use by `~pysynphot.observation.Observation`. If `True`, binned wavelength set is used. Default is `False`. Returns ------- ans : float Pivot wavelength. Raises ------ AttributeError Binned wavelength set requested but not found.
[ "Calculate", ":", "ref", ":", "pysynphot", "-", "formula", "-", "pivwv", "." ]
python
train
bastibe/PySoundCard
pysoundcard.py
https://github.com/bastibe/PySoundCard/blob/fb16460b75a1bb416089ebecdf700fa954faa5b7/pysoundcard.py#L416-L425
def start(self): """Commence audio processing. If successful, the stream is considered active. """ err = _pa.Pa_StartStream(self._stream) if err == _pa.paStreamIsNotStopped: return self._handle_error(err)
[ "def", "start", "(", "self", ")", ":", "err", "=", "_pa", ".", "Pa_StartStream", "(", "self", ".", "_stream", ")", "if", "err", "==", "_pa", ".", "paStreamIsNotStopped", ":", "return", "self", ".", "_handle_error", "(", "err", ")" ]
Commence audio processing. If successful, the stream is considered active.
[ "Commence", "audio", "processing", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L68-L74
def addOntology(self, ontology): """ Add an ontology map to this data repository. """ self._ontologyNameMap[ontology.getName()] = ontology self._ontologyIdMap[ontology.getId()] = ontology self._ontologyIds.append(ontology.getId())
[ "def", "addOntology", "(", "self", ",", "ontology", ")", ":", "self", ".", "_ontologyNameMap", "[", "ontology", ".", "getName", "(", ")", "]", "=", "ontology", "self", ".", "_ontologyIdMap", "[", "ontology", ".", "getId", "(", ")", "]", "=", "ontology", ...
Add an ontology map to this data repository.
[ "Add", "an", "ontology", "map", "to", "this", "data", "repository", "." ]
python
train
peterwittek/somoclu
src/Python/somoclu/train.py
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L136-L154
def load_bmus(self, filename): """Load the best matching units from a file to the Somoclu object. :param filename: The name of the file. :type filename: str. """ self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2)) if self.n_vectors != 0 and len(self.bmus) != self.n_vectors: raise Exception("The number of best matching units does not match " "the number of data instances") else: self.n_vectors = len(self.bmus) tmp = self.bmus[:, 0].copy() self.bmus[:, 0] = self.bmus[:, 1].copy() self.bmus[:, 1] = tmp if max(self.bmus[:, 0]) > self._n_columns - 1 or \ max(self.bmus[:, 1]) > self._n_rows - 1: raise Exception("The dimensions of the best matching units do not " "match that of the map")
[ "def", "load_bmus", "(", "self", ",", "filename", ")", ":", "self", ".", "bmus", "=", "np", ".", "loadtxt", "(", "filename", ",", "comments", "=", "'%'", ",", "usecols", "=", "(", "1", ",", "2", ")", ")", "if", "self", ".", "n_vectors", "!=", "0"...
Load the best matching units from a file to the Somoclu object. :param filename: The name of the file. :type filename: str.
[ "Load", "the", "best", "matching", "units", "from", "a", "file", "to", "the", "Somoclu", "object", "." ]
python
train
gwpy/gwpy
gwpy/utils/sphinx/zenodo.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/sphinx/zenodo.py#L46-L90
def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v'): """Query and format a citations page from Zenodo entries Parameters ---------- zid : `int`, `str` the Zenodo ID of the target record url : `str`, optional the base URL of the Zenodo host, defaults to ``https://zenodo.org`` hist : `int`, optional the maximum number of hits to show, default: ``10`` tag_prefix : `str`, optional the prefix for git tags. This is removed to generate the section headers in the output RST Returns ------- rst : `str` an RST-formatted string of DOI badges with URLs """ # query for metadata url = ('{url}/api/records/?' 'page=1&' 'size={hits}&' 'q=conceptrecid:"{id}"&' 'sort=-version&' 'all_versions=True'.format(id=zid, url=url, hits=hits)) metadata = requests.get(url).json() lines = [] for i, hit in enumerate(metadata['hits']['hits']): version = hit['metadata']['version'][len(tag_prefix):] lines.append('-' * len(version)) lines.append(version) lines.append('-' * len(version)) lines.append('') lines.append('.. image:: {badge}\n' ' :target: {doi}'.format(**hit['links'])) if i < hits - 1: lines.append('') return '\n'.join(lines)
[ "def", "format_citations", "(", "zid", ",", "url", "=", "'https://zenodo.org/'", ",", "hits", "=", "10", ",", "tag_prefix", "=", "'v'", ")", ":", "# query for metadata", "url", "=", "(", "'{url}/api/records/?'", "'page=1&'", "'size={hits}&'", "'q=conceptrecid:\"{id}...
Query and format a citations page from Zenodo entries Parameters ---------- zid : `int`, `str` the Zenodo ID of the target record url : `str`, optional the base URL of the Zenodo host, defaults to ``https://zenodo.org`` hist : `int`, optional the maximum number of hits to show, default: ``10`` tag_prefix : `str`, optional the prefix for git tags. This is removed to generate the section headers in the output RST Returns ------- rst : `str` an RST-formatted string of DOI badges with URLs
[ "Query", "and", "format", "a", "citations", "page", "from", "Zenodo", "entries" ]
python
train
Microsoft/botbuilder-python
libraries/botbuilder-core/botbuilder/core/bot_adapter.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-core/botbuilder/core/bot_adapter.py#L51-L59
async def run_middleware(self, context: TurnContext, callback: Callable=None): """ Called by the parent class to run the adapters middleware set and calls the passed in `callback()` handler at the end of the chain. :param context: :param callback: :return: """ return await self._middleware.receive_activity_with_status(context, callback)
[ "async", "def", "run_middleware", "(", "self", ",", "context", ":", "TurnContext", ",", "callback", ":", "Callable", "=", "None", ")", ":", "return", "await", "self", ".", "_middleware", ".", "receive_activity_with_status", "(", "context", ",", "callback", ")"...
Called by the parent class to run the adapters middleware set and calls the passed in `callback()` handler at the end of the chain. :param context: :param callback: :return:
[ "Called", "by", "the", "parent", "class", "to", "run", "the", "adapters", "middleware", "set", "and", "calls", "the", "passed", "in", "callback", "()", "handler", "at", "the", "end", "of", "the", "chain", ".", ":", "param", "context", ":", ":", "param", ...
python
test
PyCQA/pylint
pylint/checkers/utils.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/utils.py#L398-L411
def is_ancestor_name( frame: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG ) -> bool: """return True if `frame` is an astroid.Class node with `node` in the subtree of its bases attribute """ try: bases = frame.bases except AttributeError: return False for base in bases: if node in base.nodes_of_class(astroid.Name): return True return False
[ "def", "is_ancestor_name", "(", "frame", ":", "astroid", ".", "node_classes", ".", "NodeNG", ",", "node", ":", "astroid", ".", "node_classes", ".", "NodeNG", ")", "->", "bool", ":", "try", ":", "bases", "=", "frame", ".", "bases", "except", "AttributeError...
return True if `frame` is an astroid.Class node with `node` in the subtree of its bases attribute
[ "return", "True", "if", "frame", "is", "an", "astroid", ".", "Class", "node", "with", "node", "in", "the", "subtree", "of", "its", "bases", "attribute" ]
python
test
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L4467-L4483
def swo_stop(self): """Stops collecting SWO data. Args: self (JLink): the ``JLink`` instance Returns: ``None`` Raises: JLinkException: on error """ res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.STOP, 0) if res < 0: raise errors.JLinkException(res) return None
[ "def", "swo_stop", "(", "self", ")", ":", "res", "=", "self", ".", "_dll", ".", "JLINKARM_SWO_Control", "(", "enums", ".", "JLinkSWOCommands", ".", "STOP", ",", "0", ")", "if", "res", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "res",...
Stops collecting SWO data. Args: self (JLink): the ``JLink`` instance Returns: ``None`` Raises: JLinkException: on error
[ "Stops", "collecting", "SWO", "data", "." ]
python
train