repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
DataBiosphere/toil
src/toil/provisioners/gceProvisioner.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/gceProvisioner.py#L338-L348
def _injectWorkerFiles(self, node, botoExists):
    """Copy the leader's credential files onto a newly provisioned worker.

    :param node: the worker node to set up (must be reachable over SSH).
    :param botoExists: whether a local boto config file exists and should
        be pushed to the worker as well.
    """
    role = 'toil_worker'
    # The node has to be up and accepting connections before any file
    # can be injected into it.
    node.waitForNode(role, keyName=self._keyName)
    node.copySshKeys(self._keyName)
    node.injectFile(self._credentialsPath,
                    GoogleJobStore.nodeServiceAccountJson, role)
    if self._sseKey:
        node.injectFile(self._sseKey, self._sseKey, role)
    if botoExists:
        node.injectFile(self._botoPath, self.NODE_BOTO_PATH, role)
[ "def", "_injectWorkerFiles", "(", "self", ",", "node", ",", "botoExists", ")", ":", "node", ".", "waitForNode", "(", "'toil_worker'", ",", "keyName", "=", "self", ".", "_keyName", ")", "node", ".", "copySshKeys", "(", "self", ".", "_keyName", ")", "node", ...
Set up the credentials on the worker.
[ "Set", "up", "the", "credentials", "on", "the", "worker", "." ]
python
train
iotile/coretools
iotilegateway/iotilegateway/gateway.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/gateway.py#L59-L67
async def stop(self):
    """Stop the gateway manager and synchronously wait for it to stop."""
    # Servers are shut down first, then the device adapters.
    self._logger.info("Stopping all servers")
    for running_server in self.servers:
        await running_server.stop()

    self._logger.info("Stopping all device adapters")
    await self.device_manager.stop()
[ "async", "def", "stop", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Stopping all servers\"", ")", "for", "server", "in", "self", ".", "servers", ":", "await", "server", ".", "stop", "(", ")", "self", ".", "_logger", ".", "info",...
Stop the gateway manager and synchronously wait for it to stop.
[ "Stop", "the", "gateway", "manager", "and", "synchronously", "wait", "for", "it", "to", "stop", "." ]
python
train
cuihantao/andes
andes/variables/varout.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/varout.py#L199-L216
def dump(self):
    """
    Dump the TDS results to the output `dat` file.

    Deprecated: use ``dump_np_vars`` instead.

    :return: succeed flag (``True`` on success or when output is disabled)
    """
    # ``Logger.warn`` is a deprecated alias; ``Logger.warning`` is the
    # supported spelling.
    logger.warning('This function is deprecated and replaced by `dump_np_vars`.')

    ret = False

    if self.system.files.no_output:
        # return ``True`` because it did not fail
        return True

    if self.write_lst() and self.write_dat():
        ret = True

    return ret
[ "def", "dump", "(", "self", ")", ":", "logger", ".", "warn", "(", "'This function is deprecated and replaced by `dump_np_vars`.'", ")", "ret", "=", "False", "if", "self", ".", "system", ".", "files", ".", "no_output", ":", "# return ``True`` because it did not fail", ...
Dump the TDS results to the output `dat` file :return: succeed flag
[ "Dump", "the", "TDS", "results", "to", "the", "output", "dat", "file" ]
python
train
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1777-L1794
def ms_cutlo(self, viewer, event, data_x, data_y):
    """An interactive way to set the low cut level."""
    if not self.cancut:
        return True

    win_x, win_y = self.get_win_xy(viewer)

    state = event.state
    if state == 'down':
        # Remember where the drag began and the cut levels at that moment.
        self._start_x, self._start_y = win_x, win_y
        self._loval, self._hival = viewer.get_cut_levels()
    elif state == 'move':
        self._cutlow_xy(viewer, win_x, win_y)
    else:
        viewer.onscreen_message(None)
    return True
[ "def", "ms_cutlo", "(", "self", ",", "viewer", ",", "event", ",", "data_x", ",", "data_y", ")", ":", "if", "not", "self", ".", "cancut", ":", "return", "True", "x", ",", "y", "=", "self", ".", "get_win_xy", "(", "viewer", ")", "if", "event", ".", ...
An interactive way to set the low cut level.
[ "An", "interactive", "way", "to", "set", "the", "low", "cut", "level", "." ]
python
train
fprimex/zdesk
zdesk/zdesk.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk.py#L303-L598
def call(self, path, query=None, method='GET', data=None,
         files=None, get_all_pages=False, complete_response=False,
         retry_on=None, max_retries=0, raw_query=None, retval=None,
         **kwargs):
    """Make a REST call to the Zendesk web service.

    Parameters:
    path - Path portion of the Zendesk REST endpoint URL.
    query - Query parameters in dict form.
    method - HTTP method to use in making the request.
    data - POST data or multi-part form data to include.
    files - Requests style dict of files for multi-part file uploads.
    get_all_pages - Make multiple requests and follow next_page.
    complete_response - Return raw request results.
    retry_on - Specify any exceptions from ACCEPT_RETRIES or non-2xx
        HTTP codes on which you want to retry request. Note that
        calling Zendesk.call with get_all_pages=True can make up to
        (max_retries + 1) * pages. Defaults to empty set, but can be
        any iterable, exception or int, which will become set with
        same values you provided.
    max_retries - How many additional connections to make when first
        one fails. No effect when retry_on evaluates to False.
        Defaults to 0.
    raw_query - Raw query string, starting with '?', that will be
        appended to the URL path and will completely override /
        discard any other query parameters. Enables use cases where
        query parameters need to be repeated in the query string.
    retval - Request a specific part of the returned response. Valid
        values are 'content', 'code', 'location', and 'headers'.
        JSON content is still automatically deserialized if possible.
        If retval is not specified, then the old behavior of trying
        to determine an appropriate value to return is used.
    """
    # Rather obscure way to support retry_on per single API call:
    # temporarily install the per-call retry settings, re-enter call(),
    # and restore the originals afterwards.
    if retry_on and max_retries:
        try:
            _retry_on = self._retry_on
            _max_retries = self._max_retries
            self.retry_on = retry_on
            self.max_retries = max_retries
            # NOTE(review): raw_query and retval are not forwarded in this
            # recursive call — confirm whether that is intentional.
            return self.call(path=path, query=query, method=method,
                             data=data, files=files,
                             get_all_pages=get_all_pages,
                             complete_response=complete_response)
        finally:
            self._retry_on = _retry_on
            self._max_retries = _max_retries

    # Support specifying a mime-type other than application/json
    mime_type = kwargs.pop('mime_type', 'application/json')

    # Serialize any iterable (non-string) keyword values into
    # comma-separated strings, as the API expects.
    for key in kwargs.keys():
        value = kwargs[key]
        if hasattr(value, '__iter__') and not isinstance(value, str):
            kwargs[key] = ','.join(map(str, value))

    if query:
        if kwargs:
            kwargs.update(query)
        else:
            kwargs = query

    if raw_query:
        # raw_query completely replaces any other query parameters.
        path = path + raw_query
        kwargs = None

    url = self.zdesk_url + path

    if files:
        # Sending multipart file. data contains parameters.
        json = None
        self.headers.pop('Content-Type', None)
    elif (mime_type == 'application/json' and
            (method == 'POST' or method == 'PUT')):
        # Sending JSON data.
        json = data
        data = {}
        self.headers.pop('Content-Type', None)
    elif (mime_type != 'application/json' and
            (method == 'POST' or method == 'PUT')):
        # Uploading an attachment, probably.
        # Specifying the MIME type is required.
        json = None
        self.headers['Content-Type'] = mime_type
    else:
        # Probably a GET or DELETE. Not sending JSON or files.
        json = None
        self.headers.pop('Content-Type', None)

    results = []
    all_requests_complete = False
    request_count = 0

    while not all_requests_complete:
        # Make an http request
        # counts request attempts in order to fetch this specific one
        request_count += 1
        try:
            response = self.client.request(method,
                                           url,
                                           params=kwargs,
                                           json=json,
                                           data=data,
                                           headers=self.headers,
                                           files=files,
                                           **self.client_args)
        except requests.RequestException:
            if request_count <= self.max_retries:
                # we have to bind response to None in case
                # self.client.request raises an exception and
                # response holds old requests.Response
                # (and possibly its Retry-After header)
                response = None
                self._handle_retry(response)
                continue
            else:
                raise

        # If the response status is not in the 200 range then assume an
        # error and raise proper exception
        code = response.status_code
        try:
            if not 200 <= code < 300 and code != 422:
                if code == 401:
                    raise AuthenticationError(
                        response.content, code, response)
                elif code == 429:
                    raise RateLimitError(
                        response.content, code, response)
                else:
                    raise ZendeskError(
                        response.content, code, response)
        except ZendeskError:
            if request_count <= self.max_retries:
                self._handle_retry(response)
                continue
            else:
                raise

        # Deserialize json content if content exists.
        # In some cases Zendesk returns ' ' strings.
        # Also return false non strings (0, [], (), {})
        if response.content.strip() and 'json' in response.headers['content-type']:
            content = response.json()

            # set url to the next page if that was returned in the response
            url = content.get('next_page', None)

            # url we get above already has the start_time appended to it,
            # specific to incremental exports
            kwargs = {}
        elif response.content.strip() and 'text' in response.headers['content-type']:
            try:
                content = response.json()

                # set url to the next page if that was returned in the
                # response
                url = content.get('next_page', None)

                # url we get above already has the start_time appended to
                # it, specific to incremental exports
                kwargs = {}
            except ValueError:
                content = response.content
        else:
            content = response.content
            url = None

        if complete_response:
            results.append({
                'response': response,
                'content': content,
                'status': response.status_code
            })
        else:
            if retval == 'content':
                results.append(content)
            elif retval == 'code':
                results.append(response.status_code)
            elif retval == 'location':
                results.append(response.headers.get('location'))
            elif retval == 'headers':
                results.append(response.headers)
            else:
                # Attempt to automatically determine the value of
                # most interest to return.
                if response.headers.get('location'):
                    # Zendesk's response is sometimes the url of a newly
                    # created user/ticket/group/etc and they pass this
                    # through 'location'. Otherwise, the body of 'content'
                    # has our response.
                    results.append(response.headers.get('location'))
                elif content:
                    results.append(content)
                else:
                    results.append(responses[response.status_code])

        # if there is a next_page, and we are getting pages, then continue
        # making requests
        # deal with how incremental export results are returned
        # there could be two cases
        # response code == 422 returned when end_time < five minutes recent
        # or count < 1000
        # this is an ugly check, and we have to check this just for incremental export end-points
        # non-incremental load end-points have a 100 item/page limit and return next_page = null for last page
        # also note that incremental/ticket_metric_events end-point has a 10,000 items per page limit
        url = None if (url is not None and 'incremental' in url and content.get('count') < 1000) else url
        all_requests_complete = not (get_all_pages and url)
        # reset the per-page retry budget before fetching the next page
        request_count = 0

    if get_all_pages and complete_response:
        # Return the list of results from all calls made.
        # This way even if only one page was present the caller will
        # always receive back an iterable value, since multiple pages
        # were requested/expected. This also provides the information for
        # every call, and saves us from having to try to combine all of
        # that ourselves in a sensible way.
        return results

    if len(results) == 1:
        # regardless as to whether all pages were requested, there was
        # only one call and set of results, so just send it back.
        return results[0]

    # Now we need to try to combine or reduce the results:

    hashable = True
    try:
        if len(set(results)) == 1:
            # all responses were the same, so return just the first one.
            # may have a list of locations or response statuses
            return results[0]
    except TypeError:
        # probably we have a list of content dictionaries.
        hashable = False

    if hashable:
        # we have a list of simple objects like strings, but they are not
        # all the same so send them all back.
        return results

    # may have a sequence of response contents
    # (dicts, possibly lists in the future as that is valid json also)
    combined_dict_results = {}
    combined_list_results = []

    for result in results:
        if isinstance(result, list):
            # the result of this call returned a list.
            # extend the combined list with these results.
            combined_list_results.extend(result)
        elif isinstance(result, dict):
            # the result of this call returned a dict. the dict probably
            # has both simple attributes (strings) and complex attributes
            # (lists). if the attribute is a list, we will extend the
            # combined attribute, otherwise we will just take the last
            # attribute value from the last call.
            # the end result is a response that looks like one giant call,
            # to e.g. list tickets, but was actually made by multiple API
            # calls.
            for k in result.keys():
                v = result[k]
                if isinstance(v, list):
                    try:
                        combined_dict_results[k].extend(v)
                    except KeyError:
                        combined_dict_results[k] = v
                else:
                    combined_dict_results[k] = v
        else:
            # returned result is not a dict or a list. don't know how to
            # deal with this, so just send everything back.
            return results

    if combined_list_results and combined_dict_results:
        # there was a mix of list and dict results from the sequence
        # of calls. this case seems very odd to me if it ever happens.
        # at any rate, send everything back uncombined
        return results

    if combined_dict_results:
        return combined_dict_results

    if combined_list_results:
        return combined_list_results

    # I don't expect to make it here, but I suppose it could happen if,
    # perhaps, a sequence of empty dicts were returned or some such.
    # Send everything back.
    return results
[ "def", "call", "(", "self", ",", "path", ",", "query", "=", "None", ",", "method", "=", "'GET'", ",", "data", "=", "None", ",", "files", "=", "None", ",", "get_all_pages", "=", "False", ",", "complete_response", "=", "False", ",", "retry_on", "=", "N...
Make a REST call to the Zendesk web service. Parameters: path - Path portion of the Zendesk REST endpoint URL. query - Query parameters in dict form. method - HTTP method to use in making the request. data - POST data or multi-part form data to include. files - Requests style dict of files for multi-part file uploads. get_all_pages - Make multiple requests and follow next_page. complete_response - Return raw request results. retry_on - Specify any exceptions from ACCEPT_RETRIES or non-2xx HTTP codes on which you want to retry request. Note that calling Zendesk.call with get_all_pages=True can make up to (max_retries + 1) * pages. Defaults to empty set, but can be any iterable, exception or int, which will become set with same values you provided. max_retries - How many additional connections to make when first one fails. No effect when retry_on evaluates to False. Defaults to 0. raw_query - Raw query string, starting with '?', that will be appended to the URL path and will completely override / discard any other query parameters. Enables use cases where query parameters need to be repeated in the query string. retval - Request a specific part of the returned response. Valid values are 'content', 'code', 'location', and 'headers'. JSON content is still automatically deserialized if possible. If retval is not specified, then the old behavior of trying to determine an appropriate value to return is used.
[ "Make", "a", "REST", "call", "to", "the", "Zendesk", "web", "service", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10003-L10027
def global_position_int_cov_send(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance, force_mavlink1=False):
    '''
    Send the filtered global position (e.g. fused GPS and
    accelerometers). The position is in GPS-frame (right-handed, Z-up)
    and is designed as a scaled integer message since float resolution
    is not sufficient. NOTE: intended for onboard networks / companion
    computers and higher-bandwidth links, optimized for accuracy and
    completeness; use the GLOBAL_POSITION_INT message for a minimal
    subset.

    time_boot_ms    : Timestamp (milliseconds since system boot) (uint32_t)
    time_utc        : Timestamp (microseconds since UNIX epoch) in UTC, 0 for
                      unknown; commonly filled by a GPS receiver's precision
                      time source (uint64_t)
    estimator_type  : Class id of the estimator this estimate originated from (uint8_t)
    lat             : Latitude, expressed as degrees * 1E7 (int32_t)
    lon             : Longitude, expressed as degrees * 1E7 (int32_t)
    alt             : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
    relative_alt    : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
    vx              : Ground X Speed (Latitude), expressed as m/s (float)
    vy              : Ground Y Speed (Longitude), expressed as m/s (float)
    vz              : Ground Z Speed (Altitude), expressed as m/s (float)
    covariance      : Covariance matrix (first six entries are the first ROW,
                      next six entries are the second row, etc.) (float)
    '''
    # Encode first, then hand the packed message to the transport layer.
    encoded = self.global_position_int_cov_encode(
        time_boot_ms, time_utc, estimator_type, lat, lon, alt,
        relative_alt, vx, vy, vz, covariance)
    return self.send(encoded, force_mavlink1=force_mavlink1)
[ "def", "global_position_int_cov_send", "(", "self", ",", "time_boot_ms", ",", "time_utc", ",", "estimator_type", ",", "lat", ",", "lon", ",", "alt", ",", "relative_alt", ",", "vx", ",", "vy", ",", "vz", ",", "covariance", ",", "force_mavlink1", "=", "False",...
The filtered global position (e.g. fused GPS and accelerometers). The position is in GPS-frame (right-handed, Z-up). It is designed as scaled integer message since the resolution of float is not sufficient. NOTE: This message is intended for onboard networks / companion computers and higher-bandwidth links and optimized for accuracy and completeness. Please use the GLOBAL_POSITION_INT message for a minimal subset. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t) estimator_type : Class id of the estimator this estimate originated from. (uint8_t) lat : Latitude, expressed as degrees * 1E7 (int32_t) lon : Longitude, expressed as degrees * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t) relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s (float) vy : Ground Y Speed (Longitude), expressed as m/s (float) vz : Ground Z Speed (Altitude), expressed as m/s (float) covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
[ "The", "filtered", "global", "position", "(", "e", ".", "g", ".", "fused", "GPS", "and", "accelerometers", ")", ".", "The", "position", "is", "in", "GPS", "-", "frame", "(", "right", "-", "handed", "Z", "-", "up", ")", ".", "It", "is", "designed", ...
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_kmlread.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_kmlread.py#L48-L74
def cmd_param(self, args):
    '''control kml reading'''
    # The usage string now also lists the snapwp/snapfence subcommands,
    # which were handled below but missing from the help text.
    usage = "Usage: kml <clear | load (filename) | layers | toggle (layername) | fence (layername) | snapwp | snapfence>"
    if len(args) < 1:
        print(usage)
        return
    elif args[0] == "clear":
        self.clearkml()
    elif args[0] == "snapwp":
        self.cmd_snap_wp(args[1:])
    elif args[0] == "snapfence":
        self.cmd_snap_fence(args[1:])
    elif args[0] == "load":
        if len(args) != 2:
            print("usage: kml load <filename>")
            return
        self.loadkml(args[1])
    elif args[0] == "layers":
        for layer in self.curlayers:
            print("Found layer: " + layer)
    elif args[0] == "toggle":
        self.togglekml(args[1])
    elif args[0] == "fence":
        self.fencekml(args[1])
    else:
        print(usage)
        return
[ "def", "cmd_param", "(", "self", ",", "args", ")", ":", "usage", "=", "\"Usage: kml <clear | load (filename) | layers | toggle (layername) | fence (layername)>\"", "if", "len", "(", "args", ")", "<", "1", ":", "print", "(", "usage", ")", "return", "elif", "args", ...
control kml reading
[ "control", "kml", "reading" ]
python
train
raphaelm/django-hierarkey
hierarkey/models.py
https://github.com/raphaelm/django-hierarkey/blob/3ca822f94fa633c9a6d5abe9c80cb1551299ae46/hierarkey/models.py#L49-L57
def add_default(self, key: str, value: Optional[str], default_type: type = str) -> None:
    """Register a default value and unserialization type for *key*.

    :param key: Key
    :param value: *Serialized* default value, i.e. a string or ``None``.
    :param default_type: The type to unserialize values for this key to,
                         defaults to ``str``.
    """
    default = HierarkeyDefault(value, default_type)
    self.defaults[key] = default
[ "def", "add_default", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Optional", "[", "str", "]", ",", "default_type", ":", "type", "=", "str", ")", "->", "None", ":", "self", ".", "defaults", "[", "key", "]", "=", "HierarkeyDefault", "(", ...
Adds a default value and a default type for a key. :param key: Key :param value: *Serialized* default value, i.e. a string or ``None``. :param default_type: The type to unserialize values for this key to, defaults to ``str``.
[ "Adds", "a", "default", "value", "and", "a", "default", "type", "for", "a", "key", "." ]
python
train
eerimoq/bincopy
bincopy.py
https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L922-L929
def add_binary_file(self, filename, address=0, overwrite=False):
    """Read the whole binary file `filename` and add its contents.

    Set `overwrite` to ``True`` to allow already added data to be
    overwritten.

    """
    with open(filename, 'rb') as binary_file:
        contents = binary_file.read()
    self.add_binary(contents, address, overwrite)
[ "def", "add_binary_file", "(", "self", ",", "filename", ",", "address", "=", "0", ",", "overwrite", "=", "False", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fin", ":", "self", ".", "add_binary", "(", "fin", ".", "read", "(", ...
Open given binary file and add its contents. Set `overwrite` to ``True`` to allow already added data to be overwritten.
[ "Open", "given", "binary", "file", "and", "add", "its", "contents", ".", "Set", "overwrite", "to", "True", "to", "allow", "already", "added", "data", "to", "be", "overwritten", "." ]
python
train
ttinoco/OPTALG
optalg/lin_solver/__init__.py
https://github.com/ttinoco/OPTALG/blob/d4f141292f281eea4faa71473258139e7f433001/optalg/lin_solver/__init__.py#L14-L40
def new_linsolver(name, prop):
    """
    Creates a linear solver.

    Parameters
    ----------
    name : string
    prop : string

    Returns
    -------
    solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
    """
    if name == 'default':
        # Prefer MUMPS; fall back to SUPERLU when MUMPS is not available.
        try:
            return new_linsolver('mumps', prop)
        except ImportError:
            return new_linsolver('superlu', prop)
    if name == 'mumps':
        return LinSolverMUMPS(prop)
    if name == 'superlu':
        return LinSolverSUPERLU(prop)
    if name == 'umfpack':
        return LinSolverUMFPACK(prop)
    raise ValueError('invalid linear solver name')
[ "def", "new_linsolver", "(", "name", ",", "prop", ")", ":", "if", "name", "==", "'mumps'", ":", "return", "LinSolverMUMPS", "(", "prop", ")", "elif", "name", "==", "'superlu'", ":", "return", "LinSolverSUPERLU", "(", "prop", ")", "elif", "name", "==", "'...
Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
[ "Creates", "a", "linear", "solver", "." ]
python
train
squaresLab/BugZoo
bugzoo/mgr/container.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/container.py#L340-L347
def interact(self, container: Container) -> None:
    """
    Connects to the PTY (pseudo-TTY) for a given container.
    Blocks until the user exits the PTY.
    """
    inner_cmd = "/bin/bash -c 'source /.environment && /bin/bash'"
    full_cmd = "docker exec -it {} {}".format(container.id, inner_cmd)
    # shell=True so the interactive docker session attaches to the
    # user's terminal; blocks until that shell exits.
    subprocess.call(full_cmd, shell=True)
[ "def", "interact", "(", "self", ",", "container", ":", "Container", ")", "->", "None", ":", "cmd", "=", "\"/bin/bash -c 'source /.environment && /bin/bash'\"", "cmd", "=", "\"docker exec -it {} {}\"", ".", "format", "(", "container", ".", "id", ",", "cmd", ")", ...
Connects to the PTY (pseudo-TTY) for a given container. Blocks until the user exits the PTY.
[ "Connects", "to", "the", "PTY", "(", "pseudo", "-", "TTY", ")", "for", "a", "given", "container", ".", "Blocks", "until", "the", "user", "exits", "the", "PTY", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xlogrecordwidget/xlogrecordwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlogrecordwidget/xlogrecordwidget.py#L218-L227
def hasLogger(self, logger):
    """
    Returns whether or not the inputed logger is tracked by this
    widget.

    :param      logger | <str> || <logging.Logger>
    """
    if isinstance(logger, logging.Logger):
        # Bug fix: was `logging.name`, which raised AttributeError (the
        # logging module has no `name` attribute); the intent is to
        # look up the Logger instance by its name.
        logger = logger.name
    return logger in self._loggers
[ "def", "hasLogger", "(", "self", ",", "logger", ")", ":", "if", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", ":", "logger", "=", "logging", ".", "name", "return", "logger", "in", "self", ".", "_loggers" ]
Returns whether or not the inputed logger is tracked by this widget. :param logger | <str> || <logging.Logger>
[ "Returns", "whether", "or", "not", "the", "inputed", "logger", "is", "tracked", "by", "this", "widget", ".", ":", "param", "logger", "|", "<str", ">", "||", "<logging", ".", "Logger", ">" ]
python
train
SmokinCaterpillar/pypet
pypet/utils/helpful_classes.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/helpful_classes.py#L35-L57
def next(self):
    """Returns next element from chain.

    More precisely, it returns the next element of the foremost
    iterator (``self._current``).  If this iterator is empty it moves
    iteratively along the chain of available iterators
    (``self._chain``, a deque of iterables) to pick the new foremost
    one.

    Raises StopIteration if there are no elements left.

    """
    while True:
        # We need this loop because some iterators may already be empty.
        # We keep on popping from the left until next succeeds and as long
        # as there are iterators available
        try:
            return next(self._current)
        except StopIteration:
            try:
                # Foremost iterator exhausted; promote the next iterable
                # from the chain.
                self._current = iter(self._chain.popleft())
            except IndexError:
                # If we run out of iterators we are sure that
                # there can be no more element
                raise StopIteration('Reached end of iterator chain')
[ "def", "next", "(", "self", ")", ":", "while", "True", ":", "# We need this loop because some iterators may already be empty.", "# We keep on popping from the left until next succeeds and as long", "# as there are iterators available", "try", ":", "return", "next", "(", "self", "...
Returns next element from chain. More precisely, it returns the next element of the foremost iterator. If this iterator is empty it moves iteratively along the chain of available iterators to pick the new foremost one. Raises StopIteration if there are no elements left.
[ "Returns", "next", "element", "from", "chain", "." ]
python
test
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L50-L72
def add_ngrams(self, corpus, minimum, maximum, catalogue=None):
    """Adds n-gram data from `corpus` to the data store.

    :param corpus: corpus of works
    :type corpus: `Corpus`
    :param minimum: minimum n-gram size
    :type minimum: `int`
    :param maximum: maximum n-gram size
    :type maximum: `int`
    :param catalogue: optional catalogue to limit corpus to
    :type catalogue: `Catalogue`

    """
    self._initialise_database()
    if catalogue:
        # Restrict processing to the witnesses of the catalogued works.
        witnesses = (witness
                     for work in catalogue
                     for witness in corpus.get_witnesses(work))
    else:
        witnesses = corpus.get_witnesses()
    for witness in witnesses:
        self._add_text_ngrams(witness, minimum, maximum)
    self._add_indices()
    self._analyse()
[ "def", "add_ngrams", "(", "self", ",", "corpus", ",", "minimum", ",", "maximum", ",", "catalogue", "=", "None", ")", ":", "self", ".", "_initialise_database", "(", ")", "if", "catalogue", ":", "for", "work", "in", "catalogue", ":", "for", "witness", "in"...
Adds n-gram data from `corpus` to the data store. :param corpus: corpus of works :type corpus: `Corpus` :param minimum: minimum n-gram size :type minimum: `int` :param maximum: maximum n-gram size :type maximum: `int` :param catalogue: optional catalogue to limit corpus to :type catalogue: `Catalogue`
[ "Adds", "n", "-", "gram", "data", "from", "corpus", "to", "the", "data", "store", "." ]
python
train
bitcraft/PyTMX
pytmx/pytmx.py
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L530-L546
def get_tile_image_by_gid(self, gid):
    """ Return the tile image for this location

    :param gid: GID of image
    :rtype: surface if found, otherwise ValueError
    """
    try:
        assert (int(gid) >= 0)
        return self.images[gid]
    except TypeError:
        msg = "GIDs must be expressed as a number.  Got: {0}"
        logger.debug(msg.format(gid))
        raise TypeError(msg.format(gid))
    except (AssertionError, IndexError):
        # Bug fix: the previous message had four placeholders
        # ({0}..{3}) but was formatted with only `gid`, so formatting
        # itself raised IndexError and masked the intended ValueError.
        msg = "Invalid GID: {0}"
        logger.debug(msg.format(gid))
        raise ValueError(msg.format(gid))
[ "def", "get_tile_image_by_gid", "(", "self", ",", "gid", ")", ":", "try", ":", "assert", "(", "int", "(", "gid", ")", ">=", "0", ")", "return", "self", ".", "images", "[", "gid", "]", "except", "TypeError", ":", "msg", "=", "\"GIDs must be expressed as a...
Return the tile image for this location :param gid: GID of image :rtype: surface if found, otherwise ValueError
[ "Return", "the", "tile", "image", "for", "this", "location" ]
python
train
google/grr
grr/server/grr_response_server/signed_binary_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/signed_binary_utils.py#L209-L246
def FetchBlobsForSignedBinary(binary_urn, token=None):
  """Retrieves blobs for the given binary from the datastore.

  Args:
    binary_urn: RDFURN that uniquely identifies the binary.
    token: ACL token to use with the legacy (non-relational) datastore.

  Returns:
    A tuple containing an iterator for all the binary's blobs and an
    RDFDatetime representing when the binary's contents were saved
    to the datastore.

  Raises:
    SignedBinaryNotFoundError: If no signed binary with the given URN exists.
  """
  if _ShouldUseLegacyDatastore():
    # Legacy AFF4 path: the binary is stored as a GRRSignedBlob stream.
    try:
      aff4_stream = aff4.FACTORY.Open(
          binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
    except aff4.InstantiationError:
      raise SignedBinaryNotFoundError(binary_urn)
    timestamp = aff4_stream.Get(aff4_stream.Schema.TYPE).age
    return (blob for blob in aff4_stream), timestamp
  else:
    # Relational path: look up blob references, then stream the blobs.
    try:
      references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences(
          _SignedBinaryIDFromURN(binary_urn))
    except db.UnknownSignedBinaryError:
      raise SignedBinaryNotFoundError(binary_urn)
    blob_ids = [r.blob_id for r in references.items]
    # Generators keep blob reads and deserialization lazy, one blob at
    # a time.
    raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids)
    blobs = (
        rdf_crypto.SignedBlob.FromSerializedString(raw_blob)
        for raw_blob in raw_blobs)
    return blobs, timestamp
[ "def", "FetchBlobsForSignedBinary", "(", "binary_urn", ",", "token", "=", "None", ")", ":", "if", "_ShouldUseLegacyDatastore", "(", ")", ":", "try", ":", "aff4_stream", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "binary_urn", ",", "aff4_type", "=", "coll...
Retrieves blobs for the given binary from the datastore. Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Returns: A tuple containing an iterator for all the binary's blobs and an RDFDatetime representing when the binary's contents were saved to the datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists.
[ "Retrieves", "blobs", "for", "the", "given", "binary", "from", "the", "datastore", "." ]
python
train
edx/ease
ease/model_creator.py
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/model_creator.py#L131-L158
def extract_features_and_generate_model_predictors(predictor_set, algorithm=util_functions.AlgorithmTypes.regression):
    """
    Extracts features and generates predictors based on a given predictor set
    predictor_set - a PredictorSet object that has been initialized with data
    algorithm - one of util_functions.AlgorithmTypes (unrecognized values fall
        back to regression)
    Returns a tuple (feature extractor, fitted classifier, cv error results).
    """
    # Fall back to regression for any unrecognized algorithm value.
    if algorithm not in [util_functions.AlgorithmTypes.regression,
                         util_functions.AlgorithmTypes.classification]:
        algorithm = util_functions.AlgorithmTypes.regression

    f = predictor_extractor.PredictorExtractor()
    f.initialize_dictionaries(predictor_set)

    train_feats = f.gen_feats(predictor_set)

    clf, clf2 = get_algorithms(algorithm)
    cv_error_results = get_cv_error(clf2, train_feats, predictor_set._target)

    try:
        # NOTE: ``numpy.int`` (a deprecated alias of the builtin ``int``) was
        # removed in NumPy 1.24; the builtin is the behavior-identical fix.
        set_score = numpy.asarray(predictor_set._target, dtype=int)
        clf.fit(train_feats, set_score)
    except ValueError:
        # Classifiers need at least two classes; force one sample of each so
        # fitting can proceed (preserves the original best-effort behavior).
        log.exception("Not enough classes (0,1,etc) in sample.")
        set_score = predictor_set._target
        set_score[0] = 1
        set_score[1] = 0
        clf.fit(train_feats, set_score)

    return f, clf, cv_error_results
[ "def", "extract_features_and_generate_model_predictors", "(", "predictor_set", ",", "algorithm", "=", "util_functions", ".", "AlgorithmTypes", ".", "regression", ")", ":", "if", "(", "algorithm", "not", "in", "[", "util_functions", ".", "AlgorithmTypes", ".", "regress...
Extracts features and generates predictors based on a given predictor set predictor_set - a PredictorSet object that has been initialized with data type - one of util_functions.AlgorithmType
[ "Extracts", "features", "and", "generates", "predictors", "based", "on", "a", "given", "predictor", "set", "predictor_set", "-", "a", "PredictorSet", "object", "that", "has", "been", "initialized", "with", "data", "type", "-", "one", "of", "util_functions", ".",...
python
valid
ibelie/typy
typy/google/protobuf/internal/python_message.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/internal/python_message.py#L916-L947
def _InternalUnpackAny(msg):
  """Unpacks Any message and returns the unpacked message.

  This internal method is different from public Any Unpack method which
  takes the target message as argument. _InternalUnpackAny method does not
  have target message type and need to find the message type in descriptor
  pool.

  Args:
    msg: An Any message to be unpacked.

  Returns:
    The unpacked message.
  """
  type_url = msg.type_url
  db = symbol_database.Default()

  if not type_url:
    return None

  # TODO(haberman): For now we just strip the hostname. Better logic will be
  # required.
  type_name = type_url.split("/")[-1]
  descriptor = db.pool.FindMessageTypeByName(type_name)

  if descriptor is None:
    return None

  # Instantiate the generated class for this descriptor and parse the payload.
  message = db.GetPrototype(descriptor)()
  message.ParseFromString(msg.value)
  return message
[ "def", "_InternalUnpackAny", "(", "msg", ")", ":", "type_url", "=", "msg", ".", "type_url", "db", "=", "symbol_database", ".", "Default", "(", ")", "if", "not", "type_url", ":", "return", "None", "# TODO(haberman): For now we just strip the hostname. Better logic wil...
Unpacks Any message and returns the unpacked message. This internal method is different from public Any Unpack method which takes the target message as argument. _InternalUnpackAny method does not have target message type and need to find the message type in descriptor pool. Args: msg: An Any message to be unpacked. Returns: The unpacked message.
[ "Unpacks", "Any", "message", "and", "returns", "the", "unpacked", "message", "." ]
python
valid
mrstephenneal/dirutility
dirutility/permissions.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/permissions.py#L156-L159
def allow_rwe(self, name):
    """Allow all privileges (read/write/execute) for a name group ('user', 'group' or 'other')."""
    assert name in PERMISSIONS
    mode = PERMISSIONS[name]['all']
    os.chmod(self.file_path, mode)
[ "def", "allow_rwe", "(", "self", ",", "name", ")", ":", "assert", "name", "in", "PERMISSIONS", ".", "keys", "(", ")", "os", ".", "chmod", "(", "self", ".", "file_path", ",", "PERMISSIONS", "[", "name", "]", "[", "'all'", "]", ")" ]
Allow all privileges for a particular name group (user, group, other).
[ "Allow", "all", "privileges", "for", "a", "particular", "name", "group", "(", "user", "group", "other", ")", "." ]
python
train
OSSOS/MOP
src/ossos/core/ossos/storage.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1290-L1302
def delete(expnum, ccd, version, ext, prefix=None):
    """
    Delete a file; does not raise if the file does not exist.

    @param expnum: exposure number of the artifact to remove
    @param ccd: ccd number of the artifact
    @param version: version tag of the artifact
    @param ext: file extension
    @param prefix: optional filename prefix
    @return: None
    """
    remove(get_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix))
[ "def", "delete", "(", "expnum", ",", "ccd", ",", "version", ",", "ext", ",", "prefix", "=", "None", ")", ":", "uri", "=", "get_uri", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "ext", ",", "prefix", "="...
delete a file, no error on does not exist @param expnum: @param ccd: @param version: @param ext: @param prefix: @return:
[ "delete", "a", "file", "no", "error", "on", "does", "not", "exist" ]
python
train
chrislit/abydos
abydos/distance/_manhattan.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_manhattan.py#L159-L192
def dist_manhattan(src, tar, qval=2, alphabet=None):
    """Return the normalized Manhattan distance between two strings.

    This is a wrapper for :py:meth:`Manhattan.dist`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float
        The normalized Manhattan distance

    Examples
    --------
    >>> dist_manhattan('cat', 'hat')
    0.5
    >>> round(dist_manhattan('Niall', 'Neil'), 12)
    0.636363636364
    >>> round(dist_manhattan('Colin', 'Cuilen'), 12)
    0.692307692308
    >>> dist_manhattan('ATCG', 'TAGC')
    1.0

    """
    measure = Manhattan()
    return measure.dist(src, tar, qval, alphabet)
[ "def", "dist_manhattan", "(", "src", ",", "tar", ",", "qval", "=", "2", ",", "alphabet", "=", "None", ")", ":", "return", "Manhattan", "(", ")", ".", "dist", "(", "src", ",", "tar", ",", "qval", ",", "alphabet", ")" ]
Return the normalized Manhattan distance between two strings. This is a wrapper for :py:meth:`Manhattan.dist`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version alphabet : collection or int The values or size of the alphabet Returns ------- float The normalized Manhattan distance Examples -------- >>> dist_manhattan('cat', 'hat') 0.5 >>> round(dist_manhattan('Niall', 'Neil'), 12) 0.636363636364 >>> round(dist_manhattan('Colin', 'Cuilen'), 12) 0.692307692308 >>> dist_manhattan('ATCG', 'TAGC') 1.0
[ "Return", "the", "normalized", "Manhattan", "distance", "between", "two", "strings", "." ]
python
valid
onnx/onnxmltools
onnxmltools/utils/main.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/utils/main.py#L57-L75
def set_model_domain(model, domain):
    """
    Sets the domain on the ONNX model.

    :param model: instance of an ONNX model
    :param domain: string containing the domain name of the model

    Example:

    ::
        from onnxmltools.utils import set_model_domain
        onnx_model = load_model("SqueezeNet.onnx")
        set_model_domain(onnx_model, "com.acme")
    """
    # None fails the isinstance test too; the explicit check mirrors intent.
    is_valid_model = model is not None and isinstance(model, onnx_proto.ModelProto)
    if not is_valid_model:
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_string_type(domain):
        raise ValueError("Domain must be a string type.")
    model.domain = domain
[ "def", "set_model_domain", "(", "model", ",", "domain", ")", ":", "if", "model", "is", "None", "or", "not", "isinstance", "(", "model", ",", "onnx_proto", ".", "ModelProto", ")", ":", "raise", "ValueError", "(", "\"Model is not a valid ONNX model.\"", ")", "if...
Sets the domain on the ONNX model. :param model: instance of an ONNX model :param domain: string containing the domain name of the model Example: :: from onnxmltools.utils import set_model_domain onnx_model = load_model("SqueezeNet.onnx") set_model_domain(onnx_model, "com.acme")
[ "Sets", "the", "domain", "on", "the", "ONNX", "model", "." ]
python
train
wavycloud/pyboto3
pyboto3/ec2.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/ec2.py#L3090-L3207
def create_volume(DryRun=None, Size=None, SnapshotId=None, AvailabilityZone=None, VolumeType=None, Iops=None, Encrypted=None, KmsKeyId=None, TagSpecifications=None):
    """
    Creates an EBS volume that can be attached to an instance in the same
    Availability Zone. The volume is created in the regional endpoint that you
    send the HTTP request to.

    You can create a new empty volume or restore a volume from an EBS snapshot
    (any AWS Marketplace product codes from the snapshot are propagated to the
    volume). Encrypted volumes may only be attached to instances that support
    Amazon EBS encryption; volumes created from encrypted snapshots are
    automatically encrypted. You can also tag the volume during creation.

    :type DryRun: boolean
    :param DryRun: Checks whether you have the required permissions for the
        action without actually making the request. If you have the required
        permissions the error response is ``DryRunOperation``; otherwise it is
        ``UnauthorizedOperation``.

    :type Size: integer
    :param Size: The size of the volume, in GiBs. Constraints: 1-16384 for
        ``gp2``, 4-16384 for ``io1``, 500-16384 for ``st1`` and ``sc1``, and
        1-1024 for ``standard``. If a snapshot is specified, the volume size
        must be equal to or larger than the snapshot size; the default is the
        snapshot size.

    :type SnapshotId: string
    :param SnapshotId: The snapshot from which to create the volume.

    :type AvailabilityZone: string
    :param AvailabilityZone: [REQUIRED] The Availability Zone in which to
        create the volume. Use ``DescribeAvailabilityZones`` to list the zones
        currently available to you.

    :type VolumeType: string
    :param VolumeType: The volume type: ``gp2`` (General Purpose SSD), ``io1``
        (Provisioned IOPS SSD), ``st1`` (Throughput Optimized HDD), ``sc1``
        (Cold HDD) or ``standard`` (Magnetic). Default: ``standard``.

    :type Iops: integer
    :param Iops: Only valid for Provisioned IOPS SSD volumes. The number of
        I/O operations per second to provision, with a maximum ratio of
        50 IOPS/GiB. Constraint: range is 100 to 20000.

    :type Encrypted: boolean
    :param Encrypted: Specifies whether the volume should be encrypted. There
        is no way to create an encrypted volume from an unencrypted snapshot
        or vice versa.

    :type KmsKeyId: string
    :param KmsKeyId: The full ARN of the AWS KMS customer master key (CMK) to
        use when creating the encrypted volume. Only required for a
        non-default CMK; if specified, the ``Encrypted`` flag must also be
        set. Example:
        ``arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef``.

    :type TagSpecifications: list
    :param TagSpecifications: The tags to apply to the volume during creation.
        Each element is a dict with ``ResourceType`` (string; ``instance`` or
        ``volume`` support tagging on creation) and ``Tags`` (list of
        ``{'Key': string, 'Value': string}`` dicts; keys are case-sensitive,
        max 127 Unicode characters, may not begin with ``aws:``; values are
        case-sensitive, max 255 Unicode characters).

    :rtype: dict
    :return: A volume description::

        {
            'VolumeId': 'string',
            'Size': 123,
            'SnapshotId': 'string',
            'AvailabilityZone': 'string',
            'State': 'creating'|'available'|'in-use'|'deleting'|'deleted'|'error',
            'CreateTime': datetime(2015, 1, 1),
            'Attachments': [
                {
                    'VolumeId': 'string',
                    'InstanceId': 'string',
                    'Device': 'string',
                    'State': 'attaching'|'attached'|'detaching'|'detached',
                    'AttachTime': datetime(2015, 1, 1),
                    'DeleteOnTermination': True|False
                },
            ],
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ],
            'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1',
            'Iops': 123,
            'Encrypted': True|False,
            'KmsKeyId': 'string'
        }
    """
    # Documentation-only stub: the real boto3 client builds and dispatches
    # this operation dynamically at runtime from the service model.
    pass
[ "def", "create_volume", "(", "DryRun", "=", "None", ",", "Size", "=", "None", ",", "SnapshotId", "=", "None", ",", "AvailabilityZone", "=", "None", ",", "VolumeType", "=", "None", ",", "Iops", "=", "None", ",", "Encrypted", "=", "None", ",", "KmsKeyId", ...
Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints . You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide . You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources . For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide . See also: AWS API Documentation Examples This example creates an 80 GiB General Purpose (SSD) volume in the Availability Zone us-east-1a. Expected Output: This example creates a new Provisioned IOPS (SSD) volume with 1000 provisioned IOPS from a snapshot in the Availability Zone us-east-1a. Expected Output: :example: response = client.create_volume( DryRun=True|False, Size=123, SnapshotId='string', AvailabilityZone='string', VolumeType='standard'|'io1'|'gp2'|'sc1'|'st1', Iops=123, Encrypted=True|False, KmsKeyId='string', TagSpecifications=[ { 'ResourceType': 'customer-gateway'|'dhcp-options'|'image'|'instance'|'internet-gateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'snapshot'|'spot-instances-request'|'subnet'|'security-group'|'volume'|'vpc'|'vpn-connection'|'vpn-gateway', 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] }, ] ) :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation . Otherwise, it is UnauthorizedOperation . :type Size: integer :param Size: The size of the volume, in GiBs. Constraints: 1-16384 for gp2 , 4-16384 for io1 , 500-16384 for st1 , 500-16384 for sc1 , and 1-1024 for standard . If you specify a snapshot, the volume size must be equal to or larger than the snapshot size. Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. :type SnapshotId: string :param SnapshotId: The snapshot from which to create the volume. :type AvailabilityZone: string :param AvailabilityZone: [REQUIRED] The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you. :type VolumeType: string :param VolumeType: The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Default: standard :type Iops: integer :param Iops: Only valid for Provisioned IOPS SSD volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes :type Encrypted: boolean :param Encrypted: Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide . 
:type KmsKeyId: string :param KmsKeyId: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1 :012345678910 :key/abcd1234-a123-456a-a12b-a123b4cd56ef . If a KmsKeyId is specified, the Encrypted flag must also be set. :type TagSpecifications: list :param TagSpecifications: The tags to apply to the volume during creation. (dict) --The tags to apply to a resource when the resource is being created. ResourceType (string) --The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume . Tags (list) --The tags to apply to the resource. (dict) --Describes a tag. Key (string) --The key of the tag. Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws: Value (string) --The value of the tag. Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters. :rtype: dict :return: { 'VolumeId': 'string', 'Size': 123, 'SnapshotId': 'string', 'AvailabilityZone': 'string', 'State': 'creating'|'available'|'in-use'|'deleting'|'deleted'|'error', 'CreateTime': datetime(2015, 1, 1), 'Attachments': [ { 'VolumeId': 'string', 'InstanceId': 'string', 'Device': 'string', 'State': 'attaching'|'attached'|'detaching'|'detached', 'AttachTime': datetime(2015, 1, 1), 'DeleteOnTermination': True|False }, ], 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ], 'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1', 'Iops': 123, 'Encrypted': True|False, 'KmsKeyId': 'string' }
[ "Creates", "an", "EBS", "volume", "that", "can", "be", "attached", "to", "an", "instance", "in", "the", "same", "Availability", "Zone", ".", "The", "volume", "is", "created", "in", "the", "regional", "endpoint", "that", "you", "send", "the", "HTTP", "reque...
python
train
python-rope/rope
rope/base/oi/doa.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/oi/doa.py#L101-L105
def wait_process(self):
    """Wait for the process to finish"""
    process = self.process
    process.wait()
    if not self.analyze_data:
        return
    # Result analysis is on: also wait for the receiver thread to drain.
    self.receiving_thread.join()
[ "def", "wait_process", "(", "self", ")", ":", "self", ".", "process", ".", "wait", "(", ")", "if", "self", ".", "analyze_data", ":", "self", ".", "receiving_thread", ".", "join", "(", ")" ]
Wait for the process to finish
[ "Wait", "for", "the", "process", "to", "finish" ]
python
train
dcos/shakedown
shakedown/dcos/task.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/task.py#L107-L115
def task_property_present_predicate(service, task, prop):
    """ True if the json_element passed is present for the task specified.

    :param service: name of the service the task belongs to
    :param task: name of the task to look up
    :param prop: property (JSON key) expected in the task description
    :return: True when the task could be fetched and contains ``prop``
    """
    # Pre-assign ``response``: the original code swallowed the exception and
    # then read ``response``, raising NameError whenever the lookup failed.
    response = None
    try:
        response = get_service_task(service, task)
    except Exception:
        # Treat a failed lookup as "property not present" (best-effort check,
        # matching the original intent of the swallowed exception).
        pass

    return (response is not None) and (prop in response)
[ "def", "task_property_present_predicate", "(", "service", ",", "task", ",", "prop", ")", ":", "try", ":", "response", "=", "get_service_task", "(", "service", ",", "task", ")", "except", "Exception", "as", "e", ":", "pass", "return", "(", "response", "is", ...
True if the json_element passed is present for the task specified.
[ "True", "if", "the", "json_element", "passed", "is", "present", "for", "the", "task", "specified", "." ]
python
train
brainiak/brainiak
brainiak/fcma/preprocessing.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L328-L414
def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
                                  random=RandomType.NORANDOM):
    """ obtain the data for activity-based voxel selection using Searchlight

    Average the activity within epochs and z-scoring within subject,
    while maintaining the 3D brain structure. In order to save memory,
    the data is processed subject by subject instead of reading all in
    before processing. Assuming all subjects live in the identical cube.

    Parameters
    ----------
    images: Iterable[SpatialImage]
        Data.
    conditions: List[UniqueLabelConditionSpec]
        Condition specification.
    data_type
        Type to cast image to.
    random: Optional[RandomType]
        Randomize the image data within subject or not.

    Returns
    -------
    processed_data: 4D array in shape [brain 3D + epoch]
        averaged epoch by epoch processed data
    labels: 1D array
        contains labels of the data
    """
    start_time = time.time()
    epoch_info = generate_epochs_info(conditions)
    num_epochs = len(epoch_info)
    logger.info(
        'there are %d subjects, and in total %d epochs' %
        (len(conditions), num_epochs)
    )
    # one label per epoch, taken from the first element of each epoch tuple
    labels = np.asarray([epoch[0] for epoch in epoch_info], dtype=np.float64)
    # epochs per subject, needed later for per-subject z-scoring
    subject_count = np.zeros(len(conditions), dtype=np.int32)
    processed_data = None

    logger.info('start to apply masks and separate epochs')
    for sid, image in enumerate(images):
        data = image.get_data().astype(data_type)
        d1, d2, d3, d4 = data.shape
        if random in (RandomType.REPRODUCIBLE, RandomType.UNREPRODUCIBLE):
            # shuffle voxel time courses within this subject (in place,
            # seeded by subject id only in the reproducible mode)
            flat = data.reshape((d1 * d2 * d3, d4))
            if random == RandomType.REPRODUCIBLE:
                _randomize_single_subject(flat, seed=sid)
            else:
                _randomize_single_subject(flat)
            data = flat.reshape((d1, d2, d3, d4))
        if processed_data is None:
            # allocate once; all subjects are assumed to share the same cube
            processed_data = np.empty([d1, d2, d3, num_epochs],
                                      dtype=data_type)
        # average the activity within each of this subject's epochs
        for idx, epoch in enumerate(epoch_info):
            if sid == epoch[1]:
                subject_count[sid] += 1
                processed_data[:, :, :, idx] = \
                    np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)

        logger.debug(
            'file %s is loaded and processed, with data shape %s',
            image.get_filename(), data.shape
        )
    # z-score each subject's contiguous block of epochs along the epoch axis
    cur_epoch = 0
    for count in subject_count:
        if count > 1:
            processed_data[:, :, :, cur_epoch:cur_epoch + count] = \
                zscore(processed_data[:, :, :, cur_epoch:cur_epoch + count],
                       axis=3, ddof=0)
        cur_epoch += count
    # if zscore fails (standard deviation is zero), set all values to zero
    processed_data = np.nan_to_num(processed_data)
    logger.info(
        'data processed for activity-based voxel selection, takes %.2f s' %
        (time.time() - start_time)
    )
    return processed_data, labels
[ "def", "prepare_searchlight_mvpa_data", "(", "images", ",", "conditions", ",", "data_type", "=", "np", ".", "float32", ",", "random", "=", "RandomType", ".", "NORANDOM", ")", ":", "time1", "=", "time", ".", "time", "(", ")", "epoch_info", "=", "generate_epoc...
obtain the data for activity-based voxel selection using Searchlight Average the activity within epochs and z-scoring within subject, while maintaining the 3D brain structure. In order to save memory, the data is processed subject by subject instead of reading all in before processing. Assuming all subjects live in the identical cube. Parameters ---------- images: Iterable[SpatialImage] Data. conditions: List[UniqueLabelConditionSpec] Condition specification. data_type Type to cast image to. random: Optional[RandomType] Randomize the image data within subject or not. Returns ------- processed_data: 4D array in shape [brain 3D + epoch] averaged epoch by epoch processed data labels: 1D array contains labels of the data
[ "obtain", "the", "data", "for", "activity", "-", "based", "voxel", "selection", "using", "Searchlight" ]
python
train
Xion/callee
callee/objects.py
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/objects.py#L115-L154
def is_method(arg, min_arity=None, max_arity=None):
    """Check if argument is a method.

    Optionally, we can also check if minimum or maximum arities
    (number of accepted arguments) match given minimum and/or maximum.

    :param arg: Object to check
    :param min_arity: Exact minimum number of arguments (``self`` excluded)
        the method must accept, if given
    :param max_arity: Exact maximum number of arguments (``self`` excluded)
        the method must accept, if given; methods taking ``*args`` or
        ``**kwargs`` have an effectively unbounded maximum arity
    :return: True if ``arg`` is a method satisfying the arity constraints
    """
    if not callable(arg):
        return False
    if not any(is_(arg) for is_ in (inspect.ismethod,
                                    inspect.ismethoddescriptor,
                                    inspect.isbuiltin)):
        return False

    try:
        # ``inspect.getargspec`` was removed in Python 3.11;
        # ``getfullargspec`` exposes the same fields and is the
        # documented replacement.
        spec = inspect.getfullargspec(arg)
        argnames, varargs, kwargs, defaults = (
            spec.args, spec.varargs, spec.varkw, spec.defaults)
    except TypeError:
        # On CPython 2.x, built-in methods of file aren't inspectable,
        # so if it's file.read() or file.write(), we can't tell it for sure.
        # Given how this check is being used, assuming the best is probably
        # all we can do here.
        return True
    else:
        if argnames and argnames[0] == 'self':
            argnames = argnames[1:]

        if min_arity is not None:
            actual_min_arity = len(argnames) - len(defaults or ())
            assert actual_min_arity >= 0, (
                "Minimum arity of %r found to be negative (got %s)!" % (
                    arg, actual_min_arity))
            if int(min_arity) != actual_min_arity:
                return False

        if max_arity is not None:
            actual_max_arity = sys.maxsize if varargs or kwargs else len(argnames)
            if int(max_arity) != actual_max_arity:
                return False

    return True
[ "def", "is_method", "(", "arg", ",", "min_arity", "=", "None", ",", "max_arity", "=", "None", ")", ":", "if", "not", "callable", "(", "arg", ")", ":", "return", "False", "if", "not", "any", "(", "is_", "(", "arg", ")", "for", "is_", "in", "(", "i...
Check if argument is a method. Optionally, we can also check if minimum or maximum arities (number of accepted arguments) match given minimum and/or maximum.
[ "Check", "if", "argument", "is", "a", "method", "." ]
python
train
gusutabopb/aioinflux
aioinflux/serialization/dataframe.py
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L86-L127
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
    """Converts a Pandas DataFrame into line protocol format.

    :param df: DataFrame to serialize; its index must be a DatetimeIndex.
    :param measurement: InfluxDB measurement name.
    :param tag_columns: Columns of ``df`` to emit as tags instead of fields.
    :param extra_tags: Additional constant tags added to every line.
    :return: UTF-8 encoded line protocol payload.
    :raises ValueError: If ``measurement`` is missing or the index is not
        a DatetimeIndex.
    """
    # Pre-processing
    if measurement is None:
        raise ValueError("Missing 'measurement'")
    if not isinstance(df.index, pd.DatetimeIndex):
        raise ValueError('DataFrame index is not DatetimeIndex')
    tag_columns = set(tag_columns or [])
    isnull = df.isnull().any(axis=1)

    # Make parser function
    tags = []
    fields = []
    for k, v in extra_tags.items():
        tags.append(f"{k}={escape(v, key_escape)}")
    for i, (k, v) in enumerate(df.dtypes.items()):
        k = k.translate(key_escape)
        if k in tag_columns:
            tags.append(f"{k}={{p[{i+1}]}}")
        elif issubclass(v.type, np.integer):
            fields.append(f"{k}={{p[{i+1}]}}i")
        elif issubclass(v.type, (float, np.bool_)):
            # ``np.float`` was an alias of the builtin ``float`` and was
            # removed in NumPy 1.24; the builtin is the drop-in,
            # behavior-identical replacement (float32 columns still fall
            # through to the string branch, exactly as before).
            fields.append(f"{k}={{p[{i+1}]}}")
        else:
            # String escaping is skipped for performance reasons
            # Strings containing double-quotes can cause strange write errors
            # and should be sanitized by the user.
            # e.g., df[k] = df[k].astype('str').str.translate(str_escape)
            fields.append(f"{k}=\"{{p[{i+1}]}}\"")
    fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
           ' ', ','.join(fields), ' {p[0].value}')
    f = eval("lambda p: f'{}'".format(''.join(fmt)))

    # Map/concat
    if isnull.any():
        lp = map(f, _itertuples(df[~isnull]))
        rep = _replace(df)
        lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
                  for p in _itertuples(df[isnull]))
        return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
    else:
        return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
[ "def", "serialize", "(", "df", ",", "measurement", ",", "tag_columns", "=", "None", ",", "*", "*", "extra_tags", ")", "->", "bytes", ":", "# Pre-processing", "if", "measurement", "is", "None", ":", "raise", "ValueError", "(", "\"Missing 'measurement'\"", ")", ...
Converts a Pandas DataFrame into line protocol format
[ "Converts", "a", "Pandas", "DataFrame", "into", "line", "protocol", "format" ]
python
train
line/line-bot-sdk-python
linebot/http_client.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/http_client.py#L110-L132
def get(self, url, headers=None, params=None, stream=False, timeout=None):
    """GET request.

    :param str url: Request url
    :param dict headers: (optional) Request headers
    :param dict params: (optional) Request query parameter
    :param bool stream: (optional) get content as stream
    :param timeout: (optional), How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is :py:attr:`self.timeout`
    :type timeout: float | tuple(float, float)
    :rtype: :py:class:`RequestsHttpResponse`
    :return: RequestsHttpResponse instance
    """
    effective_timeout = self.timeout if timeout is None else timeout
    response = requests.get(
        url, headers=headers, params=params,
        stream=stream, timeout=effective_timeout
    )
    return RequestsHttpResponse(response)
[ "def", "get", "(", "self", ",", "url", ",", "headers", "=", "None", ",", "params", "=", "None", ",", "stream", "=", "False", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "timeout", "respons...
GET request. :param str url: Request url :param dict headers: (optional) Request headers :param dict params: (optional) Request query parameter :param bool stream: (optional) get content as stream :param timeout: (optional), How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is :py:attr:`self.timeout` :type timeout: float | tuple(float, float) :rtype: :py:class:`RequestsHttpResponse` :return: RequestsHttpResponse instance
[ "GET", "request", "." ]
python
train
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1239-L1274
def calc_registration(preregistration_map, anchors, max_steps=2000, max_step_size=0.05, method='random'): ''' calc_registration is a calculator that creates the registration coordinates. ''' # if max steps is a tuple (max, stride) then a trajectory is saved into # the registered_map meta-data pmap = preregistration_map if is_tuple(max_steps) or is_list(max_steps): (max_steps, stride) = max_steps traj = [preregistration_map.coordinates] x = preregistration_map.coordinates for s in np.arange(0, max_steps, stride): x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], initial_coordinates=x, method=method, max_steps=stride, max_step_size=max_step_size) traj.append(x) pmap = pmap.with_meta(trajectory=np.asarray(traj)) else: x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], method=method, max_steps=max_steps, max_step_size=max_step_size) return pmap.copy(coordinates=x)
[ "def", "calc_registration", "(", "preregistration_map", ",", "anchors", ",", "max_steps", "=", "2000", ",", "max_step_size", "=", "0.05", ",", "method", "=", "'random'", ")", ":", "# if max steps is a tuple (max, stride) then a trajectory is saved into", "# the registered_m...
calc_registration is a calculator that creates the registration coordinates.
[ "calc_registration", "is", "a", "calculator", "that", "creates", "the", "registration", "coordinates", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L4502-L4518
def ANDN(cpu, dest, src1, src2): """Performs a bitwise logical AND of inverted second operand (the first source operand) with the third operand (the second source operand). The result is stored in the first operand (destination operand). DEST <- (NOT SRC1) bitwiseAND SRC2; SF <- DEST[OperandSize -1]; ZF <- (DEST = 0); Flags Affected SF and ZF are updated based on result. OF and CF flags are cleared. AF and PF flags are undefined. """ value = ~src1.read() & src2.read() dest.write(value) cpu.ZF = value == 0 cpu.SF = (value & (1 << dest.size)) != 0 cpu.OF = False cpu.CF = False
[ "def", "ANDN", "(", "cpu", ",", "dest", ",", "src1", ",", "src2", ")", ":", "value", "=", "~", "src1", ".", "read", "(", ")", "&", "src2", ".", "read", "(", ")", "dest", ".", "write", "(", "value", ")", "cpu", ".", "ZF", "=", "value", "==", ...
Performs a bitwise logical AND of inverted second operand (the first source operand) with the third operand (the second source operand). The result is stored in the first operand (destination operand). DEST <- (NOT SRC1) bitwiseAND SRC2; SF <- DEST[OperandSize -1]; ZF <- (DEST = 0); Flags Affected SF and ZF are updated based on result. OF and CF flags are cleared. AF and PF flags are undefined.
[ "Performs", "a", "bitwise", "logical", "AND", "of", "inverted", "second", "operand", "(", "the", "first", "source", "operand", ")", "with", "the", "third", "operand", "(", "the", "second", "source", "operand", ")", ".", "The", "result", "is", "stored", "in...
python
valid
flaviogrossi/sockjs-cyclone
sockjs/cyclone/transports/base.py
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/transports/base.py#L9-L17
def get_conn_info(self): from sockjs.cyclone.conn import ConnectionInfo """ Return C{ConnectionInfo} object from current transport """ return ConnectionInfo(self.request.remote_ip, self.request.cookies, self.request.arguments, self.request.headers, self.request.path)
[ "def", "get_conn_info", "(", "self", ")", ":", "from", "sockjs", ".", "cyclone", ".", "conn", "import", "ConnectionInfo", "return", "ConnectionInfo", "(", "self", ".", "request", ".", "remote_ip", ",", "self", ".", "request", ".", "cookies", ",", "self", "...
Return C{ConnectionInfo} object from current transport
[ "Return", "C", "{", "ConnectionInfo", "}", "object", "from", "current", "transport" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/win32/user32.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/win32/user32.py#L607-L625
def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP): """ Translate coordinates from one window to another. @see: L{client_to_screen}, L{screen_to_client} @type hWndFrom: int or L{HWND} or L{system.Window} @param hWndFrom: Window handle to translate from. Use C{HWND_DESKTOP} for screen coordinates. @type hWndTo: int or L{HWND} or L{system.Window} @param hWndTo: Window handle to translate to. Use C{HWND_DESKTOP} for screen coordinates. @rtype: L{Rect} @return: New object containing the translated coordinates. """ points = [ (self.left, self.top), (self.right, self.bottom) ] return MapWindowPoints(hWndFrom, hWndTo, points)
[ "def", "translate", "(", "self", ",", "hWndFrom", "=", "HWND_DESKTOP", ",", "hWndTo", "=", "HWND_DESKTOP", ")", ":", "points", "=", "[", "(", "self", ".", "left", ",", "self", ".", "top", ")", ",", "(", "self", ".", "right", ",", "self", ".", "bott...
Translate coordinates from one window to another. @see: L{client_to_screen}, L{screen_to_client} @type hWndFrom: int or L{HWND} or L{system.Window} @param hWndFrom: Window handle to translate from. Use C{HWND_DESKTOP} for screen coordinates. @type hWndTo: int or L{HWND} or L{system.Window} @param hWndTo: Window handle to translate to. Use C{HWND_DESKTOP} for screen coordinates. @rtype: L{Rect} @return: New object containing the translated coordinates.
[ "Translate", "coordinates", "from", "one", "window", "to", "another", "." ]
python
train
seequent/vectormath
vectormath/vector.py
https://github.com/seequent/vectormath/blob/a2259fb82cf5a665170f50d216b11a738400d878/vectormath/vector.py#L303-L307
def cross(self, vec): """Cross product with another vector""" if not isinstance(vec, self.__class__): raise TypeError('Cross product operand must be a vector') return Vector3(0, 0, np.asscalar(np.cross(self, vec)))
[ "def", "cross", "(", "self", ",", "vec", ")", ":", "if", "not", "isinstance", "(", "vec", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "'Cross product operand must be a vector'", ")", "return", "Vector3", "(", "0", ",", "0", ",", "...
Cross product with another vector
[ "Cross", "product", "with", "another", "vector" ]
python
train
SFDO-Tooling/CumulusCI
cumulusci/utils.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/utils.py#L448-L460
def temporary_dir(): """Context manager that creates a temporary directory and chdirs to it. When the context manager exits it returns to the previous cwd and deletes the temporary directory. """ d = tempfile.mkdtemp() try: with cd(d): yield d finally: if os.path.exists(d): shutil.rmtree(d)
[ "def", "temporary_dir", "(", ")", ":", "d", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "with", "cd", "(", "d", ")", ":", "yield", "d", "finally", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "shutil", ".", "rmtre...
Context manager that creates a temporary directory and chdirs to it. When the context manager exits it returns to the previous cwd and deletes the temporary directory.
[ "Context", "manager", "that", "creates", "a", "temporary", "directory", "and", "chdirs", "to", "it", "." ]
python
train
tcalmant/ipopo
pelix/misc/jabsorb.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/jabsorb.py#L141-L154
def _compute_jsonclass(obj): """ Compute the content of the __jsonclass__ field for the given object :param obj: An object :return: The content of the __jsonclass__ field """ # It's not a standard type, so it needs __jsonclass__ module_name = inspect.getmodule(obj).__name__ json_class = obj.__class__.__name__ if module_name not in ("", "__main__"): json_class = "{0}.{1}".format(module_name, json_class) return [json_class, []]
[ "def", "_compute_jsonclass", "(", "obj", ")", ":", "# It's not a standard type, so it needs __jsonclass__", "module_name", "=", "inspect", ".", "getmodule", "(", "obj", ")", ".", "__name__", "json_class", "=", "obj", ".", "__class__", ".", "__name__", "if", "module_...
Compute the content of the __jsonclass__ field for the given object :param obj: An object :return: The content of the __jsonclass__ field
[ "Compute", "the", "content", "of", "the", "__jsonclass__", "field", "for", "the", "given", "object" ]
python
train
nutechsoftware/alarmdecoder
alarmdecoder/devices/socket_device.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/devices/socket_device.py#L245-L271
def write(self, data): """ Writes data to the device. :param data: data to write :type data: string :returns: number of bytes sent :raises: :py:class:`~alarmdecoder.util.CommError` """ data_sent = None try: if isinstance(data, str): data = data.encode('utf-8') data_sent = self._device.send(data) if data_sent == 0: raise CommError('Error writing to device.') self.on_write(data=data) except (SSL.Error, socket.error) as err: raise CommError('Error writing to device.', err) return data_sent
[ "def", "write", "(", "self", ",", "data", ")", ":", "data_sent", "=", "None", "try", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf-8'", ")", "data_sent", "=", "self", ".", "_device", ".",...
Writes data to the device. :param data: data to write :type data: string :returns: number of bytes sent :raises: :py:class:`~alarmdecoder.util.CommError`
[ "Writes", "data", "to", "the", "device", "." ]
python
train
lvjiyong/configreset
configreset/__init__.py
https://github.com/lvjiyong/configreset/blob/cde0a426e993a6aa483d6934358e61750c944de9/configreset/__init__.py#L222-L252
def _load_from_ini_py3(ini, default_section=_DEFAULT_SECTION): """ py3从单个配置文件中,获取设置 :param default_section: :param ini: :return: """ cf = configparser.ConfigParser(default_section=default_section) cf.read(ini, encoding="UTF8") settings = OrderedDict() for item in cf.items(): logger.debug(item[0]) settings[item[0].upper()] = OrderedDict(item[1]) logger.debug(settings) for k, v in cf.items(default_section): # for k, v in cf.items(): logger.debug(v) logger.debug(settings) logger.debug(settings.get(k)) settings[k.upper()] = convert_value(v) logger.debug(settings) if k.lower() in settings: del settings[k.lower()] if default_section in settings: del settings[default_section] logger.debug(settings) return settings
[ "def", "_load_from_ini_py3", "(", "ini", ",", "default_section", "=", "_DEFAULT_SECTION", ")", ":", "cf", "=", "configparser", ".", "ConfigParser", "(", "default_section", "=", "default_section", ")", "cf", ".", "read", "(", "ini", ",", "encoding", "=", "\"UTF...
py3从单个配置文件中,获取设置 :param default_section: :param ini: :return:
[ "py3从单个配置文件中", "获取设置", ":", "param", "default_section", ":", ":", "param", "ini", ":", ":", "return", ":" ]
python
train
deepmind/pysc2
pysc2/lib/features.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/features.py#L536-L598
def parse_agent_interface_format( feature_screen=None, feature_minimap=None, rgb_screen=None, rgb_minimap=None, action_space=None, camera_width_world_units=None, use_feature_units=False, use_raw_units=False, use_unit_counts=False, use_camera_position=False): """Creates an AgentInterfaceFormat object from keyword args. Convenient when using dictionaries or command-line arguments for config. Note that the feature_* and rgb_* properties define the respective spatial observation dimensions and accept: * None or 0 to disable that spatial observation. * A single int for a square observation with that side length. * A (int, int) tuple for a rectangular (width, height) observation. Args: feature_screen: If specified, so must feature_minimap be. feature_minimap: If specified, so must feature_screen be. rgb_screen: If specified, so must rgb_minimap be. rgb_minimap: If specified, so must rgb_screen be. action_space: ["FEATURES", "RGB"]. camera_width_world_units: An int. use_feature_units: A boolean, defaults to False. use_raw_units: A boolean, defaults to False. use_unit_counts: A boolean, defaults to False. use_camera_position: A boolean, defaults to False. Returns: An `AgentInterfaceFormat` object. Raises: ValueError: If an invalid parameter is specified. """ if feature_screen or feature_minimap: feature_dimensions = Dimensions( screen=feature_screen, minimap=feature_minimap) else: feature_dimensions = None if rgb_screen or rgb_minimap: rgb_dimensions = Dimensions( screen=rgb_screen, minimap=rgb_minimap) else: rgb_dimensions = None return AgentInterfaceFormat( feature_dimensions=feature_dimensions, rgb_dimensions=rgb_dimensions, action_space=(action_space and actions.ActionSpace[action_space.upper()]), camera_width_world_units=camera_width_world_units, use_feature_units=use_feature_units, use_raw_units=use_raw_units, use_unit_counts=use_unit_counts, use_camera_position=use_camera_position )
[ "def", "parse_agent_interface_format", "(", "feature_screen", "=", "None", ",", "feature_minimap", "=", "None", ",", "rgb_screen", "=", "None", ",", "rgb_minimap", "=", "None", ",", "action_space", "=", "None", ",", "camera_width_world_units", "=", "None", ",", ...
Creates an AgentInterfaceFormat object from keyword args. Convenient when using dictionaries or command-line arguments for config. Note that the feature_* and rgb_* properties define the respective spatial observation dimensions and accept: * None or 0 to disable that spatial observation. * A single int for a square observation with that side length. * A (int, int) tuple for a rectangular (width, height) observation. Args: feature_screen: If specified, so must feature_minimap be. feature_minimap: If specified, so must feature_screen be. rgb_screen: If specified, so must rgb_minimap be. rgb_minimap: If specified, so must rgb_screen be. action_space: ["FEATURES", "RGB"]. camera_width_world_units: An int. use_feature_units: A boolean, defaults to False. use_raw_units: A boolean, defaults to False. use_unit_counts: A boolean, defaults to False. use_camera_position: A boolean, defaults to False. Returns: An `AgentInterfaceFormat` object. Raises: ValueError: If an invalid parameter is specified.
[ "Creates", "an", "AgentInterfaceFormat", "object", "from", "keyword", "args", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/components_manager_ui/components_manager_ui.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/components_manager_ui/components_manager_ui.py#L909-L944
def reload_components_ui(self): """ Reloads user selected Components. :return: Method success. :rtype: bool :note: May require user interaction. """ selected_components = self.get_selected_components() self.__engine.start_processing("Reloading Components ...", len(selected_components)) reload_failed_components = [] for component in selected_components: if component.interface.deactivatable: success = self.reload_component(component.name) or False if not success: reload_failed_components.append(component) else: self.__engine.notifications_manager.warnify( "{0} | '{1}' Component cannot be deactivated and won't be reloaded!".format(self.__class__.__name__, component.name)) self.__engine.step_processing() self.__engine.stop_processing() if not reload_failed_components: return True else: raise manager.exceptions.ComponentReloadError( "{0} | Exception(s) raised while reloading '{1}' Component(s)!".format(self.__class__.__name__, ", ".join( (reload_failed_component.name for reload_failed_component in reload_failed_components))))
[ "def", "reload_components_ui", "(", "self", ")", ":", "selected_components", "=", "self", ".", "get_selected_components", "(", ")", "self", ".", "__engine", ".", "start_processing", "(", "\"Reloading Components ...\"", ",", "len", "(", "selected_components", ")", ")...
Reloads user selected Components. :return: Method success. :rtype: bool :note: May require user interaction.
[ "Reloads", "user", "selected", "Components", "." ]
python
train
jobovy/galpy
galpy/potential/DiskSCFPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/DiskSCFPotential.py#L316-L337
def _zforce(self,R,z,phi=0,t=0): """ NAME: _zforce PURPOSE: evaluate the vertical force at (R,z, phi) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: vertical force at (R,z, phi) HISTORY: 2016-12-26 - Written - Bovy (UofT/CCA) """ r= numpy.sqrt(R**2.+z**2.) out= self._scf.zforce(R,z,phi=phi,use_physical=False) for a,s,ds,H,dH in zip(self._Sigma_amp,self._Sigma,self._dSigmadR, self._Hz,self._dHzdz): out-= 4.*numpy.pi*a*(ds(r)*H(z)*z/r+s(r)*dH(z)) return out
[ "def", "_zforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0", ",", "t", "=", "0", ")", ":", "r", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "z", "**", "2.", ")", "out", "=", "self", ".", "_scf", ".", "zforce", "(", ...
NAME: _zforce PURPOSE: evaluate the vertical force at (R,z, phi) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: vertical force at (R,z, phi) HISTORY: 2016-12-26 - Written - Bovy (UofT/CCA)
[ "NAME", ":", "_zforce", "PURPOSE", ":", "evaluate", "the", "vertical", "force", "at", "(", "R", "z", "phi", ")", "INPUT", ":", "R", "-", "Cylindrical", "Galactocentric", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", ...
python
train
oceanprotocol/squid-py
squid_py/aquarius/aquarius.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/aquarius/aquarius.py#L127-L151
def publish_asset_ddo(self, ddo): """ Register asset ddo in aquarius. :param ddo: DDO instance :return: API response (depends on implementation) """ try: asset_did = ddo.did response = self.requests_session.post(self.url, data=ddo.as_text(), headers=self._headers) except AttributeError: raise AttributeError('DDO invalid. Review that all the required parameters are filled.') if response.status_code == 500: raise ValueError( f'This Asset ID already exists! \n\tHTTP Error message: \n\t\t{response.text}') elif response.status_code != 201: raise Exception(f'{response.status_code} ERROR Full error: \n{response.text}') elif response.status_code == 201: response = json.loads(response.content) logger.debug(f'Published asset DID {asset_did}') return response else: raise Exception(f'Unhandled ERROR: status-code {response.status_code}, ' f'error message {response.text}')
[ "def", "publish_asset_ddo", "(", "self", ",", "ddo", ")", ":", "try", ":", "asset_did", "=", "ddo", ".", "did", "response", "=", "self", ".", "requests_session", ".", "post", "(", "self", ".", "url", ",", "data", "=", "ddo", ".", "as_text", "(", ")",...
Register asset ddo in aquarius. :param ddo: DDO instance :return: API response (depends on implementation)
[ "Register", "asset", "ddo", "in", "aquarius", "." ]
python
train
lambdamusic/Ontospy
ontospy/extras/hacks/sketch.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L149-L166
def omnigraffle(self): """ tries to open an export directly in omnigraffle """ temp = self.rdf_source("dot") try: # try to put in the user/tmp folder from os.path import expanduser home = expanduser("~") filename = home + "/tmp/turtle_sketch.dot" f = open(filename, "w") except: filename = "turtle_sketch.dot" f = open(filename, "w") f.write(temp) f.close() try: os.system("open " + filename) except: os.system("start " + filename)
[ "def", "omnigraffle", "(", "self", ")", ":", "temp", "=", "self", ".", "rdf_source", "(", "\"dot\"", ")", "try", ":", "# try to put in the user/tmp folder", "from", "os", ".", "path", "import", "expanduser", "home", "=", "expanduser", "(", "\"~\"", ")", "fil...
tries to open an export directly in omnigraffle
[ "tries", "to", "open", "an", "export", "directly", "in", "omnigraffle" ]
python
train
base4sistemas/satcfe
satcfe/clientesathub.py
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/clientesathub.py#L190-L199
def consultar_numero_sessao(self, numero_sessao): """Sobrepõe :meth:`~satcfe.base.FuncoesSAT.consultar_numero_sessao`. :return: Uma resposta SAT que irá depender da sessão consultada. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('consultarnumerosessao', numero_sessao=numero_sessao) conteudo = resp.json() return RespostaConsultarNumeroSessao.analisar(conteudo.get('retorno'))
[ "def", "consultar_numero_sessao", "(", "self", ",", "numero_sessao", ")", ":", "resp", "=", "self", ".", "_http_post", "(", "'consultarnumerosessao'", ",", "numero_sessao", "=", "numero_sessao", ")", "conteudo", "=", "resp", ".", "json", "(", ")", "return", "R...
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.consultar_numero_sessao`. :return: Uma resposta SAT que irá depender da sessão consultada. :rtype: satcfe.resposta.padrao.RespostaSAT
[ "Sobrepõe", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "consultar_numero_sessao", "." ]
python
train
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L110-L143
def analyze_pages(file_name, char_margin=1.0): """ Input: the file path to the PDF file Output: yields the layout object for each page in the PDF """ log = logging.getLogger(__name__) # Open a PDF file. with open(os.path.realpath(file_name), "rb") as fp: # Create a PDF parser object associated with the file object. parser = PDFParser(fp) # Create a PDF document object that stores the document structure. # Supply the password for initialization. document = PDFDocument(parser, password="") # Create a PDF resource manager object that stores shared resources. rsrcmgr = PDFResourceManager() # Set parameters for analysis. laparams = LAParams( char_margin=char_margin, word_margin=0.1, detect_vertical=True ) # Create a PDF page aggregator object. device = CustomPDFPageAggregator(rsrcmgr, laparams=laparams) # Create a PDF interpreter object. interpreter = PDFPageInterpreter(rsrcmgr, device) # Process each page contained in the document. for page_num, page in enumerate(PDFPage.create_pages(document)): try: interpreter.process_page(page) except OverflowError as oe: log.exception( "{}, skipping page {} of {}".format(oe, page_num, file_name) ) continue layout = device.get_result() yield layout
[ "def", "analyze_pages", "(", "file_name", ",", "char_margin", "=", "1.0", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Open a PDF file.", "with", "open", "(", "os", ".", "path", ".", "realpath", "(", "file_name", ")", ",", ...
Input: the file path to the PDF file Output: yields the layout object for each page in the PDF
[ "Input", ":", "the", "file", "path", "to", "the", "PDF", "file", "Output", ":", "yields", "the", "layout", "object", "for", "each", "page", "in", "the", "PDF" ]
python
train
guaix-ucm/pyemir
emirdrp/instrument/csu_configuration.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/instrument/csu_configuration.py#L179-L207
def widths_in_range_mm( self, minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM, maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM ): """Return list of slitlets which width is within given range Parameters ---------- minwidth : float Minimum slit width (mm). maxwidth : float Maximum slit width (mm). Returns ------- list_ok : list List of booleans indicating whether the corresponding slitlet width is within range """ list_ok = [] for i in range(EMIR_NBARS): slitlet_ok = minwidth <= self._csu_bar_slit_width[i] <= maxwidth if slitlet_ok: list_ok.append(i + 1) return list_ok
[ "def", "widths_in_range_mm", "(", "self", ",", "minwidth", "=", "EMIR_MINIMUM_SLITLET_WIDTH_MM", ",", "maxwidth", "=", "EMIR_MAXIMUM_SLITLET_WIDTH_MM", ")", ":", "list_ok", "=", "[", "]", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "slitlet_ok", "=",...
Return list of slitlets which width is within given range Parameters ---------- minwidth : float Minimum slit width (mm). maxwidth : float Maximum slit width (mm). Returns ------- list_ok : list List of booleans indicating whether the corresponding slitlet width is within range
[ "Return", "list", "of", "slitlets", "which", "width", "is", "within", "given", "range" ]
python
train
mongodb/mongo-python-driver
pymongo/bulk.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/bulk.py#L432-L497
def execute_no_results(self, sock_info, generator): """Execute all operations, returning no results (w=0). """ if self.uses_collation: raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') if self.uses_array_filters: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val and sock_info.max_wire_version >= 4: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") # OP_MSG if sock_info.max_wire_version > 5: if self.ordered: return self.execute_command_no_results(sock_info, generator) return self.execute_op_msg_no_results(sock_info, generator) coll = self.collection # If ordered is True we have to send GLE or use write # commands so we can abort on the first error. write_concern = WriteConcern(w=int(self.ordered)) op_id = _randint() next_run = next(generator) while next_run: # An ordered bulk write needs to send acknowledged writes to short # circuit the next run. However, the final message on the final # run can be unacknowledged. run = next_run next_run = next(generator, None) needs_ack = self.ordered and next_run is not None try: if run.op_type == _INSERT: self.execute_insert_no_results( sock_info, run, op_id, needs_ack) elif run.op_type == _UPDATE: for operation in run.ops: doc = operation['u'] check_keys = True if doc and next(iter(doc)).startswith('$'): check_keys = False coll._update( sock_info, operation['q'], doc, operation['upsert'], check_keys, operation['multi'], write_concern=write_concern, op_id=op_id, ordered=self.ordered, bypass_doc_val=self.bypass_doc_val) else: for operation in run.ops: coll._delete(sock_info, operation['q'], not operation['limit'], write_concern, op_id, self.ordered) except OperationFailure: if self.ordered: break
[ "def", "execute_no_results", "(", "self", ",", "sock_info", ",", "generator", ")", ":", "if", "self", ".", "uses_collation", ":", "raise", "ConfigurationError", "(", "'Collation is unsupported for unacknowledged writes.'", ")", "if", "self", ".", "uses_array_filters", ...
Execute all operations, returning no results (w=0).
[ "Execute", "all", "operations", "returning", "no", "results", "(", "w", "=", "0", ")", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8496-L8509
def airspeeds_send(self, time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy, force_mavlink1=False): ''' The airspeed measured by sensors and IMU time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) airspeed_imu : Airspeed estimate from IMU, cm/s (int16_t) airspeed_pitot : Pitot measured forward airpseed, cm/s (int16_t) airspeed_hot_wire : Hot wire anenometer measured airspeed, cm/s (int16_t) airspeed_ultrasonic : Ultrasonic measured airspeed, cm/s (int16_t) aoa : Angle of attack sensor, degrees * 10 (int16_t) aoy : Yaw angle sensor, degrees * 10 (int16_t) ''' return self.send(self.airspeeds_encode(time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy), force_mavlink1=force_mavlink1)
[ "def", "airspeeds_send", "(", "self", ",", "time_boot_ms", ",", "airspeed_imu", ",", "airspeed_pitot", ",", "airspeed_hot_wire", ",", "airspeed_ultrasonic", ",", "aoa", ",", "aoy", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "...
The airspeed measured by sensors and IMU time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) airspeed_imu : Airspeed estimate from IMU, cm/s (int16_t) airspeed_pitot : Pitot measured forward airpseed, cm/s (int16_t) airspeed_hot_wire : Hot wire anenometer measured airspeed, cm/s (int16_t) airspeed_ultrasonic : Ultrasonic measured airspeed, cm/s (int16_t) aoa : Angle of attack sensor, degrees * 10 (int16_t) aoy : Yaw angle sensor, degrees * 10 (int16_t)
[ "The", "airspeed", "measured", "by", "sensors", "and", "IMU" ]
python
train
saltstack/salt
salt/beacons/haproxy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/haproxy.py#L30-L59
def validate(config): ''' Validate the beacon configuration ''' if not isinstance(config, list): return False, ('Configuration for haproxy beacon must ' 'be a list.') else: _config = {} list(map(_config.update, config)) if 'backends' not in _config: return False, ('Configuration for haproxy beacon ' 'requires backends.') else: if not isinstance(_config['backends'], dict): return False, ('Backends for haproxy beacon ' 'must be a dictionary.') else: for backend in _config['backends']: log.debug('_config %s', _config['backends'][backend]) if 'servers' not in _config['backends'][backend]: return False, ('Backends for haproxy beacon ' 'require servers.') else: _servers = _config['backends'][backend]['servers'] if not isinstance(_servers, list): return False, ('Servers for haproxy beacon ' 'must be a list.') return True, 'Valid beacon configuration'
[ "def", "validate", "(", "config", ")", ":", "if", "not", "isinstance", "(", "config", ",", "list", ")", ":", "return", "False", ",", "(", "'Configuration for haproxy beacon must '", "'be a list.'", ")", "else", ":", "_config", "=", "{", "}", "list", "(", "...
Validate the beacon configuration
[ "Validate", "the", "beacon", "configuration" ]
python
train
KnuVerse/knuverse-sdk-python
knuverse/knufactor.py
https://github.com/KnuVerse/knuverse-sdk-python/blob/00f1275a452a4dcf9bc92ef345f6985504226d8e/knuverse/knufactor.py#L639-L647
def status(self): """ Get server status. Uses GET to /status interface. :Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_. """ response = self._get(url.status) self._check_response(response, 200) return self._create_response(response)
[ "def", "status", "(", "self", ")", ":", "response", "=", "self", ".", "_get", "(", "url", ".", "status", ")", "self", ".", "_check_response", "(", "response", ",", "200", ")", "return", "self", ".", "_create_response", "(", "response", ")" ]
Get server status. Uses GET to /status interface. :Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_.
[ "Get", "server", "status", ".", "Uses", "GET", "to", "/", "status", "interface", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L81-L90
def _read_input_csv(in_file): """Parse useful details from SampleSheet CSV file. """ with io.open(in_file, newline=None) as in_handle: reader = csv.reader(in_handle) next(reader) # header for line in reader: if line: # empty lines (fc_id, lane, sample_id, genome, barcode) = line[:5] yield fc_id, lane, sample_id, genome, barcode
[ "def", "_read_input_csv", "(", "in_file", ")", ":", "with", "io", ".", "open", "(", "in_file", ",", "newline", "=", "None", ")", "as", "in_handle", ":", "reader", "=", "csv", ".", "reader", "(", "in_handle", ")", "next", "(", "reader", ")", "# header",...
Parse useful details from SampleSheet CSV file.
[ "Parse", "useful", "details", "from", "SampleSheet", "CSV", "file", "." ]
python
train
luqasz/librouteros
librouteros/connections.py
https://github.com/luqasz/librouteros/blob/59293eb49c07a339af87b0416e4619e78ca5176d/librouteros/connections.py#L178-L188
def read(self, length): """ Read as many bytes from socket as specified in length. Loop as long as every byte is read unless exception is raised. """ data = bytearray() while len(data) != length: data += self.sock.recv((length - len(data))) if not data: raise ConnectionError('Connection unexpectedly closed.') return data
[ "def", "read", "(", "self", ",", "length", ")", ":", "data", "=", "bytearray", "(", ")", "while", "len", "(", "data", ")", "!=", "length", ":", "data", "+=", "self", ".", "sock", ".", "recv", "(", "(", "length", "-", "len", "(", "data", ")", ")...
Read as many bytes from socket as specified in length. Loop as long as every byte is read unless exception is raised.
[ "Read", "as", "many", "bytes", "from", "socket", "as", "specified", "in", "length", ".", "Loop", "as", "long", "as", "every", "byte", "is", "read", "unless", "exception", "is", "raised", "." ]
python
train
mrcagney/gtfstk
gtfstk/miscellany.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/miscellany.py#L23-L103
def summarize(feed: "Feed", table: str = None) -> DataFrame: """ Return a DataFrame summarizing all GTFS tables in the given feed or in the given table if specified. Parameters ---------- feed : Feed table : string A GTFS table name, e.g. ``'stop_times'`` Returns ------- DataFrame Columns are - ``'table'``: name of the GTFS table, e.g. ``'stops'`` - ``'column'``: name of a column in the table, e.g. ``'stop_id'`` - ``'num_values'``: number of values in the column - ``'num_nonnull_values'``: number of nonnull values in the column - ``'num_unique_values'``: number of unique values in the column, excluding null values - ``'min_value'``: minimum value in the column - ``'max_value'``: maximum value in the column Notes ----- - If the table is not in the feed, then return an empty DataFrame - If the table is not valid, raise a ValueError """ gtfs_tables = cs.GTFS_REF.table.unique() if table is not None: if table not in gtfs_tables: raise ValueError(f"{table} is not a GTFS table") else: tables = [table] else: tables = gtfs_tables frames = [] for table in tables: f = getattr(feed, table) if f is None: continue def my_agg(col): d = {} d["column"] = col.name d["num_values"] = col.size d["num_nonnull_values"] = col.count() d["num_unique_values"] = col.nunique() d["min_value"] = col.dropna().min() d["max_value"] = col.dropna().max() return pd.Series(d) g = f.apply(my_agg).T.reset_index(drop=True) g["table"] = table frames.append(g) cols = [ "table", "column", "num_values", "num_nonnull_values", "num_unique_values", "min_value", "max_value", ] if not frames: f = pd.DataFrame() else: f = pd.concat(frames) # Rearrange columns f = f[cols].copy() return f
[ "def", "summarize", "(", "feed", ":", "\"Feed\"", ",", "table", ":", "str", "=", "None", ")", "->", "DataFrame", ":", "gtfs_tables", "=", "cs", ".", "GTFS_REF", ".", "table", ".", "unique", "(", ")", "if", "table", "is", "not", "None", ":", "if", "...
Return a DataFrame summarizing all GTFS tables in the given feed or in the given table if specified. Parameters ---------- feed : Feed table : string A GTFS table name, e.g. ``'stop_times'`` Returns ------- DataFrame Columns are - ``'table'``: name of the GTFS table, e.g. ``'stops'`` - ``'column'``: name of a column in the table, e.g. ``'stop_id'`` - ``'num_values'``: number of values in the column - ``'num_nonnull_values'``: number of nonnull values in the column - ``'num_unique_values'``: number of unique values in the column, excluding null values - ``'min_value'``: minimum value in the column - ``'max_value'``: maximum value in the column Notes ----- - If the table is not in the feed, then return an empty DataFrame - If the table is not valid, raise a ValueError
[ "Return", "a", "DataFrame", "summarizing", "all", "GTFS", "tables", "in", "the", "given", "feed", "or", "in", "the", "given", "table", "if", "specified", "." ]
python
train
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L595-L624
def matches(self, spec): """Whether the spec applies to this object. Args: spec: A function, spec or type to check for a match * A 'type[[.group].label]' string which is compared against the type, group and label of this object * A function which is given the object and returns a boolean. * An object type matched using isinstance. Returns: bool: Whether the spec matched this object. """ if callable(spec) and not isinstance(spec, type): return spec(self) elif isinstance(spec, type): return isinstance(self, spec) specification = (self.__class__.__name__, self.group, self.label) split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False) for s in split_spec)) if all(nocompare): return True match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc)) self_spec = match_fn(split_spec) unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec if unescaped_match: return True sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer] identifier_specification = tuple(fn(ident, escape=False) for ident, fn in zip(specification, sanitizers)) identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec return identifier_match
[ "def", "matches", "(", "self", ",", "spec", ")", ":", "if", "callable", "(", "spec", ")", "and", "not", "isinstance", "(", "spec", ",", "type", ")", ":", "return", "spec", "(", "self", ")", "elif", "isinstance", "(", "spec", ",", "type", ")", ":", ...
Whether the spec applies to this object. Args: spec: A function, spec or type to check for a match * A 'type[[.group].label]' string which is compared against the type, group and label of this object * A function which is given the object and returns a boolean. * An object type matched using isinstance. Returns: bool: Whether the spec matched this object.
[ "Whether", "the", "spec", "applies", "to", "this", "object", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/ppo.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo.py#L71-L142
def define_ppo_epoch(memory, hparams, action_space, batch_size): """PPO epoch.""" observation, reward, done, action, old_pdf, value = memory # This is to avoid propagating gradients through simulated environment. observation = tf.stop_gradient(observation) action = tf.stop_gradient(action) reward = tf.stop_gradient(reward) if hasattr(hparams, "rewards_preprocessing_fun"): reward = hparams.rewards_preprocessing_fun(reward) done = tf.stop_gradient(done) value = tf.stop_gradient(value) old_pdf = tf.stop_gradient(old_pdf) advantage = calculate_generalized_advantage_estimator( reward, value, done, hparams.gae_gamma, hparams.gae_lambda) discounted_reward = tf.stop_gradient(advantage + value[:-1]) advantage_mean, advantage_variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True) advantage_normalized = tf.stop_gradient( (advantage - advantage_mean)/(tf.sqrt(advantage_variance) + 1e-8)) add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)] number_of_batches = ((hparams.epoch_length-1) * hparams.optimization_epochs // hparams.optimization_batch_size) epoch_length = hparams.epoch_length if hparams.effective_num_agents is not None: number_of_batches *= batch_size number_of_batches //= hparams.effective_num_agents epoch_length //= hparams.effective_num_agents assert number_of_batches > 0, "Set the paremeters so that number_of_batches>0" lr = learning_rate.learning_rate_schedule(hparams) shuffled_indices = [tf.random.shuffle(tf.range(epoch_length - 1)) for _ in range(hparams.optimization_epochs)] shuffled_indices = tf.concat(shuffled_indices, axis=0) shuffled_indices = shuffled_indices[:number_of_batches * hparams.optimization_batch_size] indices_of_batches = tf.reshape(shuffled_indices, shape=(-1, hparams.optimization_batch_size)) input_tensors = [observation, action, discounted_reward, advantage_normalized, old_pdf] ppo_step_rets = tf.scan( lambda a, i: add_lists_elementwise( # pylint: disable=g-long-lambda a, define_ppo_step([tf.gather(t, 
indices_of_batches[i, :]) for t in input_tensors], hparams, action_space, lr )), tf.range(number_of_batches), [0., 0., 0.], parallel_iterations=1) ppo_summaries = [tf.reduce_mean(ret) / number_of_batches for ret in ppo_step_rets] ppo_summaries.append(lr) summaries_names = [ "policy_loss", "value_loss", "entropy_loss", "learning_rate" ] summaries = [tf.summary.scalar(summary_name, summary) for summary_name, summary in zip(summaries_names, ppo_summaries)] losses_summary = tf.summary.merge(summaries) for summary_name, summary in zip(summaries_names, ppo_summaries): losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ") return losses_summary
[ "def", "define_ppo_epoch", "(", "memory", ",", "hparams", ",", "action_space", ",", "batch_size", ")", ":", "observation", ",", "reward", ",", "done", ",", "action", ",", "old_pdf", ",", "value", "=", "memory", "# This is to avoid propagating gradients through simul...
PPO epoch.
[ "PPO", "epoch", "." ]
python
train
xperscore/alley
alley/migrations.py
https://github.com/xperscore/alley/blob/f9a5e9e2970230e38fd8a48b6a0bc1d43a38548e/alley/migrations.py#L76-L87
def show_status(self): """Show status of unregistered migrations""" if not self.check_directory(): return migrations = self.get_unregistered_migrations() if migrations: logger.info('Unregistered migrations:') for migration in migrations: logger.info(migration.filename) else: logger.info(self.NO_MIGRATIONS_MSG)
[ "def", "show_status", "(", "self", ")", ":", "if", "not", "self", ".", "check_directory", "(", ")", ":", "return", "migrations", "=", "self", ".", "get_unregistered_migrations", "(", ")", "if", "migrations", ":", "logger", ".", "info", "(", "'Unregistered mi...
Show status of unregistered migrations
[ "Show", "status", "of", "unregistered", "migrations" ]
python
train
trailofbits/manticore
manticore/platforms/decree.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/decree.py#L767-L796
def sched(self): """ Yield CPU. This will choose another process from the RUNNNIG list and change current running process. May give the same cpu if only one running process. """ if len(self.procs) > 1: logger.info("SCHED:") logger.info("\tProcess: %r", self.procs) logger.info("\tRunning: %r", self.running) logger.info("\tRWait: %r", self.rwait) logger.info("\tTWait: %r", self.twait) logger.info("\tTimers: %r", self.timers) logger.info("\tCurrent clock: %d", self.clocks) logger.info("\tCurrent cpu: %d", self._current) if len(self.running) == 0: logger.info("None running checking if there is some process waiting for a timeout") if all([x is None for x in self.timers]): raise Deadlock() self.clocks = min([x for x in self.timers if x is not None]) + 1 self.check_timers() assert len(self.running) != 0, "DEADLOCK!" self._current = self.running[0] return next_index = (self.running.index(self._current) + 1) % len(self.running) next = self.running[next_index] if len(self.procs) > 1: logger.info("\tTransfer control from process %d to %d", self._current, next) self._current = next
[ "def", "sched", "(", "self", ")", ":", "if", "len", "(", "self", ".", "procs", ")", ">", "1", ":", "logger", ".", "info", "(", "\"SCHED:\"", ")", "logger", ".", "info", "(", "\"\\tProcess: %r\"", ",", "self", ".", "procs", ")", "logger", ".", "info...
Yield CPU. This will choose another process from the RUNNNIG list and change current running process. May give the same cpu if only one running process.
[ "Yield", "CPU", ".", "This", "will", "choose", "another", "process", "from", "the", "RUNNNIG", "list", "and", "change", "current", "running", "process", ".", "May", "give", "the", "same", "cpu", "if", "only", "one", "running", "process", "." ]
python
valid
i3visio/deepify
deepify/utils/configuration.py
https://github.com/i3visio/deepify/blob/2af04e0bea3eaabe96b0565e10f7eeb29b042a2b/deepify/utils/configuration.py#L76-L122
def getConfiguration(configPath = None): """ Reading the configuration file to look for where the different gates are running. :return: A json containing the information stored in the .cfg file. """ if configPath == None: # If a current.cfg has not been found, creating it by copying from default configPath = getConfigPath("browser.cfg") # Checking if the configuration file exists if not os.path.exists(configPath): try: # Copy the data from the default folder defaultConfigPath = getConfigPath(os.path.join("default", "browser.cfg")) with open(configPath, "w") as oF: with open(defaultConfigPath) as iF: cont = iF.read() oF.write(cont) except Exception, e: errMsg = "ERROR. No configuration file could be found and the default file was not found either. You might need to reset it manually." raise Exception( errMsg + " " + str(e)) try: # Reading the configuration file config = ConfigParser.ConfigParser() config.read(configPath) info = {} # Iterating through all the sections, which contain the platforms for section in config.sections(): current = {} # Iterating through parametgers for (param, value) in config.items(section): current[param] = value # Loading the configuration in the info dictionary info[section] = current except Exception, e: errMsg = "ERROR. Something happened when processing the Configuration file (some kind of malform?). Check it before running it again." raise Exception( errMsg + " " + str(e)) return info
[ "def", "getConfiguration", "(", "configPath", "=", "None", ")", ":", "if", "configPath", "==", "None", ":", "# If a current.cfg has not been found, creating it by copying from default", "configPath", "=", "getConfigPath", "(", "\"browser.cfg\"", ")", "# Checking if the config...
Reading the configuration file to look for where the different gates are running. :return: A json containing the information stored in the .cfg file.
[ "Reading", "the", "configuration", "file", "to", "look", "for", "where", "the", "different", "gates", "are", "running", ".", ":", "return", ":", "A", "json", "containing", "the", "information", "stored", "in", "the", ".", "cfg", "file", "." ]
python
train
ulule/django-linguist
linguist/mixins.py
https://github.com/ulule/django-linguist/blob/d2b95a6ab921039d56d5eeb352badfe5be9e8f77/linguist/mixins.py#L166-L175
def get_translation_args(self, args): """ Returns linguist args from model args. """ translation_args = [] for arg in args: condition = self._get_linguist_condition(arg, transform=True) if condition: translation_args.append(condition) return translation_args
[ "def", "get_translation_args", "(", "self", ",", "args", ")", ":", "translation_args", "=", "[", "]", "for", "arg", "in", "args", ":", "condition", "=", "self", ".", "_get_linguist_condition", "(", "arg", ",", "transform", "=", "True", ")", "if", "conditio...
Returns linguist args from model args.
[ "Returns", "linguist", "args", "from", "model", "args", "." ]
python
train
fishtown-analytics/dbt
core/dbt/linker.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/linker.py#L144-L155
def mark_done(self, node_id): """Given a node's unique ID, mark it as done. This method takes the lock. :param str node_id: The node ID to mark as complete. """ with self.lock: self.in_progress.remove(node_id) self.graph.remove_node(node_id) self._find_new_additions() self.inner.task_done()
[ "def", "mark_done", "(", "self", ",", "node_id", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "in_progress", ".", "remove", "(", "node_id", ")", "self", ".", "graph", ".", "remove_node", "(", "node_id", ")", "self", ".", "_find_new_additions"...
Given a node's unique ID, mark it as done. This method takes the lock. :param str node_id: The node ID to mark as complete.
[ "Given", "a", "node", "s", "unique", "ID", "mark", "it", "as", "done", "." ]
python
train
rochacbruno/dynaconf
dynaconf/base.py
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/base.py#L115-L125
def _setup(self): """Initial setup, run once.""" default_settings.reload() environment_variable = self._kwargs.get( "ENVVAR_FOR_DYNACONF", default_settings.ENVVAR_FOR_DYNACONF ) settings_module = os.environ.get(environment_variable) self._wrapped = Settings( settings_module=settings_module, **self._kwargs ) self.logger.debug("Lazy Settings _setup ...")
[ "def", "_setup", "(", "self", ")", ":", "default_settings", ".", "reload", "(", ")", "environment_variable", "=", "self", ".", "_kwargs", ".", "get", "(", "\"ENVVAR_FOR_DYNACONF\"", ",", "default_settings", ".", "ENVVAR_FOR_DYNACONF", ")", "settings_module", "=", ...
Initial setup, run once.
[ "Initial", "setup", "run", "once", "." ]
python
train
dragnet-org/dragnet
dragnet/model_training.py
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/model_training.py#L19-L48
def evaluate_model_predictions(y_true, y_pred, weights=None): """ Evaluate the performance of an extractor model's binary classification predictions, typically at the block level, of whether a block is content or not. Args: y_true (``np.ndarray``) y_pred (``np.ndarray``) weights (``np.ndarray``) Returns: Dict[str, float] """ if isinstance(y_pred[0], np.ndarray): y_pred = np.concatenate(y_pred) if isinstance(y_true[0], np.ndarray): y_true = np.concatenate(y_true) if (weights is not None) and (isinstance(weights[0], np.ndarray)): weights = np.concatenate(weights) accuracy = accuracy_score( y_true, y_pred, normalize=True, sample_weight=weights) precision = precision_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) recall = recall_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) f1 = f1_score( y_true, y_pred, average='binary', pos_label=1, sample_weight=weights) return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
[ "def", "evaluate_model_predictions", "(", "y_true", ",", "y_pred", ",", "weights", "=", "None", ")", ":", "if", "isinstance", "(", "y_pred", "[", "0", "]", ",", "np", ".", "ndarray", ")", ":", "y_pred", "=", "np", ".", "concatenate", "(", "y_pred", ")"...
Evaluate the performance of an extractor model's binary classification predictions, typically at the block level, of whether a block is content or not. Args: y_true (``np.ndarray``) y_pred (``np.ndarray``) weights (``np.ndarray``) Returns: Dict[str, float]
[ "Evaluate", "the", "performance", "of", "an", "extractor", "model", "s", "binary", "classification", "predictions", "typically", "at", "the", "block", "level", "of", "whether", "a", "block", "is", "content", "or", "not", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/printer.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/printer.py#L67-L102
def _summarize_network_layer_info(layer): """ Args: layer - an MLModel NeuralNetwork Layer protobuf message Returns: layer_type : str - type of layer layer_name : str - name of the layer layer_inputs : list[str] - a list of strings representing input blobs of the layer layer_outputs : list[str] - a list of strings representing output blobs of the layer layer_field_content : list[(str, str)] - a list of two-tuple of (parameter_name, content) """ layer_type_str = layer.WhichOneof('layer') layer_name = layer.name layer_inputs = list(layer.input) layer_outputs = list(layer.output) typed_layer = getattr(layer, layer_type_str) layer_field_names = [l.name for l in typed_layer.DESCRIPTOR.fields] layer_field_content = [] for name in layer_field_names: field = getattr(typed_layer,name) summary_str = '' if type(field) == _NeuralNetwork_pb2.LSTMWeightParams: summary_str = _get_lstm_weight_param_summary(field) elif type(field) == _NeuralNetwork_pb2.WeightParams: summary_str = _get_weight_param_summary(field) else: field_str = str(field) if len(field_str) > 0: summary_str = field_str.replace('\n', ' ') if len(summary_str) > 0: layer_field_content.append([name, summary_str]) return layer_type_str, layer_name, layer_inputs, layer_outputs, layer_field_content
[ "def", "_summarize_network_layer_info", "(", "layer", ")", ":", "layer_type_str", "=", "layer", ".", "WhichOneof", "(", "'layer'", ")", "layer_name", "=", "layer", ".", "name", "layer_inputs", "=", "list", "(", "layer", ".", "input", ")", "layer_outputs", "=",...
Args: layer - an MLModel NeuralNetwork Layer protobuf message Returns: layer_type : str - type of layer layer_name : str - name of the layer layer_inputs : list[str] - a list of strings representing input blobs of the layer layer_outputs : list[str] - a list of strings representing output blobs of the layer layer_field_content : list[(str, str)] - a list of two-tuple of (parameter_name, content)
[ "Args", ":", "layer", "-", "an", "MLModel", "NeuralNetwork", "Layer", "protobuf", "message", "Returns", ":", "layer_type", ":", "str", "-", "type", "of", "layer", "layer_name", ":", "str", "-", "name", "of", "the", "layer", "layer_inputs", ":", "list", "["...
python
train
pypa/pipenv
pipenv/vendor/dotenv/cli.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/cli.py#L45-L53
def set(ctx, key, value): '''Store the given key/value.''' file = ctx.obj['FILE'] quote = ctx.obj['QUOTE'] success, key, value = set_key(file, key, value, quote) if success: click.echo('%s=%s' % (key, value)) else: exit(1)
[ "def", "set", "(", "ctx", ",", "key", ",", "value", ")", ":", "file", "=", "ctx", ".", "obj", "[", "'FILE'", "]", "quote", "=", "ctx", ".", "obj", "[", "'QUOTE'", "]", "success", ",", "key", ",", "value", "=", "set_key", "(", "file", ",", "key"...
Store the given key/value.
[ "Store", "the", "given", "key", "/", "value", "." ]
python
train
PMEAL/OpenPNM
openpnm/topotools/topotools.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L2445-L2497
def add_boundary_pores(network, pores, offset, apply_label='boundary'): r""" This method uses ``clone_pores`` to clone the input pores, then shifts them the specified amount and direction, then applies the given label. Parameters ---------- pores : array_like List of pores to offset. If no pores are specified, then it assumes that all surface pores are to be cloned. offset : 3 x 1 array The distance in vector form which the cloned boundary pores should be offset. apply_label : string This label is applied to the boundary pores. Default is 'boundary'. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> print(pn.Np) # Confirm initial Network size 125 >>> Ps = pn.pores('top') # Select pores on top face >>> pn.add_boundary_pores(labels=['top']) >>> print(pn.Np) # Confirm addition of 25 new pores 150 """ # Parse the input pores Ps = sp.array(pores, ndmin=1) if Ps.dtype is bool: Ps = network.toindices(Ps) if sp.size(pores) == 0: # Handle an empty array if given return sp.array([], dtype=sp.int64) # Clone the specifed pores clone_pores(network=network, pores=Ps) newPs = network.pores('pore.clone') del network['pore.clone'] newTs = network.throats('clone') del network['throat.clone'] # Offset the cloned pores network['pore.coords'][newPs] += offset # Apply labels to boundary pores (trim leading 'pores' if present) label = apply_label.split('.')[-1] plabel = 'pore.' + label tlabel = 'throat.' + label network[plabel] = False network[plabel][newPs] = True network[tlabel] = False network[tlabel][newTs] = True
[ "def", "add_boundary_pores", "(", "network", ",", "pores", ",", "offset", ",", "apply_label", "=", "'boundary'", ")", ":", "# Parse the input pores", "Ps", "=", "sp", ".", "array", "(", "pores", ",", "ndmin", "=", "1", ")", "if", "Ps", ".", "dtype", "is"...
r""" This method uses ``clone_pores`` to clone the input pores, then shifts them the specified amount and direction, then applies the given label. Parameters ---------- pores : array_like List of pores to offset. If no pores are specified, then it assumes that all surface pores are to be cloned. offset : 3 x 1 array The distance in vector form which the cloned boundary pores should be offset. apply_label : string This label is applied to the boundary pores. Default is 'boundary'. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> print(pn.Np) # Confirm initial Network size 125 >>> Ps = pn.pores('top') # Select pores on top face >>> pn.add_boundary_pores(labels=['top']) >>> print(pn.Np) # Confirm addition of 25 new pores 150
[ "r", "This", "method", "uses", "clone_pores", "to", "clone", "the", "input", "pores", "then", "shifts", "them", "the", "specified", "amount", "and", "direction", "then", "applies", "the", "given", "label", "." ]
python
train
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1798-L1816
def next(self): """Iterator protocol: get next item or raise StopIteration.""" if self._fut is None: self._fut = self._iter.getq() try: try: # The future result is set by this class's _extended_callback # method. # pylint: disable=unpacking-non-sequence (ent, self._cursor_before, self._cursor_after, self._more_results) = self._fut.get_result() return ent except EOFError: self._exhausted = True raise StopIteration finally: self._fut = None
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_fut", "is", "None", ":", "self", ".", "_fut", "=", "self", ".", "_iter", ".", "getq", "(", ")", "try", ":", "try", ":", "# The future result is set by this class's _extended_callback", "# method.", ...
Iterator protocol: get next item or raise StopIteration.
[ "Iterator", "protocol", ":", "get", "next", "item", "or", "raise", "StopIteration", "." ]
python
train
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L175-L183
def SeqN(n, *inner_rules, **kwargs): """ A rule that accepts a sequence of tokens satisfying ``rules`` and returns the value returned by rule number ``n``, or None if the first rule was not satisfied. """ @action(Seq(*inner_rules), loc=kwargs.get("loc", None)) def rule(parser, *values): return values[n] return rule
[ "def", "SeqN", "(", "n", ",", "*", "inner_rules", ",", "*", "*", "kwargs", ")", ":", "@", "action", "(", "Seq", "(", "*", "inner_rules", ")", ",", "loc", "=", "kwargs", ".", "get", "(", "\"loc\"", ",", "None", ")", ")", "def", "rule", "(", "par...
A rule that accepts a sequence of tokens satisfying ``rules`` and returns the value returned by rule number ``n``, or None if the first rule was not satisfied.
[ "A", "rule", "that", "accepts", "a", "sequence", "of", "tokens", "satisfying", "rules", "and", "returns", "the", "value", "returned", "by", "rule", "number", "n", "or", "None", "if", "the", "first", "rule", "was", "not", "satisfied", "." ]
python
valid
hozn/coilmq
coilmq/topic.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/topic.py#L99-L112
def disconnect(self, connection): """ Removes a subscriber connection. @param connection: The client connection to unsubscribe. @type connection: L{coilmq.server.StompConnection} """ self.log.debug("Disconnecting %s" % connection) for dest in list(self._topics.keys()): if connection in self._topics[dest]: self._topics[dest].remove(connection) if not self._topics[dest]: # This won't trigger RuntimeError, since we're using keys() del self._topics[dest]
[ "def", "disconnect", "(", "self", ",", "connection", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Disconnecting %s\"", "%", "connection", ")", "for", "dest", "in", "list", "(", "self", ".", "_topics", ".", "keys", "(", ")", ")", ":", "if", "co...
Removes a subscriber connection. @param connection: The client connection to unsubscribe. @type connection: L{coilmq.server.StompConnection}
[ "Removes", "a", "subscriber", "connection", "." ]
python
train
zyga/guacamole
guacamole/recipes/cmd.py
https://github.com/zyga/guacamole/blob/105c10a798144e3b89659b500d7c2b84b0c76546/guacamole/recipes/cmd.py#L411-L421
def get_ingredients(self): """Get a list of ingredients for guacamole.""" return [ cmdtree.CommandTreeBuilder(self.command), cmdtree.CommandTreeDispatcher(), argparse.AutocompleteIngredient(), argparse.ParserIngredient(), crash.VerboseCrashHandler(), ansi.ANSIIngredient(), log.Logging(), ]
[ "def", "get_ingredients", "(", "self", ")", ":", "return", "[", "cmdtree", ".", "CommandTreeBuilder", "(", "self", ".", "command", ")", ",", "cmdtree", ".", "CommandTreeDispatcher", "(", ")", ",", "argparse", ".", "AutocompleteIngredient", "(", ")", ",", "ar...
Get a list of ingredients for guacamole.
[ "Get", "a", "list", "of", "ingredients", "for", "guacamole", "." ]
python
train
ramses-tech/ramses
ramses/generators.py
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/generators.py#L125-L151
def generate_server(raml_root, config): """ Handle server generation process. :param raml_root: Instance of ramlfications.raml.RootNode. :param config: Pyramid Configurator instance. """ log.info('Server generation started') if not raml_root.resources: return root_resource = config.get_root_resource() generated_resources = {} for raml_resource in raml_root.resources: if raml_resource.path in generated_resources: continue # Get Nefertari parent resource parent_resource = _get_nefertari_parent_resource( raml_resource, generated_resources, root_resource) # Get generated resource and store it new_resource = generate_resource( config, raml_resource, parent_resource) if new_resource is not None: generated_resources[raml_resource.path] = new_resource
[ "def", "generate_server", "(", "raml_root", ",", "config", ")", ":", "log", ".", "info", "(", "'Server generation started'", ")", "if", "not", "raml_root", ".", "resources", ":", "return", "root_resource", "=", "config", ".", "get_root_resource", "(", ")", "ge...
Handle server generation process. :param raml_root: Instance of ramlfications.raml.RootNode. :param config: Pyramid Configurator instance.
[ "Handle", "server", "generation", "process", "." ]
python
train
mitsei/dlkit
dlkit/handcar/relationship/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/objects.py#L139-L158
def get_next_relationship(self): """Gets the next ``Relationship`` in this list. return: (osid.relationship.Relationship) - the next ``Relationship`` in this list. The ``has_next()`` method should be used to test that a next ``Relationship`` is available before calling this method. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ try: next_object = next(self) except StopIteration: raise IllegalState('no more elements available in this list') except Exception: # Need to specify exceptions here! raise OperationFailed() else: return next_object
[ "def", "get_next_relationship", "(", "self", ")", ":", "try", ":", "next_object", "=", "next", "(", "self", ")", "except", "StopIteration", ":", "raise", "IllegalState", "(", "'no more elements available in this list'", ")", "except", "Exception", ":", "# Need to sp...
Gets the next ``Relationship`` in this list. return: (osid.relationship.Relationship) - the next ``Relationship`` in this list. The ``has_next()`` method should be used to test that a next ``Relationship`` is available before calling this method. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "next", "Relationship", "in", "this", "list", "." ]
python
train
kennedyshead/aioasuswrt
aioasuswrt/asuswrt.py
https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/asuswrt.py#L235-L240
async def async_current_transfer_human_readable( self, use_cache=True): """Gets current transfer rates in a human readable format.""" rx, tx = await self.async_get_current_transfer_rates(use_cache) return "%s/s" % convert_size(rx), "%s/s" % convert_size(tx)
[ "async", "def", "async_current_transfer_human_readable", "(", "self", ",", "use_cache", "=", "True", ")", ":", "rx", ",", "tx", "=", "await", "self", ".", "async_get_current_transfer_rates", "(", "use_cache", ")", "return", "\"%s/s\"", "%", "convert_size", "(", ...
Gets current transfer rates in a human readable format.
[ "Gets", "current", "transfer", "rates", "in", "a", "human", "readable", "format", "." ]
python
train
scruffy-t/mpls
mpls/mpls.py
https://github.com/scruffy-t/mpls/blob/1320d1217cd72404509da49d0ea7b65163a7b40b/mpls/mpls.py#L83-L120
def get(name, stype, **kwargs): """Returns the rcParams specified in the style file given by `name` and `stype`. Parameters ---------- name: str The name of the style. stype: str Any of ('context', 'style', 'palette'). kwargs: - stylelib_url: str Overwrite the value in the local config with the specified url. - ignore_cache: bool Ignore files in the cache and force loading from the stylelib. Raises ------ ValueError: If `stype` is not any of ('context', 'style', 'palette') Returns ------- rcParams: dict The parameter dict of the file. """ stype = str(stype) params = {} if stype in MPLS_STYPES: params.update(__get(name, stype, **kwargs)) else: raise ValueError('unexpected stype: {}! Must be any of {!r}'.format(stype, MPLS_STYPES)) # color palette hack if params.get('axes.prop_cycle'): params['axes.prop_cycle'] = mpl.rcsetup.cycler('color', params['axes.prop_cycle']) return params
[ "def", "get", "(", "name", ",", "stype", ",", "*", "*", "kwargs", ")", ":", "stype", "=", "str", "(", "stype", ")", "params", "=", "{", "}", "if", "stype", "in", "MPLS_STYPES", ":", "params", ".", "update", "(", "__get", "(", "name", ",", "stype"...
Returns the rcParams specified in the style file given by `name` and `stype`. Parameters ---------- name: str The name of the style. stype: str Any of ('context', 'style', 'palette'). kwargs: - stylelib_url: str Overwrite the value in the local config with the specified url. - ignore_cache: bool Ignore files in the cache and force loading from the stylelib. Raises ------ ValueError: If `stype` is not any of ('context', 'style', 'palette') Returns ------- rcParams: dict The parameter dict of the file.
[ "Returns", "the", "rcParams", "specified", "in", "the", "style", "file", "given", "by", "name", "and", "stype", "." ]
python
train
brian-rose/climlab
climlab/radiation/greygas.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/radiation/greygas.py#L206-L218
def flux_components_bottom(self): '''Compute the contributions to the downwelling flux to surface due to emissions from each level.''' N = self.lev.size atmComponents = np.zeros_like(self.Tatm) flux_down_top = np.zeros_like(self.Ts) # same comment as above... would be nice to vectorize for n in range(N): emission = np.zeros_like(self.emission) emission[..., n] = self.emission[..., n] this_flux_down = self.trans.flux_down(flux_down_top, emission) atmComponents[..., n] = this_flux_down[..., 0] return atmComponents
[ "def", "flux_components_bottom", "(", "self", ")", ":", "N", "=", "self", ".", "lev", ".", "size", "atmComponents", "=", "np", ".", "zeros_like", "(", "self", ".", "Tatm", ")", "flux_down_top", "=", "np", ".", "zeros_like", "(", "self", ".", "Ts", ")",...
Compute the contributions to the downwelling flux to surface due to emissions from each level.
[ "Compute", "the", "contributions", "to", "the", "downwelling", "flux", "to", "surface", "due", "to", "emissions", "from", "each", "level", "." ]
python
train
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L3366-L3404
def nvmlDeviceGetTotalEccErrors(handle, errorType, counterType): r""" /** * Retrieves the total ECC error counts for the device. * * For Fermi &tm; or newer fully supported devices. * Only applicable to devices with ECC. * Requires \a NVML_INFOROM_ECC version 1.0 or higher. * Requires ECC Mode to be enabled. * * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of * errors across the entire device. * * See \ref nvmlMemoryErrorType_t for a description of available error types.\n * See \ref nvmlEccCounterType_t for a description of available counter types. * * @param device The identifier of the target device * @param errorType Flag that specifies the type of the errors. * @param counterType Flag that specifies the counter-type of the errors. * @param eccCounts Reference in which to return the specified ECC errors * * @return * - \ref NVML_SUCCESS if \a eccCounts has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceClearEccErrorCounts() */ nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors """ c_count = c_ulonglong() fn = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEccErrors") ret = fn(handle, _nvmlMemoryErrorType_t(errorType), _nvmlEccCounterType_t(counterType), byref(c_count)) _nvmlCheckReturn(ret) return bytes_to_str(c_count.value)
[ "def", "nvmlDeviceGetTotalEccErrors", "(", "handle", ",", "errorType", ",", "counterType", ")", ":", "c_count", "=", "c_ulonglong", "(", ")", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetTotalEccErrors\"", ")", "ret", "=", "fn", "(", "handle", ",", ...
r""" /** * Retrieves the total ECC error counts for the device. * * For Fermi &tm; or newer fully supported devices. * Only applicable to devices with ECC. * Requires \a NVML_INFOROM_ECC version 1.0 or higher. * Requires ECC Mode to be enabled. * * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of * errors across the entire device. * * See \ref nvmlMemoryErrorType_t for a description of available error types.\n * See \ref nvmlEccCounterType_t for a description of available counter types. * * @param device The identifier of the target device * @param errorType Flag that specifies the type of the errors. * @param counterType Flag that specifies the counter-type of the errors. * @param eccCounts Reference in which to return the specified ECC errors * * @return * - \ref NVML_SUCCESS if \a eccCounts has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceClearEccErrorCounts() */ nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors
[ "r", "/", "**", "*", "Retrieves", "the", "total", "ECC", "error", "counts", "for", "the", "device", ".", "*", "*", "For", "Fermi", "&tm", ";", "or", "newer", "fully", "supported", "devices", ".", "*", "Only", "applicable", "to", "devices", "with", "ECC...
python
train
jcalogovic/lightning
stormstats/misc.py
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L134-L159
def gen_stats(datain): """**Calculate lightning statitics and return a dictionary** Using a raw data in certain time interval calculate mean, std, min, max value for detection error or number of stations. :paramter datain: vector with detection error or number of stations :Example: >>> gen_stats(lighning_data['#sta']) """ tmp_dic={} tmp_dic['count'] = len(datain) # if there is no lightning strikes set nan values for all stats parameters if(tmp_dic['count'] == 0): tmp_dic['mean'] = np.nan tmp_dic['std'] = np.nan tmp_dic['min'] = np.nan tmp_dic['max'] = np.nan else: tmp_dic['mean'] = np.mean(datain) tmp_dic['std'] = np.std(datain) tmp_dic['min'] = min(datain) tmp_dic['max'] = max(datain) return tmp_dic
[ "def", "gen_stats", "(", "datain", ")", ":", "tmp_dic", "=", "{", "}", "tmp_dic", "[", "'count'", "]", "=", "len", "(", "datain", ")", "# if there is no lightning strikes set nan values for all stats parameters", "if", "(", "tmp_dic", "[", "'count'", "]", "==", ...
**Calculate lightning statitics and return a dictionary** Using a raw data in certain time interval calculate mean, std, min, max value for detection error or number of stations. :paramter datain: vector with detection error or number of stations :Example: >>> gen_stats(lighning_data['#sta'])
[ "**", "Calculate", "lightning", "statitics", "and", "return", "a", "dictionary", "**" ]
python
train
log2timeline/dfvfs
dfvfs/vfs/tar_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tar_file_entry.py#L120-L128
def _GetDirectory(self): """Retrieves a directory. Returns: TARDirectory: a directory or None if not available. """ if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY: return None return TARDirectory(self._file_system, self.path_spec)
[ "def", "_GetDirectory", "(", "self", ")", ":", "if", "self", ".", "entry_type", "!=", "definitions", ".", "FILE_ENTRY_TYPE_DIRECTORY", ":", "return", "None", "return", "TARDirectory", "(", "self", ".", "_file_system", ",", "self", ".", "path_spec", ")" ]
Retrieves a directory. Returns: TARDirectory: a directory or None if not available.
[ "Retrieves", "a", "directory", "." ]
python
train
numenta/nupic
src/nupic/algorithms/temporal_memory.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory.py#L696-L722
def _destroyMinPermanenceSynapses(cls, connections, random, segment, nDestroy, excludeCells): """ Destroy nDestroy synapses on the specified segment, but don't destroy synapses to the "excludeCells". """ destroyCandidates = sorted( (synapse for synapse in connections.synapsesForSegment(segment) if synapse.presynapticCell not in excludeCells), key=lambda s: s._ordinal ) for _ in xrange(nDestroy): if len(destroyCandidates) == 0: break minSynapse = None minPermanence = float("inf") for synapse in destroyCandidates: if synapse.permanence < minPermanence - EPSILON: minSynapse = synapse minPermanence = synapse.permanence connections.destroySynapse(minSynapse) destroyCandidates.remove(minSynapse)
[ "def", "_destroyMinPermanenceSynapses", "(", "cls", ",", "connections", ",", "random", ",", "segment", ",", "nDestroy", ",", "excludeCells", ")", ":", "destroyCandidates", "=", "sorted", "(", "(", "synapse", "for", "synapse", "in", "connections", ".", "synapsesF...
Destroy nDestroy synapses on the specified segment, but don't destroy synapses to the "excludeCells".
[ "Destroy", "nDestroy", "synapses", "on", "the", "specified", "segment", "but", "don", "t", "destroy", "synapses", "to", "the", "excludeCells", "." ]
python
valid
KelSolaar/Manager
manager/components_manager.py
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1147-L1173
def instantiate_components(self, callback=None): """ Instantiates the Components. Usage:: >>> manager = Manager((tests_manager,)) >>> manager.register_components() True >>> manager.instantiate_components() True >>> manager.get_interface("core.tests_component_a") <tests_component_a.TestsComponentA object at 0x17a5bb0> :param callback: Callback object. :type callback: object """ uninstantiated_components = [component for component in self.list_components() if not self.instantiate_component(component, callback)] if not uninstantiated_components: return True else: raise manager.exceptions.ComponentInstantiationError( "{0} | '{1}' Components failed to instantiate!".format(self.__class__.__name__, ", ".join(uninstantiated_components)))
[ "def", "instantiate_components", "(", "self", ",", "callback", "=", "None", ")", ":", "uninstantiated_components", "=", "[", "component", "for", "component", "in", "self", ".", "list_components", "(", ")", "if", "not", "self", ".", "instantiate_component", "(", ...
Instantiates the Components. Usage:: >>> manager = Manager((tests_manager,)) >>> manager.register_components() True >>> manager.instantiate_components() True >>> manager.get_interface("core.tests_component_a") <tests_component_a.TestsComponentA object at 0x17a5bb0> :param callback: Callback object. :type callback: object
[ "Instantiates", "the", "Components", "." ]
python
train
cjdrake/pyeda
pyeda/parsing/dimacs.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/dimacs.py#L377-L382
def _sat(lexer, varname): """Return a DIMACS SAT.""" _expect_token(lexer, {KW_p}) fmt = _expect_token(lexer, {KW_sat, KW_satx, KW_sate, KW_satex}).value nvars = _expect_token(lexer, {IntegerToken}).value return _sat_formula(lexer, varname, fmt, nvars)
[ "def", "_sat", "(", "lexer", ",", "varname", ")", ":", "_expect_token", "(", "lexer", ",", "{", "KW_p", "}", ")", "fmt", "=", "_expect_token", "(", "lexer", ",", "{", "KW_sat", ",", "KW_satx", ",", "KW_sate", ",", "KW_satex", "}", ")", ".", "value", ...
Return a DIMACS SAT.
[ "Return", "a", "DIMACS", "SAT", "." ]
python
train
geopython/OWSLib
owslib/feature/wfs300.py
https://github.com/geopython/OWSLib/blob/96d47842401a129f1e86fa9f66dccef5a5a6872c/owslib/feature/wfs300.py#L101-L127
def collection_items(self, collection_name, **kwargs): """ implements Requirement 17 (/req/core/fc-op) @type collection_name: string @param collection_name: name of collection @type bbox: list @param bbox: list of minx,miny,maxx,maxy @type time: string @param time: time extent or time instant @type limit: int @param limit: limit number of features @type startindex: int @param startindex: start position of results @returns: feature results """ if 'bbox' in kwargs: kwargs['bbox'] = ','.join(kwargs['bbox']) path = 'collections/{}/items'.format(collection_name) url = self._build_url(path) LOGGER.debug('Request: {}'.format(url)) response = requests.get(url, headers=REQUEST_HEADERS, params=kwargs).json() return response
[ "def", "collection_items", "(", "self", ",", "collection_name", ",", "*", "*", "kwargs", ")", ":", "if", "'bbox'", "in", "kwargs", ":", "kwargs", "[", "'bbox'", "]", "=", "','", ".", "join", "(", "kwargs", "[", "'bbox'", "]", ")", "path", "=", "'coll...
implements Requirement 17 (/req/core/fc-op) @type collection_name: string @param collection_name: name of collection @type bbox: list @param bbox: list of minx,miny,maxx,maxy @type time: string @param time: time extent or time instant @type limit: int @param limit: limit number of features @type startindex: int @param startindex: start position of results @returns: feature results
[ "implements", "Requirement", "17", "(", "/", "req", "/", "core", "/", "fc", "-", "op", ")" ]
python
test
riggsd/davies
davies/compass/__init__.py
https://github.com/riggsd/davies/blob/8566c626202a875947ad01c087300108c68d80b5/davies/compass/__init__.py#L201-L203
def excluded_length(self): """Surveyed length which does not count toward the included total""" return sum([shot.length for shot in self.shots if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags])
[ "def", "excluded_length", "(", "self", ")", ":", "return", "sum", "(", "[", "shot", ".", "length", "for", "shot", "in", "self", ".", "shots", "if", "Exclude", ".", "LENGTH", "in", "shot", ".", "flags", "or", "Exclude", ".", "TOTAL", "in", "shot", "."...
Surveyed length which does not count toward the included total
[ "Surveyed", "length", "which", "does", "not", "count", "toward", "the", "included", "total" ]
python
train
mosdef-hub/mbuild
mbuild/lib/recipes/tiled_compound.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/lib/recipes/tiled_compound.py#L122-L126
def _hoist_ports(self, new_tile): """Add labels for all the ports to the parent (TiledCompound). """ for port in new_tile.children: if isinstance(port, Port): self.add(port, containment=False)
[ "def", "_hoist_ports", "(", "self", ",", "new_tile", ")", ":", "for", "port", "in", "new_tile", ".", "children", ":", "if", "isinstance", "(", "port", ",", "Port", ")", ":", "self", ".", "add", "(", "port", ",", "containment", "=", "False", ")" ]
Add labels for all the ports to the parent (TiledCompound).
[ "Add", "labels", "for", "all", "the", "ports", "to", "the", "parent", "(", "TiledCompound", ")", "." ]
python
train
iotaledger/iota.lib.py
iota/api.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/api.py#L732-L789
def get_new_addresses( self, index=0, count=1, security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL, checksum=False, ): # type: (int, Optional[int], int, bool) -> dict """ Generates one or more new addresses from the seed. :param index: The key index of the first new address to generate (must be >= 1). :param count: Number of addresses to generate (must be >= 1). .. tip:: This is more efficient than calling ``get_new_address`` inside a loop. If ``None``, this method will progressively generate addresses and scan the Tangle until it finds one that has no transactions referencing it. :param security_level: Number of iterations to use when generating new addresses. Larger values take longer, but the resulting signatures are more secure. This value must be between 1 and 3, inclusive. :param checksum: Specify whether to return the address with the checksum. Defaults to ``False``. :return: Dict with the following structure:: { 'addresses': List[Address], Always a list, even if only one address was generated. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress """ return extended.GetNewAddressesCommand(self.adapter)( count=count, index=index, securityLevel=security_level, checksum=checksum, seed=self.seed, )
[ "def", "get_new_addresses", "(", "self", ",", "index", "=", "0", ",", "count", "=", "1", ",", "security_level", "=", "AddressGenerator", ".", "DEFAULT_SECURITY_LEVEL", ",", "checksum", "=", "False", ",", ")", ":", "# type: (int, Optional[int], int, bool) -> dict", ...
Generates one or more new addresses from the seed. :param index: The key index of the first new address to generate (must be >= 1). :param count: Number of addresses to generate (must be >= 1). .. tip:: This is more efficient than calling ``get_new_address`` inside a loop. If ``None``, this method will progressively generate addresses and scan the Tangle until it finds one that has no transactions referencing it. :param security_level: Number of iterations to use when generating new addresses. Larger values take longer, but the resulting signatures are more secure. This value must be between 1 and 3, inclusive. :param checksum: Specify whether to return the address with the checksum. Defaults to ``False``. :return: Dict with the following structure:: { 'addresses': List[Address], Always a list, even if only one address was generated. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
[ "Generates", "one", "or", "more", "new", "addresses", "from", "the", "seed", "." ]
python
test
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L835-L838
def show_security_group_rule(self, security_group_rule, **_params): """Fetches information of a certain security group rule.""" return self.get(self.security_group_rule_path % (security_group_rule), params=_params)
[ "def", "show_security_group_rule", "(", "self", ",", "security_group_rule", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "security_group_rule_path", "%", "(", "security_group_rule", ")", ",", "params", "=", "_params", ")"...
Fetches information of a certain security group rule.
[ "Fetches", "information", "of", "a", "certain", "security", "group", "rule", "." ]
python
train
UCL-INGI/INGInious
inginious/agent/docker_agent/__init__.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/docker_agent/__init__.py#L299-L307
async def new_job(self, message: BackendNewJob): """ Handles a new job: starts the grading container """ self._logger.info("Received request for jobid %s", message.job_id) future_results = asyncio.Future() out = await self._loop.run_in_executor(None, lambda: self.__new_job_sync(message, future_results)) self._create_safe_task(self.handle_running_container(**out, future_results=future_results)) await self._timeout_watcher.register_container(out["container_id"], out["orig_time_limit"], out["orig_hard_time_limit"])
[ "async", "def", "new_job", "(", "self", ",", "message", ":", "BackendNewJob", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Received request for jobid %s\"", ",", "message", ".", "job_id", ")", "future_results", "=", "asyncio", ".", "Future", "(", "...
Handles a new job: starts the grading container
[ "Handles", "a", "new", "job", ":", "starts", "the", "grading", "container" ]
python
train
python-rope/rope
rope/contrib/codeassist.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/codeassist.py#L43-L58
def starting_offset(source_code, offset): """Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function. """ word_finder = worder.Worder(source_code, True) expression, starting, starting_offset = \ word_finder.get_splitted_primary_before(offset) return starting_offset
[ "def", "starting_offset", "(", "source_code", ",", "offset", ")", ":", "word_finder", "=", "worder", ".", "Worder", "(", "source_code", ",", "True", ")", "expression", ",", "starting", ",", "starting_offset", "=", "word_finder", ".", "get_splitted_primary_before",...
Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function.
[ "Return", "the", "offset", "in", "which", "the", "completion", "should", "be", "inserted" ]
python
train
tensorflow/tensorboard
tensorboard/plugins/hparams/backend_context.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/backend_context.py#L91-L108
def _find_experiment_tag(self): """Finds the experiment associcated with the metadata.EXPERIMENT_TAG tag. Caches the experiment if it was found. Returns: The experiment or None if no such experiment is found. """ with self._experiment_from_tag_lock: if self._experiment_from_tag is None: mapping = self.multiplexer.PluginRunToTagToContent( metadata.PLUGIN_NAME) for tag_to_content in mapping.values(): if metadata.EXPERIMENT_TAG in tag_to_content: self._experiment_from_tag = metadata.parse_experiment_plugin_data( tag_to_content[metadata.EXPERIMENT_TAG]) break return self._experiment_from_tag
[ "def", "_find_experiment_tag", "(", "self", ")", ":", "with", "self", ".", "_experiment_from_tag_lock", ":", "if", "self", ".", "_experiment_from_tag", "is", "None", ":", "mapping", "=", "self", ".", "multiplexer", ".", "PluginRunToTagToContent", "(", "metadata", ...
Finds the experiment associcated with the metadata.EXPERIMENT_TAG tag. Caches the experiment if it was found. Returns: The experiment or None if no such experiment is found.
[ "Finds", "the", "experiment", "associcated", "with", "the", "metadata", ".", "EXPERIMENT_TAG", "tag", "." ]
python
train
devassistant/devassistant
devassistant/gui/gui_helper.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/gui_helper.py#L451-L464
def create_cell_renderer_combo(self, tree_view, title="title", assign=0, editable=False, model=None, function=None): """' Function creates a CellRendererCombo with title, model """ renderer_combo = Gtk.CellRendererCombo() renderer_combo.set_property('editable', editable) if model: renderer_combo.set_property('model', model) if function: renderer_combo.connect("edited", function) renderer_combo.set_property("text-column", 0) renderer_combo.set_property("has-entry", False) column = Gtk.TreeViewColumn(title, renderer_combo, text=assign) tree_view.append_column(column)
[ "def", "create_cell_renderer_combo", "(", "self", ",", "tree_view", ",", "title", "=", "\"title\"", ",", "assign", "=", "0", ",", "editable", "=", "False", ",", "model", "=", "None", ",", "function", "=", "None", ")", ":", "renderer_combo", "=", "Gtk", "...
Function creates a CellRendererCombo with title, model
[ "Function", "creates", "a", "CellRendererCombo", "with", "title", "model" ]
python
train
cmorisse/ikp3db
ikp3db.py
https://github.com/cmorisse/ikp3db/blob/a0f318d4e8494b2e6f2f07ec0f1202ca023c920f/ikp3db.py#L473-L481
def clear(self): """ Clear a breakpoint by removing it from all lists. """ del IKBreakpoint.breakpoints_by_file_and_line[self.file_name, self.line_number] IKBreakpoint.breakpoints_by_number[self.number] = None IKBreakpoint.breakpoints_files[self.file_name].remove(self.line_number) if len(IKBreakpoint.breakpoints_files[self.file_name]) == 0: del IKBreakpoint.breakpoints_files[self.file_name] IKBreakpoint.update_active_breakpoint_flag()
[ "def", "clear", "(", "self", ")", ":", "del", "IKBreakpoint", ".", "breakpoints_by_file_and_line", "[", "self", ".", "file_name", ",", "self", ".", "line_number", "]", "IKBreakpoint", ".", "breakpoints_by_number", "[", "self", ".", "number", "]", "=", "None", ...
Clear a breakpoint by removing it from all lists.
[ "Clear", "a", "breakpoint", "by", "removing", "it", "from", "all", "lists", "." ]
python
train
dswah/pyGAM
pygam/datasets/load_datasets.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L22-L52
def mcycle(return_X_y=True): """motorcyle acceleration dataset Parameters ---------- return_X_y : bool, if True, returns a model-ready tuple of data (X, y) otherwise, returns a Pandas DataFrame Returns ------- model-ready tuple of data (X, y) OR Pandas DataFrame Notes ----- X contains the times after the impact. y contains the acceleration. Source: https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html """ # y is real # recommend LinearGAM motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0) if return_X_y: X = motor.times.values y = motor.accel return _clean_X_y(X, y) return motor
[ "def", "mcycle", "(", "return_X_y", "=", "True", ")", ":", "# y is real", "# recommend LinearGAM", "motor", "=", "pd", ".", "read_csv", "(", "PATH", "+", "'/mcycle.csv'", ",", "index_col", "=", "0", ")", "if", "return_X_y", ":", "X", "=", "motor", ".", "...
motorcyle acceleration dataset Parameters ---------- return_X_y : bool, if True, returns a model-ready tuple of data (X, y) otherwise, returns a Pandas DataFrame Returns ------- model-ready tuple of data (X, y) OR Pandas DataFrame Notes ----- X contains the times after the impact. y contains the acceleration. Source: https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
[ "motorcyle", "acceleration", "dataset" ]
python
train
singularityhub/singularity-cli
spython/oci/cmd/actions.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/oci/cmd/actions.py#L173-L198
def attach(self, container_id=None, sudo=False): '''attach to a container instance based on container_id Parameters ========== container_id: the container_id to delete sudo: whether to issue the command with sudo (or not) a container started with sudo will belong to the root user If started by a user, the user needs to control deleting it Returns ======= return_code: the return code from the delete command. 0 indicates a successful delete, 255 indicates not. ''' sudo = self._get_sudo(sudo) container_id = self.get_container_id(container_id) # singularity oci delete cmd = self._init_command('attach') # Add the container_id cmd.append(container_id) # Delete the container, return code goes to user (message to screen) return self._run_and_return(cmd, sudo)
[ "def", "attach", "(", "self", ",", "container_id", "=", "None", ",", "sudo", "=", "False", ")", ":", "sudo", "=", "self", ".", "_get_sudo", "(", "sudo", ")", "container_id", "=", "self", ".", "get_container_id", "(", "container_id", ")", "# singularity oci...
attach to a container instance based on container_id Parameters ========== container_id: the container_id to delete sudo: whether to issue the command with sudo (or not) a container started with sudo will belong to the root user If started by a user, the user needs to control deleting it Returns ======= return_code: the return code from the delete command. 0 indicates a successful delete, 255 indicates not.
[ "attach", "to", "a", "container", "instance", "based", "on", "container_id" ]
python
train
rchatterjee/pwmodels
src/pwmodel/readpw.py
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/readpw.py#L198-L212
def sample_pws(self, n, asperdist=True): """Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse. """ if asperdist: sample = np.random.choice( self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf ) else: sample = np.random.choice(len(self._T), size=n) return (self._T.restore_key(i) for i in sample)
[ "def", "sample_pws", "(", "self", ",", "n", ",", "asperdist", "=", "True", ")", ":", "if", "asperdist", ":", "sample", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "_freq_list", ".", "shape", "[", "0", "]", ",", "size", "=", "n", ",...
Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse.
[ "Returns", "n", "passwords", "sampled", "from", "this", "password", "dataset", ".", "if", "asperdist", "is", "True", "then", "returns", "the", "password", "sampled", "according", "the", "password", "histogram", "distribution", "(", "with", "replacement", ")", "....
python
train
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L1559-L1594
def cleanup_unattached_disks(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Cleans up all disks associated with the account, which are not attached. *** CAUTION *** This is a destructive function with no undo button, and no "Are you sure?" confirmation! CLI Examples: .. code-block:: bash salt-cloud -f cleanup_unattached_disks my-azure name=my_disk salt-cloud -f cleanup_unattached_disks my-azure name=my_disk delete_vhd=True ''' if call != 'function': raise SaltCloudSystemExit( 'The delete_disk function must be called with -f or --function.' ) if kwargs is None: kwargs = {} disks = list_disks(kwargs=kwargs, conn=conn, call='function') for disk in disks: if disks[disk]['attached_to'] is None: del_kwargs = { 'name': disks[disk]['name'], 'delete_vhd': kwargs.get('delete_vhd', False) } log.info( 'Deleting disk %s, deleting VHD: %s', del_kwargs['name'], del_kwargs['delete_vhd'] ) data = delete_disk(kwargs=del_kwargs, call='function') return True
[ "def", "cleanup_unattached_disks", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The delete_disk function must be called with -f or --function.'",...
.. versionadded:: 2015.8.0 Cleans up all disks associated with the account, which are not attached. *** CAUTION *** This is a destructive function with no undo button, and no "Are you sure?" confirmation! CLI Examples: .. code-block:: bash salt-cloud -f cleanup_unattached_disks my-azure name=my_disk salt-cloud -f cleanup_unattached_disks my-azure name=my_disk delete_vhd=True
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
merll/docker-fabric
dockerfabric/apiclient.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/apiclient.py#L197-L221
def login(self, **kwargs): """ Identical to :meth:`dockermap.client.base.DockerClientWrapper.login` with two enhancements: * additional logging; * login parameters can be passed through ``kwargs``, or set as default using the following ``env`` variables: * ``env.docker_registry_user`` (kwarg: ``username``), * ``env.docker_registry_password`` (kwarg: ``password``), * ``env.docker_registry_mail`` (kwarg: ``email``), * ``env.docker_registry_repository`` (kwarg: ``registry``), * ``env.docker_registry_insecure`` (kwarg: ``insecure_registry``). """ c_user = kwargs.pop('username', env.get('docker_registry_user')) c_pass = kwargs.pop('password', env.get('docker_registry_password')) c_mail = kwargs.pop('email', env.get('docker_registry_mail')) c_registry = kwargs.pop('registry', env.get('docker_registry_repository')) c_insecure = kwargs.pop('insecure_registry', env.get('docker_registry_insecure')) if super(DockerFabricClient, self).login(c_user, password=c_pass, email=c_mail, registry=c_registry, insecure_registry=c_insecure, **kwargs): self.push_log("Login at registry '{0}' succeeded.".format(c_registry)) return True self.push_log("Login at registry '{0}' failed.".format(c_registry)) return False
[ "def", "login", "(", "self", ",", "*", "*", "kwargs", ")", ":", "c_user", "=", "kwargs", ".", "pop", "(", "'username'", ",", "env", ".", "get", "(", "'docker_registry_user'", ")", ")", "c_pass", "=", "kwargs", ".", "pop", "(", "'password'", ",", "env...
Identical to :meth:`dockermap.client.base.DockerClientWrapper.login` with two enhancements: * additional logging; * login parameters can be passed through ``kwargs``, or set as default using the following ``env`` variables: * ``env.docker_registry_user`` (kwarg: ``username``), * ``env.docker_registry_password`` (kwarg: ``password``), * ``env.docker_registry_mail`` (kwarg: ``email``), * ``env.docker_registry_repository`` (kwarg: ``registry``), * ``env.docker_registry_insecure`` (kwarg: ``insecure_registry``).
[ "Identical", "to", ":", "meth", ":", "dockermap", ".", "client", ".", "base", ".", "DockerClientWrapper", ".", "login", "with", "two", "enhancements", ":" ]
python
train
apache/airflow
airflow/models/taskinstance.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L470-L474
def key(self): """ Returns a tuple that identifies the task instance uniquely """ return self.dag_id, self.task_id, self.execution_date, self.try_number
[ "def", "key", "(", "self", ")", ":", "return", "self", ".", "dag_id", ",", "self", ".", "task_id", ",", "self", ".", "execution_date", ",", "self", ".", "try_number" ]
Returns a tuple that identifies the task instance uniquely
[ "Returns", "a", "tuple", "that", "identifies", "the", "task", "instance", "uniquely" ]
python
test
spacetelescope/stsci.tools
lib/stsci/tools/cfgpars.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/cfgpars.py#L386-L390
def setPar(theDict, name, value): """ Sets a par's value without having to give its scope/section. """ section, previousVal = findFirstPar(theDict, name) # "section" is the actual object, not a copy section[name] = value
[ "def", "setPar", "(", "theDict", ",", "name", ",", "value", ")", ":", "section", ",", "previousVal", "=", "findFirstPar", "(", "theDict", ",", "name", ")", "# \"section\" is the actual object, not a copy", "section", "[", "name", "]", "=", "value" ]
Sets a par's value without having to give its scope/section.
[ "Sets", "a", "par", "s", "value", "without", "having", "to", "give", "its", "scope", "/", "section", "." ]
python
train
jbeluch/xbmcswift2
xbmcswift2/plugin.py
https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/plugin.py#L224-L238
def cached_route(self, url_rule, name=None, options=None, TTL=None): '''A decorator to add a route to a view and also apply caching. The url_rule, name and options arguments are the same arguments for the route function. The TTL argument if given will passed along to the caching decorator. ''' route_decorator = self.route(url_rule, name=name, options=options) if TTL: cache_decorator = self.cached(TTL) else: cache_decorator = self.cached() def new_decorator(func): return route_decorator(cache_decorator(func)) return new_decorator
[ "def", "cached_route", "(", "self", ",", "url_rule", ",", "name", "=", "None", ",", "options", "=", "None", ",", "TTL", "=", "None", ")", ":", "route_decorator", "=", "self", ".", "route", "(", "url_rule", ",", "name", "=", "name", ",", "options", "=...
A decorator to add a route to a view and also apply caching. The url_rule, name and options arguments are the same arguments for the route function. The TTL argument if given will passed along to the caching decorator.
[ "A", "decorator", "to", "add", "a", "route", "to", "a", "view", "and", "also", "apply", "caching", ".", "The", "url_rule", "name", "and", "options", "arguments", "are", "the", "same", "arguments", "for", "the", "route", "function", ".", "The", "TTL", "ar...
python
train
sander76/aio-powerview-api
aiopvapi/helpers/api_base.py
https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/helpers/api_base.py#L86-L93
async def get_resources(self, **kwargs) -> dict: """Get a list of resources. :raises PvApiError when an error occurs. """ resources = await self.request.get(self._base_path, **kwargs) self._sanitize_resources(resources) return resources
[ "async", "def", "get_resources", "(", "self", ",", "*", "*", "kwargs", ")", "->", "dict", ":", "resources", "=", "await", "self", ".", "request", ".", "get", "(", "self", ".", "_base_path", ",", "*", "*", "kwargs", ")", "self", ".", "_sanitize_resource...
Get a list of resources. :raises PvApiError when an error occurs.
[ "Get", "a", "list", "of", "resources", "." ]
python
train
bslatkin/dpxdt
dpxdt/client/workers.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L227-L236
def error(self): """Returns the error for this barrier and all work items, if any.""" # Copy the error from any failed item to be the error for the whole # barrier. The first error seen "wins". Also handles the case where # the WorkItems passed into the barrier have already completed and # been marked with errors. for item in self: if isinstance(item, WorkItem) and item.error: return item.error return None
[ "def", "error", "(", "self", ")", ":", "# Copy the error from any failed item to be the error for the whole", "# barrier. The first error seen \"wins\". Also handles the case where", "# the WorkItems passed into the barrier have already completed and", "# been marked with errors.", "for", "ite...
Returns the error for this barrier and all work items, if any.
[ "Returns", "the", "error", "for", "this", "barrier", "and", "all", "work", "items", "if", "any", "." ]
python
train