repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
chainer/chainerui
chainerui/tasks/crawl_result.py
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/tasks/crawl_result.py#L14-L27
def load_result_json(result_path, json_file_name):
    """Read a JSON file located under ``result_path`` and return its content.

    Returns an empty list when the file does not exist or cannot be parsed.
    """
    json_path = os.path.join(result_path, json_file_name)
    if not os.path.isfile(json_path):
        return []
    loaded = []
    with open(json_path) as json_data:
        try:
            loaded = json.load(json_data)
        except ValueError as err:
            # keep going with an empty result; just record the parse failure
            logger.error(
                'Failed to load json: {}, {}'.format(json_path, err))
    return loaded
[ "def", "load_result_json", "(", "result_path", ",", "json_file_name", ")", ":", "json_path", "=", "os", ".", "path", ".", "join", "(", "result_path", ",", "json_file_name", ")", "_list", "=", "[", "]", "if", "os", ".", "path", ".", "isfile", "(", "json_p...
load_result_json.
[ "load_result_json", "." ]
python
train
elyase/geotext
geotext/geotext.py
https://github.com/elyase/geotext/blob/21a8a7f5eebea40f270beef9ede4d9a57e3c81c3/geotext/geotext.py#L15-L60
def read_table(filename, usecols=(0, 1), sep='\t', comment='#',
               encoding='utf-8', skip=0):
    """Parse data files from the data directory

    Parameters
    ----------
    filename: string
        Full path to file

    usecols: list, default [0, 1]
        A list of two elements representing the columns to be parsed into a
        dictionary. The first element will be used as keys and the second as
        values. Defaults to the first two columns of `filename`.

    sep : string, default '\t'
        Field delimiter.

    comment : str, default '#'
        Indicates remainder of line should not be parsed. If found at the
        beginning of a line, the line will be ignored altogether. This
        parameter must be a single character.

    encoding : string, default 'utf-8'
        Encoding to use for UTF when reading/writing (ex. `utf-8`)

    skip: int, default 0
        Number of lines to skip at the beginning of the file

    Returns
    -------
    A dictionary with the same length as the number of lines in `filename`
    """
    table = {}
    with io.open(filename, 'r', encoding=encoding) as f:
        # skip initial lines
        for _ in range(skip):
            next(f)
        for line in f:
            # ignore comment lines entirely
            if line.startswith(comment):
                continue
            columns = line.split(sep)
            table[columns[usecols[0]].lower()] = columns[usecols[1]].rstrip('\n')
    return table
[ "def", "read_table", "(", "filename", ",", "usecols", "=", "(", "0", ",", "1", ")", ",", "sep", "=", "'\\t'", ",", "comment", "=", "'#'", ",", "encoding", "=", "'utf-8'", ",", "skip", "=", "0", ")", ":", "with", "io", ".", "open", "(", "filename"...
Parse data files from the data directory Parameters ---------- filename: string Full path to file usecols: list, default [0, 1] A list of two elements representing the columns to be parsed into a dictionary. The first element will be used as keys and the second as values. Defaults to the first two columns of `filename`. sep : string, default '\t' Field delimiter. comment : str, default '#' Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. encoding : string, default 'utf-8' Encoding to use for UTF when reading/writing (ex. `utf-8`) skip: int, default 0 Number of lines to skip at the beginning of the file Returns ------- A dictionary with the same length as the number of lines in `filename`
[ "Parse", "data", "files", "from", "the", "data", "directory" ]
python
train
angr/angr
angr/keyed_region.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/keyed_region.py#L325-L338
def _store(self, start, obj, size, overwrite=False):
    """
    Store a variable into the storage.

    :param int start:       The beginning address of the variable.
    :param obj:             The object to store.
    :param int size:        Size of the object to store.
    :param bool overwrite:  Whether existing objects should be overwritten
                            or not.
    :return:                None
    """
    # wrap the raw object so it can be tracked by its unique id
    wrapped = StoredObject(start, obj, size)
    self._object_mapping[wrapped.obj_id] = wrapped
    self.__store(wrapped, overwrite=overwrite)
[ "def", "_store", "(", "self", ",", "start", ",", "obj", ",", "size", ",", "overwrite", "=", "False", ")", ":", "stored_object", "=", "StoredObject", "(", "start", ",", "obj", ",", "size", ")", "self", ".", "_object_mapping", "[", "stored_object", ".", ...
Store a variable into the storage. :param int start: The beginning address of the variable. :param obj: The object to store. :param int size: Size of the object to store. :param bool overwrite: Whether existing objects should be overwritten or not. :return: None
[ "Store", "a", "variable", "into", "the", "storage", "." ]
python
train
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L1721-L1729
def ssh_session(key_filename, username, ip_address, *cli):
    """ opens a ssh shell to the host """
    # flatten the argument tuples into a single remote command string
    remote_command = "".join(chain.from_iterable(cli))
    local('ssh -t -i %s %s@%s %s' % (key_filename, username,
                                     ip_address, remote_command))
[ "def", "ssh_session", "(", "key_filename", ",", "username", ",", "ip_address", ",", "*", "cli", ")", ":", "local", "(", "'ssh -t -i %s %s@%s %s'", "%", "(", "key_filename", ",", "username", ",", "ip_address", ",", "\"\"", ".", "join", "(", "chain", ".", "f...
opens a ssh shell to the host
[ "opens", "a", "ssh", "shell", "to", "the", "host" ]
python
train
radjkarl/fancyTools
fancytools/math/findPeaks.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/findPeaks.py#L6-L12
def findMax(arr):
    """ in comparison to argrelmax() more simple and reliable peak finder """
    # boolean mask, True where a peak is detected
    peaks = np.zeros(shape=arr.shape, dtype=bool)
    _calcMax(arr, peaks)
    return peaks
[ "def", "findMax", "(", "arr", ")", ":", "out", "=", "np", ".", "zeros", "(", "shape", "=", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "_calcMax", "(", "arr", ",", "out", ")", "return", "out" ]
in comparison to argrelmax() more simple and reliable peak finder
[ "in", "comparison", "to", "argrelmax", "()", "more", "simple", "and", "reliable", "peak", "finder" ]
python
train
projectatomic/osbs-client
osbs/build/plugins_configuration.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L332-L342
def render_import_image(self, use_auth=None):
    """
    Configure the import_image plugin
    """
    # import_image is a multi-phase plugin
    imagestream = self.user_params.imagestream_name.value
    if imagestream is None:
        # no imagestream configured -> the plugin cannot run at all
        self.pt.remove_plugin('exit_plugins', 'import_image',
                              'imagestream not in user parameters')
        return
    if self.pt.has_plugin_conf('exit_plugins', 'import_image'):
        self.pt.set_plugin_arg('exit_plugins', 'import_image',
                               'imagestream', imagestream)
[ "def", "render_import_image", "(", "self", ",", "use_auth", "=", "None", ")", ":", "# import_image is a multi-phase plugin", "if", "self", ".", "user_params", ".", "imagestream_name", ".", "value", "is", "None", ":", "self", ".", "pt", ".", "remove_plugin", "(",...
Configure the import_image plugin
[ "Configure", "the", "import_image", "plugin" ]
python
train
hydpy-dev/hydpy
hydpy/models/hland/hland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hland/hland_model.py#L13-L57
def calc_tc_v1(self):
    """Adjust the measured air temperature to the altitude of the
    individual zones.

    Required control parameters:
      |NmbZones| |TCAlt| |ZoneZ| |ZRelT|

    Required input sequence: |hland_inputs.T|

    Calculated flux sequences: |TC|

    Basic equation:
      :math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)`

    Examples:

        Prepare two zones, the first one lying at the reference height
        and the second one 200 meters above:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(2)
        >>> zrelt(2.0)
        >>> zonez(2.0, 4.0)

        Applying the usual temperature lapse rate of 0.6°C/100m does
        not affect the temperature of the first zone but reduces the
        temperature of the second zone by 1.2°C:

        >>> tcalt(0.6)
        >>> inputs.t = 5.0
        >>> model.calc_tc_v1()
        >>> fluxes.tc
        tc(5.0, 3.8)
    """
    control = self.parameters.control.fastaccess
    inputs = self.sequences.inputs.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        # altitude correction relative to the reference height
        lapse = control.tcalt[zone] * (control.zonez[zone] - control.zrelt)
        fluxes.tc[zone] = inputs.t - lapse
[ "def", "calc_tc_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "inp", "=", "self", ".", "sequences", ".", "inputs", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fas...
Adjust the measured air temperature to the altitude of the individual zones. Required control parameters: |NmbZones| |TCAlt| |ZoneZ| |ZRelT| Required input sequence: |hland_inputs.T| Calculated flux sequences: |TC| Basic equation: :math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)` Examples: Prepare two zones, the first one lying at the reference height and the second one 200 meters above: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(2) >>> zrelt(2.0) >>> zonez(2.0, 4.0) Applying the usual temperature lapse rate of 0.6°C/100m does not affect the temperature of the first zone but reduces the temperature of the second zone by 1.2°C: >>> tcalt(0.6) >>> inputs.t = 5.0 >>> model.calc_tc_v1() >>> fluxes.tc tc(5.0, 3.8)
[ "Adjust", "the", "measured", "air", "temperature", "to", "the", "altitude", "of", "the", "individual", "zones", "." ]
python
train
bokeh/bokeh
bokeh/client/util.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/client/util.py#L73-L99
def websocket_url_for_server_url(url):
    ''' Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
    the appropriate ``ws(s)`` URL

    Args:
        url (str):
            An ``http(s)`` URL

    Returns:
        str:
            The corresponding ``ws(s)`` URL ending in ``/ws``

    Raises:
        ValueError:
            If the input URL is not of the proper form.

    '''
    # swap the scheme, keeping the ":" and everything after it
    if url.startswith("https:"):
        reprotocoled = "wss" + url[len("https"):]
    elif url.startswith("http:"):
        reprotocoled = "ws" + url[len("http"):]
    else:
        raise ValueError("URL has unknown protocol " + url)
    return reprotocoled + ("ws" if reprotocoled.endswith("/") else "/ws")
[ "def", "websocket_url_for_server_url", "(", "url", ")", ":", "if", "url", ".", "startswith", "(", "\"http:\"", ")", ":", "reprotocoled", "=", "\"ws\"", "+", "url", "[", "4", ":", "]", "elif", "url", ".", "startswith", "(", "\"https:\"", ")", ":", "reprot...
Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into the appropriate ``ws(s)`` URL Args: url (str): An ``http(s)`` URL Returns: str: The corresponding ``ws(s)`` URL ending in ``/ws`` Raises: ValueError: If the input URL is not of the proper form.
[ "Convert", "an", "http", "(", "s", ")", "URL", "for", "a", "Bokeh", "server", "websocket", "endpoint", "into", "the", "appropriate", "ws", "(", "s", ")", "URL" ]
python
train
angr/angr
angr/state_plugins/uc_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/uc_manager.py#L33-L52
def assign(self, dst_addr_ast):
    """
    Assign a new region for under-constrained symbolic execution.

    :param dst_addr_ast: the symbolic AST which address of the new allocated
                         region will be assigned to.
    :return: as ast of memory address that points to a new region
    """
    depth = dst_addr_ast.uc_alloc_depth
    if depth > self._max_alloc_depth:
        raise SimUCManagerAllocationError(
            'Current allocation depth %d is greater than the cap (%d)' %
            (depth, self._max_alloc_depth))
    # carve the next free slot out of the region pool
    region_addr = self._region_base + self._pos
    region_index = (region_addr - self._region_base) // self._region_size
    ptr = self.state.solver.BVV(region_addr, self.state.arch.bits)
    self._pos += self._region_size
    # remember at which allocation depth this region was handed out
    self._alloc_depth_map[region_index] = depth
    l.debug("Assigned new memory region %s", ptr)
    return ptr
[ "def", "assign", "(", "self", ",", "dst_addr_ast", ")", ":", "if", "dst_addr_ast", ".", "uc_alloc_depth", ">", "self", ".", "_max_alloc_depth", ":", "raise", "SimUCManagerAllocationError", "(", "'Current allocation depth %d is greater than the cap (%d)'", "%", "(", "dst...
Assign a new region for under-constrained symbolic execution. :param dst_addr_ast: the symbolic AST which address of the new allocated region will be assigned to. :return: as ast of memory address that points to a new region
[ "Assign", "a", "new", "region", "for", "under", "-", "constrained", "symbolic", "execution", "." ]
python
train
shoebot/shoebot
lib/web/wikipedia.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/wikipedia.py#L674-L709
def parse_links(self, markup):
    """ Returns a list of internal Wikipedia links in the markup.

    # A Wikipedia link looks like:
    # [[List of operating systems#Embedded | List of embedded operating systems]]
    # It does not contain a colon, this indicates images, users, languages, etc.

    The return value is a list containing the first part of the link,
    without the anchor.
    """
    pages = []
    for raw in re.findall(self.re["link"], markup):
        # Strip template noise such as [[{{{1|Universe (disambiguation)}}}]]
        if raw.find("{") >= 0:
            raw = re.sub("\{{1,3}[0-9]{0,2}\|", "", raw)
            raw = raw.replace("{", "")
            raw = raw.replace("}", "")
        # drop the display text (after "|") and the anchor (after "#")
        page = raw.split("|")[0].split("#")[0].strip()
        if page not in pages:
            pages.append(page)
    pages.sort()
    return pages
[ "def", "parse_links", "(", "self", ",", "markup", ")", ":", "links", "=", "[", "]", "m", "=", "re", ".", "findall", "(", "self", ".", "re", "[", "\"link\"", "]", ",", "markup", ")", "for", "link", "in", "m", ":", "# We don't like [[{{{1|Universe (disam...
Returns a list of internal Wikipedia links in the markup. # A Wikipedia link looks like: # [[List of operating systems#Embedded | List of embedded operating systems]] # It does not contain a colon, this indicates images, users, languages, etc. The return value is a list containing the first part of the link, without the anchor.
[ "Returns", "a", "list", "of", "internal", "Wikipedia", "links", "in", "the", "markup", "." ]
python
valid
saltstack/salt
salt/modules/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L427-L443
def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
    '''
    Gets info about the given api key

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_api_key apigw_api_key

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        response = conn.get_api_key(apiKey=apiKey)
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    # normalize datetime fields before handing the payload back
    return {'apiKey': _convert_datetime_str(response)}
[ "def", "describe_api_key", "(", "apiKey", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "k...
Gets info about the given api key CLI Example: .. code-block:: bash salt myminion boto_apigateway.describe_api_key apigw_api_key
[ "Gets", "info", "about", "the", "given", "api", "key" ]
python
train
eerimoq/bincopy
bincopy.py
https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L1358-L1373
def crop(self, minimum_address, maximum_address):
    """Keep given range and discard the rest.

    `minimum_address` is the first word address to keep (including).

    `maximum_address` is the last word address to keep (excluding).

    """
    # convert word addresses to byte addresses
    lower = minimum_address * self.word_size_bytes
    upper = maximum_address * self.word_size_bytes
    end_of_data = self._segments.maximum_address
    # drop everything before and after the requested window
    self._segments.remove(0, lower)
    self._segments.remove(upper, end_of_data)
[ "def", "crop", "(", "self", ",", "minimum_address", ",", "maximum_address", ")", ":", "minimum_address", "*=", "self", ".", "word_size_bytes", "maximum_address", "*=", "self", ".", "word_size_bytes", "maximum_address_address", "=", "self", ".", "_segments", ".", "...
Keep given range and discard the rest. `minimum_address` is the first word address to keep (including). `maximum_address` is the last word address to keep (excluding).
[ "Keep", "given", "range", "and", "discard", "the", "rest", "." ]
python
train
Legobot/Legobot
Legobot/Connectors/IRC.py
https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/IRC.py#L122-L131
def on_privmsg(self, c, e):
    """
    This function runs when the bot receives a private message (query).
    """
    logger.debug('{0!s}'.format(e.source))
    metadata = self.set_metadata(e)
    # mark the message so downstream handlers know it was a query
    metadata['is_private_message'] = True
    message = Message(text=e.arguments[0], metadata=metadata)
    self.baseplate.tell(message.__dict__)
[ "def", "on_privmsg", "(", "self", ",", "c", ",", "e", ")", ":", "text", "=", "e", ".", "arguments", "[", "0", "]", "logger", ".", "debug", "(", "'{0!s}'", ".", "format", "(", "e", ".", "source", ")", ")", "metadata", "=", "self", ".", "set_metada...
This function runs when the bot receives a private message (query).
[ "This", "function", "runs", "when", "the", "bot", "receives", "a", "private", "message", "(", "query", ")", "." ]
python
train
PmagPy/PmagPy
SPD/lib/lib_tail_check_statistics.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_tail_check_statistics.py#L58-L66
def get_delta_TR(tail_check_max, y_int):
    """
    input: tail_check_max, y_intercept
    output: delta_TR
    """
    # undefined when there is no tail check value
    if tail_check_max == 0 or numpy.isnan(tail_check_max):
        return float('nan')
    return (old_div(tail_check_max, abs(y_int))) * 100.
[ "def", "get_delta_TR", "(", "tail_check_max", ",", "y_int", ")", ":", "if", "tail_check_max", "==", "0", "or", "numpy", ".", "isnan", "(", "tail_check_max", ")", ":", "return", "float", "(", "'nan'", ")", "delta_TR", "=", "(", "old_div", "(", "tail_check_m...
input: tail_check_max, y_intercept output: delta_TR
[ "input", ":", "tail_check_max", "y_intercept", "output", ":", "delta_TR" ]
python
train
erdewit/ib_insync
ib_insync/ib.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L1563-L1571
def replaceFA(self, faDataType: int, xml: str):
    """
    Replaces Financial Advisor's settings.

    Args:
        faDataType: See :meth:`.requestFA`.
        xml: The XML-formatted configuration string.
    """
    # thin delegation to the underlying client connection
    self.client.replaceFA(faDataType, xml)
[ "def", "replaceFA", "(", "self", ",", "faDataType", ":", "int", ",", "xml", ":", "str", ")", ":", "self", ".", "client", ".", "replaceFA", "(", "faDataType", ",", "xml", ")" ]
Replaces Financial Advisor's settings. Args: faDataType: See :meth:`.requestFA`. xml: The XML-formatted configuration string.
[ "Replaces", "Financial", "Advisor", "s", "settings", "." ]
python
train
bpannier/simpletr64
simpletr64/devicetr64.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/devicetr64.py#L591-L642
def loadDeviceDefinitions(self, urlOfXMLDefinition, timeout=3):
    """Loads the device definitions from a given URL which points to the root XML in the device.

    This loads the device definitions which is needed in case you like to:

    * get additional information's about the device like manufacture, device type, etc
    * get all support service types of this device
    * use the convenient actions classes part of this library in the actions module

    :param str urlOfXMLDefinition: the URL to the root XML which sets the device definitions.
    :param float timeout: the timeout for downloading
    :raises ValueError: if the XML could not be parsed correctly
    :raises requests.exceptions.ConnectionError: when the device definitions can not be downloaded
    :raises requests.exceptions.ConnectTimeout: when download time out

    .. seealso::

        :meth:`~simpletr64.DeviceTR64.loadSCPD`, :meth:`~simpletr64.DeviceTR64.deviceServiceDefinitions`,
        :meth:`~simpletr64.DeviceTR64.deviceInformations`, :meth:`~simpletr64.DeviceTR64.deviceSCPD`,
        :meth:`~simpletr64.DeviceTR64.getSCDPURL`, :meth:`~simpletr64.DeviceTR64.getControlURL`,
        :meth:`~simpletr64.DeviceTR64.getEventSubURL`
    """
    # setup proxies
    # BUGFIX: the proxies dict used to be reassigned per scheme, so a
    # configured HTTPS proxy was silently dropped whenever an HTTP proxy was
    # also set; now both entries coexist (requests expects one key per scheme)
    proxies = {}

    if self.__httpsProxy:
        proxies["https"] = self.__httpsProxy

    if self.__httpProxy:
        proxies["http"] = self.__httpProxy

    # some devices response differently without a User-Agent
    headers = {"User-Agent": "Mozilla/5.0; SimpleTR64-1"}

    # setup authentication
    auth = None
    if self.__password:
        auth = HTTPDigestAuth(self.__username, self.__password)

    # get the content
    request = requests.get(urlOfXMLDefinition, proxies=proxies, headers=headers, timeout=float(timeout),
                           auth=auth, verify=self.__verify)

    if request.status_code != 200:
        errorStr = DeviceTR64._extractErrorString(request)
        raise ValueError('Could not get CPE definitions "' + urlOfXMLDefinition + '" : ' +
                         str(request.status_code) + ' - ' + request.reason + " -- " + errorStr)

    # parse XML return
    xml = request.text.encode('utf-8')

    return self._loadDeviceDefinitions(urlOfXMLDefinition, xml)
[ "def", "loadDeviceDefinitions", "(", "self", ",", "urlOfXMLDefinition", ",", "timeout", "=", "3", ")", ":", "# setup proxies", "proxies", "=", "{", "}", "if", "self", ".", "__httpsProxy", ":", "proxies", "=", "{", "\"https\"", ":", "self", ".", "__httpsProxy...
Loads the device definitions from a given URL which points to the root XML in the device. This loads the device definitions which is needed in case you like to: * get additional information's about the device like manufacture, device type, etc * get all support service types of this device * use the convenient actions classes part of this library in the actions module :param str urlOfXMLDefinition: the URL to the root XML which sets the device definitions. :param float timeout: the timeout for downloading :raises ValueError: if the XML could not be parsed correctly :raises requests.exceptions.ConnectionError: when the device definitions can not be downloaded :raises requests.exceptions.ConnectTimeout: when download time out .. seealso:: :meth:`~simpletr64.DeviceTR64.loadSCPD`, :meth:`~simpletr64.DeviceTR64.deviceServiceDefinitions`, :meth:`~simpletr64.DeviceTR64.deviceInformations`, :meth:`~simpletr64.DeviceTR64.deviceSCPD`, :meth:`~simpletr64.DeviceTR64.getSCDPURL`, :meth:`~simpletr64.DeviceTR64.getControlURL`, :meth:`~simpletr64.DeviceTR64.getEventSubURL`
[ "Loads", "the", "device", "definitions", "from", "a", "given", "URL", "which", "points", "to", "the", "root", "XML", "in", "the", "device", "." ]
python
train
twilio/twilio-python
twilio/rest/ip_messaging/v1/service/channel/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v1/service/channel/__init__.py#L157-L166
def get(self, sid):
    """
    Constructs a ChannelContext

    :param sid: The sid

    :returns: twilio.rest.chat.v1.service.channel.ChannelContext
    :rtype: twilio.rest.chat.v1.service.channel.ChannelContext
    """
    service_sid = self._solution['service_sid']
    return ChannelContext(self._version, service_sid=service_sid, sid=sid)
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "ChannelContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a ChannelContext :param sid: The sid :returns: twilio.rest.chat.v1.service.channel.ChannelContext :rtype: twilio.rest.chat.v1.service.channel.ChannelContext
[ "Constructs", "a", "ChannelContext" ]
python
train
Cologler/fsoopify-python
fsoopify/paths.py
https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/paths.py#L152-L160
def from_caller_file():
    '''return a `Path` from the path of caller file'''
    import inspect
    # frame 0 is this function, frame 1 is whoever called us
    outer_frames = inspect.getouterframes(inspect.currentframe(), 2)
    caller_path = outer_frames[1].filename
    if not os.path.isfile(caller_path):
        raise RuntimeError('caller is not a file')
    return Path(caller_path)
[ "def", "from_caller_file", "(", ")", ":", "import", "inspect", "curframe", "=", "inspect", ".", "currentframe", "(", ")", "calframe", "=", "inspect", ".", "getouterframes", "(", "curframe", ",", "2", ")", "filename", "=", "calframe", "[", "1", "]", ".", ...
return a `Path` from the path of caller file
[ "return", "a", "Path", "from", "the", "path", "of", "caller", "file" ]
python
train
mapbox/mapboxgl-jupyter
mapboxgl/utils.py
https://github.com/mapbox/mapboxgl-jupyter/blob/f6e403c13eaa910e70659c7d179e8e32ce95ae34/mapboxgl/utils.py#L188-L228
def create_color_stops(breaks, colors='RdYlGn', color_ramps=color_ramps):
    """Convert a list of breaks into color stops using colors from colorBrewer
    or a custom list of color values in RGB, RGBA, HSL, CSS text, or HEX format.
    See www.colorbrewer2.org for a list of color options to pass
    """
    num_breaks = len(breaks)

    if isinstance(colors, list):
        # Check if colors contain a list of color values
        if len(colors) == 0 or len(colors) != num_breaks:
            raise ValueError(
                'custom color list must be of same length as breaks list')
        for color in colors:
            # Check if color is valid string
            try:
                Colour(color)
            except:
                raise ValueError(
                    'The color code {color} is in the wrong format'.format(color=color))
        return [[b, colors[i]] for i, b in enumerate(breaks)]

    if colors not in color_ramps.keys():
        raise ValueError('color does not exist in colorBrewer!')
    try:
        ramp = color_ramps[colors][num_breaks]
    except KeyError:
        raise ValueError("Color ramp {} does not have a {} breaks".format(
            colors, num_breaks))
    return [[b, ramp[i]] for i, b in enumerate(breaks)]
[ "def", "create_color_stops", "(", "breaks", ",", "colors", "=", "'RdYlGn'", ",", "color_ramps", "=", "color_ramps", ")", ":", "num_breaks", "=", "len", "(", "breaks", ")", "stops", "=", "[", "]", "if", "isinstance", "(", "colors", ",", "list", ")", ":", ...
Convert a list of breaks into color stops using colors from colorBrewer or a custom list of color values in RGB, RGBA, HSL, CSS text, or HEX format. See www.colorbrewer2.org for a list of color options to pass
[ "Convert", "a", "list", "of", "breaks", "into", "color", "stops", "using", "colors", "from", "colorBrewer", "or", "a", "custom", "list", "of", "color", "values", "in", "RGB", "RGBA", "HSL", "CSS", "text", "or", "HEX", "format", ".", "See", "www", ".", ...
python
train
agoragames/chai
chai/stub.py
https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L263-L269
def expect(self):
    ''' Add an expectation to this stub. Return the expectation. '''
    # record the expectation so it can be verified later
    expectation = Expectation(self)
    self._expectations.append(expectation)
    return expectation
[ "def", "expect", "(", "self", ")", ":", "exp", "=", "Expectation", "(", "self", ")", "self", ".", "_expectations", ".", "append", "(", "exp", ")", "return", "exp" ]
Add an expectation to this stub. Return the expectation.
[ "Add", "an", "expectation", "to", "this", "stub", ".", "Return", "the", "expectation", "." ]
python
train
numenta/nupic
src/nupic/regions/tm_region.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/tm_region.py#L65-L114
def _buildArgs(f, self=None, kwargs={}):
    """
    Get the default arguments from the function and assign as instance vars.

    Return a list of 3-tuples with (name, description, defaultValue) for each
    argument to the function.

    Assigns all arguments to the function as instance variables of TMRegion.
    If the argument was not provided, uses the default value.

    Pops any values from kwargs that go to the function.
    """
    # Get the name, description, and default value for each argument;
    # the first entry is always 'self', which we drop
    argTuples = getArgumentDescriptions(f)[1:]

    # Get the names of the parameters to our own constructor and remove them
    # Check for _originial_init first, because if LockAttributesMixin is used,
    # __init__'s signature will be just (self, *args, **kw), but
    # _original_init is created with the original signature
    #init = getattr(self, '_original_init', self.__init__)
    constructorArgs = [t[0] for t in getArgumentDescriptions(TMRegion.__init__)]

    # Also remove a few other names that aren't in our constructor but are
    # computed automatically (e.g. numberOfCols for the TM)
    constructorArgs.append('numberOfCols')  # TM

    argTuples = [t for t in argTuples if t[0] not in constructorArgs]

    # Assign each remaining argument onto the instance, if one was given
    if self:
        for argTuple in argTuples:
            name = argTuple[0]
            if name in kwargs:
                # Argument was provided
                value = kwargs.pop(name)
            elif len(argTuple) == 2:
                # Argument was not provided and there is no default value
                raise TypeError("Must provide '%s'" % name)
            else:
                # Argument was not provided; fall back to its default value
                value = argTuple[2]
            setattr(self, name, value)

    return argTuples
[ "def", "_buildArgs", "(", "f", ",", "self", "=", "None", ",", "kwargs", "=", "{", "}", ")", ":", "# Get the name, description, and default value for each argument", "argTuples", "=", "getArgumentDescriptions", "(", "f", ")", "argTuples", "=", "argTuples", "[", "1"...
Get the default arguments from the function and assign as instance vars. Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. Assigns all arguments to the function as instance variables of TMRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function.
[ "Get", "the", "default", "arguments", "from", "the", "function", "and", "assign", "as", "instance", "vars", "." ]
python
valid
lsbardel/python-stdnet
stdnet/utils/jsontools.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L98-L154
def flat_to_nested(data, instance=None, attname=None, separator=None, loads=None): '''Convert a flat representation of a dictionary to a nested representation. Fields in the flat representation are separated by the *splitter* parameters. :parameter data: a flat dictionary of key value pairs. :parameter instance: optional instance of a model. :parameter attribute: optional attribute of a model. :parameter separator: optional separator. Default ``"__"``. :parameter loads: optional data unserializer. :rtype: a nested dictionary''' separator = separator or JSPLITTER val = {} flat_vals = {} for key, value in iteritems(data): if value is None: continue keys = key.split(separator) # first key equal to the attribute name if attname: if keys.pop(0) != attname: continue if loads: value = loads(value) # if an instance is available, inject the flat attribute if not keys: if value is None: val = flat_vals = {} break else: continue else: flat_vals[key] = value d = val lk = keys[-1] for k in keys[:-1]: if k not in d: nd = {} d[k] = nd else: nd = d[k] if not isinstance(nd, dict): nd = {'': nd} d[k] = nd d = nd if lk not in d: d[lk] = value else: d[lk][''] = value if instance and flat_vals: for attr, value in iteritems(flat_vals): setattr(instance, attr, value) return val
[ "def", "flat_to_nested", "(", "data", ",", "instance", "=", "None", ",", "attname", "=", "None", ",", "separator", "=", "None", ",", "loads", "=", "None", ")", ":", "separator", "=", "separator", "or", "JSPLITTER", "val", "=", "{", "}", "flat_vals", "=...
Convert a flat representation of a dictionary to a nested representation. Fields in the flat representation are separated by the *splitter* parameters. :parameter data: a flat dictionary of key value pairs. :parameter instance: optional instance of a model. :parameter attribute: optional attribute of a model. :parameter separator: optional separator. Default ``"__"``. :parameter loads: optional data unserializer. :rtype: a nested dictionary
[ "Convert", "a", "flat", "representation", "of", "a", "dictionary", "to", "a", "nested", "representation", ".", "Fields", "in", "the", "flat", "representation", "are", "separated", "by", "the", "*", "splitter", "*", "parameters", "." ]
python
train
gem/oq-engine
openquake/hmtk/seismicity/catalogue.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/catalogue.py#L192-L221
def write_catalogue(self, output_file, key_list=SORTED_ATTRIBUTE_LIST): """ Writes the catalogue to file using HTMK format (CSV). :param output_file: Name of the output file :param key_list: Optional list of attribute keys to be exported """ with open(output_file, 'w') as of: writer = csv.DictWriter(of, fieldnames=key_list) writer.writeheader() for i in range(self.get_number_events()): row_dict = {} for key in key_list: if len(self.data[key]) > 0: data = self.data[key][i] if key in self.INT_ATTRIBUTE_LIST: if np.isnan(data): data = '' else: data = int(data) if key in self.FLOAT_ATTRIBUTE_LIST: if np.isnan(data): data = '' else: data = float(data) row_dict[key] = data writer.writerow(row_dict)
[ "def", "write_catalogue", "(", "self", ",", "output_file", ",", "key_list", "=", "SORTED_ATTRIBUTE_LIST", ")", ":", "with", "open", "(", "output_file", ",", "'w'", ")", "as", "of", ":", "writer", "=", "csv", ".", "DictWriter", "(", "of", ",", "fieldnames",...
Writes the catalogue to file using HTMK format (CSV). :param output_file: Name of the output file :param key_list: Optional list of attribute keys to be exported
[ "Writes", "the", "catalogue", "to", "file", "using", "HTMK", "format", "(", "CSV", ")", "." ]
python
train
PiotrDabkowski/Js2Py
js2py/legecy_translators/nodevisitor.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/legecy_translators/nodevisitor.py#L75-L261
def translate(self): """Translates outer operation and calls translate on inner operation. Returns fully translated code.""" if not self.code: return '' new = bracket_replace(self.code) #Check comma operator: cand = new.split(',') #every comma in new must be an operator if len(cand) > 1: #LR return self.lr(cand, js_comma) #Check = operator: # dont split at != or !== or == or === or <= or >= #note <<=, >>= or this >>> will NOT be supported # maybe I will change my mind later # Find this crappy ?: if '?' in new: cond_ind = new.find('?') tenary_start = 0 for ass in re.finditer(ASSIGNMENT_MATCH, new): cand = ass.span()[1] if cand < cond_ind: tenary_start = cand else: break actual_tenary = new[tenary_start:] spl = ''.join(split_at_any(new, [':', '?'], translate=trans)) tenary_translation = transform_crap(spl) assignment = new[:tenary_start] + ' PyJsConstantTENARY' return trans(assignment).replace('PyJsConstantTENARY', tenary_translation) cand = list(split_at_single(new, '=', ['!', '=', '<', '>'], ['='])) if len(cand) > 1: # RL it = reversed(cand) res = trans(it.next()) for e in it: e = e.strip() if not e: raise SyntaxError('Missing left-hand in assignment!') op = '' if e[-2:] in OP_METHODS: op = ',' + e[-2:].__repr__() e = e[:-2] elif e[-1:] in OP_METHODS: op = ',' + e[-1].__repr__() e = e[:-1] e = trans(e) #Now replace last get method with put and change args c = list(bracket_split(e, ['()'])) beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip( ) #strips just to make sure... I will remove it later if beg[-4:] != '.get': raise SyntaxError('Invalid left-hand side in assignment') beg = beg[0:-3] + 'put' arglist = arglist[0:-1] + ', ' + res + op + ')' res = beg + arglist return res #Now check remaining 2 arg operators that are not handled by python #They all have Left to Right (LR) associativity order = [OR, AND, BOR, BXOR, BAND, EQS, COMPS, BSHIFTS, ADDS, MULTS] # actually we dont need OR and AND because they can be handled easier. 
But just for fun dangerous = ['<', '>'] for typ in order: #we have to use special method for ADDS since they can be also unary operation +/++ or -/-- FUCK if '+' in typ: cand = list(split_add_ops(new)) else: #dont translate. cant start or end on dangerous op. cand = list( split_at_any( new, typ.keys(), False, dangerous, dangerous, validitate=comb_validitator)) if not len(cand) > 1: continue n = 1 res = trans(cand[0]) if not res: raise SyntaxError("Missing operand!") while n < len(cand): e = cand[n] if not e: raise SyntaxError("Missing operand!") if n % 2: op = typ[e] else: res = op(res, trans(e)) n += 1 return res #Now replace unary operators - only they are left cand = list( split_at_any( new, UNARY.keys(), False, validitate=unary_validitator)) if len(cand) > 1: #contains unary operators if '++' in cand or '--' in cand: #it cant contain both ++ and -- if '--' in cand: op = '--' meths = js_post_dec, js_pre_dec else: op = '++' meths = js_post_inc, js_pre_inc pos = cand.index(op) if cand[pos - 1].strip(): # post increment a = cand[pos - 1] meth = meths[0] elif cand[pos + 1].strip(): #pre increment a = cand[pos + 1] meth = meths[1] else: raise SyntaxError('Invalid use of ++ operator') if cand[pos + 2:]: raise SyntaxError('Too many operands') operand = meth(trans(a)) cand = cand[:pos - 1] # now last cand should be operand and every other odd element should be empty else: operand = trans(cand[-1]) del cand[-1] for i, e in enumerate(reversed(cand)): if i % 2: if e.strip(): raise SyntaxError('Too many operands') else: operand = UNARY[e](operand) return operand #Replace brackets if new[0] == '@' or new[0] == '#': if len( list(bracket_split(new, ('#{', '@}'))) ) == 1: # we have only one bracket, otherwise pseudobracket like @@.... assert new in REPL if new[0] == '#': raise SyntaxError( '[] cant be used as brackets! 
Use () instead.') return '(' + trans(REPL[new][1:-1]) + ')' #Replace function calls and prop getters # 'now' must be a reference like: a or b.c.d but it can have also calls or getters ( for example a["b"](3)) #From here @@ means a function call and ## means get operation (note they dont have to present) it = bracket_split(new, ('#{', '@}')) res = [] for e in it: if e[0] != '#' and e[0] != '@': res += [x.strip() for x in e.split('.')] else: res += [e.strip()] # res[0] can be inside @@ (name)... res = filter(lambda x: x, res) if is_internal(res[0]): out = res[0] elif res[0][0] in {'#', '@'}: out = '(' + trans(REPL[res[0]][1:-1]) + ')' elif is_valid_lval( res[0]) or res[0] in {'this', 'false', 'true', 'null'}: out = 'var.get(' + res[0].__repr__() + ')' else: if is_reserved(res[0]): raise SyntaxError('Unexpected reserved word: "%s"' % res[0]) raise SyntaxError('Invalid identifier: "%s"' % res[0]) if len(res) == 1: return out n = 1 while n < len(res): #now every func call is a prop call e = res[n] if e[0] == '@': # direct call out += trans_args(REPL[e]) n += 1 continue args = False #assume not prop call if n + 1 < len(res) and res[n + 1][0] == '@': #prop call args = trans_args(REPL[res[n + 1]])[1:] if args != ')': args = ',' + args if e[0] == '#': prop = trans(REPL[e][1:-1]) else: if not is_lval(e): raise SyntaxError('Invalid identifier: "%s"' % e) prop = e.__repr__() if args: # prop call n += 1 out += '.callprop(' + prop + args else: #prop get out += '.get(' + prop + ')' n += 1 return out
[ "def", "translate", "(", "self", ")", ":", "if", "not", "self", ".", "code", ":", "return", "''", "new", "=", "bracket_replace", "(", "self", ".", "code", ")", "#Check comma operator:", "cand", "=", "new", ".", "split", "(", "','", ")", "#every comma in ...
Translates outer operation and calls translate on inner operation. Returns fully translated code.
[ "Translates", "outer", "operation", "and", "calls", "translate", "on", "inner", "operation", ".", "Returns", "fully", "translated", "code", "." ]
python
valid
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L3119-L3131
def confd_state_internal_cdb_client_lock(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") cdb = ET.SubElement(internal, "cdb") client = ET.SubElement(cdb, "client") lock = ET.SubElement(client, "lock") lock.text = kwargs.pop('lock') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_internal_cdb_client_lock", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "xmlns", "=",...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
EventTeam/beliefs
src/beliefs/cells/lazy.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/lazy.py#L42-L52
def size(self): """ Size of LazyCell: the size of the intension plus accounting for excluded and included additions. The exclusions are assumed to be part of the set The inclusions are assumed to NOT be part of the intension """ return self._size_full_intension \ - len(self.exclude)\ + len(self.include)
[ "def", "size", "(", "self", ")", ":", "return", "self", ".", "_size_full_intension", "-", "len", "(", "self", ".", "exclude", ")", "+", "len", "(", "self", ".", "include", ")" ]
Size of LazyCell: the size of the intension plus accounting for excluded and included additions. The exclusions are assumed to be part of the set The inclusions are assumed to NOT be part of the intension
[ "Size", "of", "LazyCell", ":", "the", "size", "of", "the", "intension", "plus", "accounting", "for", "excluded", "and", "included", "additions", "." ]
python
train
goshuirc/irc
girc/client.py
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/client.py#L548-L559
def nickserv_identify(self, password, use_nick=None): """Identify to NickServ (legacy).""" if self.ready: if use_nick: self.msg(use_nick, 'IDENTIFY {}'.format(password)) else: self.send('NICKSERV', params=['IDENTIFY', password]) else: self.connect_info['nickserv'] = { 'password': password, 'use_nick': use_nick, }
[ "def", "nickserv_identify", "(", "self", ",", "password", ",", "use_nick", "=", "None", ")", ":", "if", "self", ".", "ready", ":", "if", "use_nick", ":", "self", ".", "msg", "(", "use_nick", ",", "'IDENTIFY {}'", ".", "format", "(", "password", ")", ")...
Identify to NickServ (legacy).
[ "Identify", "to", "NickServ", "(", "legacy", ")", "." ]
python
train
ZEDGR/pychal
challonge/api.py
https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/api.py#L100-L132
def _parse(data): """Recursively convert a json into python data types""" if not data: return [] elif isinstance(data, (tuple, list)): return [_parse(subdata) for subdata in data] # extract the nested dict. ex. {"tournament": {"url": "7k1safq" ...}} d = {ik: v for k in data.keys() for ik, v in data[k].items()} # convert datetime strings to datetime objects # and float number strings to float to_parse = dict(d) for k, v in to_parse.items(): if k in { "name", "display_name", "display_name_with_invitation_email_address", "username", "challonge_username"}: continue # do not test type of fields which are always strings if isinstance(v, TEXT_TYPE): try: dt = iso8601.parse_date(v) d[k] = dt.astimezone(tz) except iso8601.ParseError: try: d[k] = float(v) except ValueError: pass return d
[ "def", "_parse", "(", "data", ")", ":", "if", "not", "data", ":", "return", "[", "]", "elif", "isinstance", "(", "data", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "_parse", "(", "subdata", ")", "for", "subdata", "in", "data", ...
Recursively convert a json into python data types
[ "Recursively", "convert", "a", "json", "into", "python", "data", "types" ]
python
train
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/distro_lib/ip_forwarding_utils.py#L262-L282
def GetForwardedIps(self, interface, interface_ip=None): """Retrieve the list of configured forwarded IP addresses. Args: interface: string, the output device to query. interface_ip: string, current interface ip address. Returns: list, the IP address strings. """ try: ips = netifaces.ifaddresses(interface) ips = ips[netifaces.AF_INET] except (ValueError, IndexError): return [] forwarded_ips = [] for ip in ips: if ip['addr'] != interface_ip: full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits()) forwarded_ips.append(full_addr) return self.ParseForwardedIps(forwarded_ips)
[ "def", "GetForwardedIps", "(", "self", ",", "interface", ",", "interface_ip", "=", "None", ")", ":", "try", ":", "ips", "=", "netifaces", ".", "ifaddresses", "(", "interface", ")", "ips", "=", "ips", "[", "netifaces", ".", "AF_INET", "]", "except", "(", ...
Retrieve the list of configured forwarded IP addresses. Args: interface: string, the output device to query. interface_ip: string, current interface ip address. Returns: list, the IP address strings.
[ "Retrieve", "the", "list", "of", "configured", "forwarded", "IP", "addresses", "." ]
python
train
hyperledger/indy-sdk
wrappers/python/indy/ledger.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/ledger.py#L981-L1018
async def parse_get_revoc_reg_def_response(get_revoc_ref_def_response: str) -> (str, str): """ Parse a GET_REVOC_REG_DEF response to get Revocation Registry Definition in the format compatible with Anoncreds API. :param get_revoc_ref_def_response: response of GET_REVOC_REG_DEF request. :return: Revocation Registry Definition Id and Revocation Registry Definition json. { "id": string - ID of the Revocation Registry, "revocDefType": string - Revocation Registry type (only CL_ACCUM is supported for now), "tag": string - Unique descriptive ID of the Registry, "credDefId": string - ID of the corresponding CredentialDefinition, "value": Registry-specific data { "issuanceType": string - Type of Issuance(ISSUANCE_BY_DEFAULT or ISSUANCE_ON_DEMAND), "maxCredNum": number - Maximum number of credentials the Registry can serve. "tailsHash": string - Hash of tails. "tailsLocation": string - Location of tails file. "publicKeys": <public_keys> - Registry's public key. }, "ver": string - version of revocation registry definition json. } """ logger = logging.getLogger(__name__) logger.debug("parse_get_revoc_reg_def_response: >>> get_revoc_ref_def_response: %r", get_revoc_ref_def_response) if not hasattr(parse_get_revoc_reg_def_response, "cb"): logger.debug("parse_get_revoc_reg_def_response: Creating callback") parse_get_revoc_reg_def_response.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_get_revoc_ref_def_response = c_char_p(get_revoc_ref_def_response.encode('utf-8')) (revoc_reg_def_id, revoc_reg_def_json) = await do_call('indy_parse_get_revoc_reg_def_response', c_get_revoc_ref_def_response, parse_get_revoc_reg_def_response.cb) res = (revoc_reg_def_id.decode(), revoc_reg_def_json.decode()) logger.debug("parse_get_revoc_reg_def_response: <<< res: %r", res) return res
[ "async", "def", "parse_get_revoc_reg_def_response", "(", "get_revoc_ref_def_response", ":", "str", ")", "->", "(", "str", ",", "str", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"parse_get_revoc_reg_...
Parse a GET_REVOC_REG_DEF response to get Revocation Registry Definition in the format compatible with Anoncreds API. :param get_revoc_ref_def_response: response of GET_REVOC_REG_DEF request. :return: Revocation Registry Definition Id and Revocation Registry Definition json. { "id": string - ID of the Revocation Registry, "revocDefType": string - Revocation Registry type (only CL_ACCUM is supported for now), "tag": string - Unique descriptive ID of the Registry, "credDefId": string - ID of the corresponding CredentialDefinition, "value": Registry-specific data { "issuanceType": string - Type of Issuance(ISSUANCE_BY_DEFAULT or ISSUANCE_ON_DEMAND), "maxCredNum": number - Maximum number of credentials the Registry can serve. "tailsHash": string - Hash of tails. "tailsLocation": string - Location of tails file. "publicKeys": <public_keys> - Registry's public key. }, "ver": string - version of revocation registry definition json. }
[ "Parse", "a", "GET_REVOC_REG_DEF", "response", "to", "get", "Revocation", "Registry", "Definition", "in", "the", "format", "compatible", "with", "Anoncreds", "API", "." ]
python
train
pkgw/pwkit
pwkit/lmmin.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L2511-L2529
def _lmder1_rosenbrock(): """Rosenbrock function (lmder test #4)""" def func(params, vec): vec[0] = 10 * (params[1] - params[0]**2) vec[1] = 1 - params[0] def jac(params, jac): jac[0,0] = -20 * params[0] jac[0,1] = -1 jac[1,0] = 10 jac[1,1] = 0 guess = np.asfarray([-1.2, 1]) norm1s = [0.491934955050e+01, 0.134006305822e+04, 0.1430000511923e+06] for i in range(3): _lmder1_driver(2, func, jac, guess * 10**i, norm1s[i], 0, [1, 1])
[ "def", "_lmder1_rosenbrock", "(", ")", ":", "def", "func", "(", "params", ",", "vec", ")", ":", "vec", "[", "0", "]", "=", "10", "*", "(", "params", "[", "1", "]", "-", "params", "[", "0", "]", "**", "2", ")", "vec", "[", "1", "]", "=", "1"...
Rosenbrock function (lmder test #4)
[ "Rosenbrock", "function", "(", "lmder", "test", "#4", ")" ]
python
train
kislyuk/aegea
aegea/packages/github3/repos/release.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/release.py#L169-L197
def download(self, path=''): """Download the data for this asset. :param path: (optional), path where the file should be saved to, default is the filename provided in the headers and will be written in the current directory. it can take a file-like object as well :type path: str, file :returns: bool -- True if successful, False otherwise """ headers = { 'Accept': 'application/octet-stream' } resp = self._get(self._api, allow_redirects=False, stream=True, headers=headers) if resp.status_code == 302: # Amazon S3 will reject the redirected request unless we omit # certain request headers headers.update({ 'Content-Type': None, }) with self._session.no_auth(): resp = self._get(resp.headers['location'], stream=True, headers=headers) if self._boolean(resp, 200, 404): stream_response_to_file(resp, path) return True return False
[ "def", "download", "(", "self", ",", "path", "=", "''", ")", ":", "headers", "=", "{", "'Accept'", ":", "'application/octet-stream'", "}", "resp", "=", "self", ".", "_get", "(", "self", ".", "_api", ",", "allow_redirects", "=", "False", ",", "stream", ...
Download the data for this asset. :param path: (optional), path where the file should be saved to, default is the filename provided in the headers and will be written in the current directory. it can take a file-like object as well :type path: str, file :returns: bool -- True if successful, False otherwise
[ "Download", "the", "data", "for", "this", "asset", "." ]
python
train
programa-stic/barf-project
barf/arch/x86/translator.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/arch/x86/translator.py#L261-L322
def resolve_memory_access(self, tb, x86_mem_operand): """Return operand memory access translation. """ size = self.__get_memory_access_size(x86_mem_operand) addr = None if x86_mem_operand.base: addr = ReilRegisterOperand(x86_mem_operand.base, size) if x86_mem_operand.index and x86_mem_operand.scale != 0x0: index = ReilRegisterOperand(x86_mem_operand.index, size) scale = ReilImmediateOperand(x86_mem_operand.scale, size) scaled_index = tb.temporal(size) tb.add(tb._builder.gen_mul(index, scale, scaled_index)) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, scaled_index, tmp)) addr = tmp else: addr = scaled_index if x86_mem_operand.displacement != 0x0: disp = ReilImmediateOperand(x86_mem_operand.displacement, size) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, disp, tmp)) addr = tmp else: addr = disp else: if not addr: disp = ReilImmediateOperand(x86_mem_operand.displacement, size) addr = disp # TODO Improve this code and add support for the rest of the segments. if x86_mem_operand.segment in ["gs", "fs"]: seg_base_addr_map = { "gs": "gs_base_addr", "fs": "fs_base_addr", } seg_base = ReilRegisterOperand(seg_base_addr_map[x86_mem_operand.segment], size) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, seg_base, tmp)) addr = tmp else: addr = seg_base return addr
[ "def", "resolve_memory_access", "(", "self", ",", "tb", ",", "x86_mem_operand", ")", ":", "size", "=", "self", ".", "__get_memory_access_size", "(", "x86_mem_operand", ")", "addr", "=", "None", "if", "x86_mem_operand", ".", "base", ":", "addr", "=", "ReilRegis...
Return operand memory access translation.
[ "Return", "operand", "memory", "access", "translation", "." ]
python
train
avelino/bottle-auth
bottle_auth/core/auth.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/auth.py#L142-L146
def require_setting(self, name, feature="this feature"): """Raises an exception if the given app setting is not defined.""" if name not in self.settings: raise Exception("You must define the '%s' setting in your " "application to use %s" % (name, feature))
[ "def", "require_setting", "(", "self", ",", "name", ",", "feature", "=", "\"this feature\"", ")", ":", "if", "name", "not", "in", "self", ".", "settings", ":", "raise", "Exception", "(", "\"You must define the '%s' setting in your \"", "\"application to use %s\"", "...
Raises an exception if the given app setting is not defined.
[ "Raises", "an", "exception", "if", "the", "given", "app", "setting", "is", "not", "defined", "." ]
python
test
orbingol/NURBS-Python
geomdl/helpers.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/helpers.py#L71-L91
def find_span_linear(degree, knot_vector, num_ctrlpts, knot, **kwargs): """ Finds the span of a single knot over the knot vector using linear search. Alternative implementation for the Algorithm A2.1 from The NURBS Book by Piegl & Tiller. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param num_ctrlpts: number of control points, :math:`n + 1` :type num_ctrlpts: int :param knot: knot or parameter, :math:`u` :type knot: float :return: knot span :rtype: int """ span = 0 # Knot span index starts from zero while span < num_ctrlpts and knot_vector[span] <= knot: span += 1 return span - 1
[ "def", "find_span_linear", "(", "degree", ",", "knot_vector", ",", "num_ctrlpts", ",", "knot", ",", "*", "*", "kwargs", ")", ":", "span", "=", "0", "# Knot span index starts from zero", "while", "span", "<", "num_ctrlpts", "and", "knot_vector", "[", "span", "]...
Finds the span of a single knot over the knot vector using linear search. Alternative implementation for the Algorithm A2.1 from The NURBS Book by Piegl & Tiller. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param num_ctrlpts: number of control points, :math:`n + 1` :type num_ctrlpts: int :param knot: knot or parameter, :math:`u` :type knot: float :return: knot span :rtype: int
[ "Finds", "the", "span", "of", "a", "single", "knot", "over", "the", "knot", "vector", "using", "linear", "search", "." ]
python
train
chrislit/abydos
abydos/corpus/_ngram_corpus.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/corpus/_ngram_corpus.py#L222-L258
def tf(self, term): r"""Return term frequency. Parameters ---------- term : str The term for which to calculate tf Returns ------- float The term frequency (tf) Raises ------ ValueError tf can only calculate the frequency of individual words Examples -------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> ngcorp = NGramCorpus(Corpus(tqbf)) >>> NGramCorpus(Corpus(tqbf)).tf('the') 1.3010299956639813 >>> NGramCorpus(Corpus(tqbf)).tf('fox') 1.0 """ if ' ' in term: raise ValueError( 'tf can only calculate the term frequency of individual words' ) tcount = self.get_count(term) if tcount == 0: return 0.0 return 1 + log10(tcount)
[ "def", "tf", "(", "self", ",", "term", ")", ":", "if", "' '", "in", "term", ":", "raise", "ValueError", "(", "'tf can only calculate the term frequency of individual words'", ")", "tcount", "=", "self", ".", "get_count", "(", "term", ")", "if", "tcount", "==",...
r"""Return term frequency. Parameters ---------- term : str The term for which to calculate tf Returns ------- float The term frequency (tf) Raises ------ ValueError tf can only calculate the frequency of individual words Examples -------- >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n' >>> tqbf += 'And then it slept.\n And the dog ran off.' >>> ngcorp = NGramCorpus(Corpus(tqbf)) >>> NGramCorpus(Corpus(tqbf)).tf('the') 1.3010299956639813 >>> NGramCorpus(Corpus(tqbf)).tf('fox') 1.0
[ "r", "Return", "term", "frequency", "." ]
python
valid
ibis-project/ibis
ibis/mapd/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/mapd/client.py#L473-L483
def create_database(self, name, owner=None): """ Create a new MapD database Parameters ---------- name : string Database name """ statement = ddl.CreateDatabase(name, owner=owner) self._execute(statement)
[ "def", "create_database", "(", "self", ",", "name", ",", "owner", "=", "None", ")", ":", "statement", "=", "ddl", ".", "CreateDatabase", "(", "name", ",", "owner", "=", "owner", ")", "self", ".", "_execute", "(", "statement", ")" ]
Create a new MapD database Parameters ---------- name : string Database name
[ "Create", "a", "new", "MapD", "database" ]
python
train
jeffh/describe
describe/spec/containers.py
https://github.com/jeffh/describe/blob/6a33ffecc3340b57e60bc8a7095521882ff9a156/describe/spec/containers.py#L175-L178
def before(self, context): "Invokes all before functions with context passed to them." run.before_each.execute(context) self._invoke(self._before, context)
[ "def", "before", "(", "self", ",", "context", ")", ":", "run", ".", "before_each", ".", "execute", "(", "context", ")", "self", ".", "_invoke", "(", "self", ".", "_before", ",", "context", ")" ]
Invokes all before functions with context passed to them.
[ "Invokes", "all", "before", "functions", "with", "context", "passed", "to", "them", "." ]
python
train
lago-project/lago
lago/workdir.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/workdir.py#L258-L284
def add_prefix(self, name, *args, **kwargs): """ Adds a new prefix to the workdir. Args: name(str): Name of the new prefix to add *args: args to pass along to the prefix constructor *kwargs: kwargs to pass along to the prefix constructor Returns: The newly created prefix Raises: LagoPrefixAlreadyExistsError: if prefix name already exists in the workdir """ if os.path.exists(self.join(name)): raise LagoPrefixAlreadyExistsError(name, self.path) self.prefixes[name] = self.prefix_class( self.join(name), *args, **kwargs ) self.prefixes[name].initialize() if self.current is None: self.set_current(name) return self.prefixes[name]
[ "def", "add_prefix", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "join", "(", "name", ")", ")", ":", "raise", "LagoPrefixAlreadyExistsError", "(", "name", ...
Adds a new prefix to the workdir. Args: name(str): Name of the new prefix to add *args: args to pass along to the prefix constructor *kwargs: kwargs to pass along to the prefix constructor Returns: The newly created prefix Raises: LagoPrefixAlreadyExistsError: if prefix name already exists in the workdir
[ "Adds", "a", "new", "prefix", "to", "the", "workdir", "." ]
python
train
google/prettytensor
prettytensor/recurrent_networks.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/recurrent_networks.py#L554-L573
def create_sequence_pretty_tensor(sequence_input, shape=None, save_state=True): """Creates a PrettyTensor object for the given sequence. The first dimension is treated as a time-dimension * batch and a default is set for `unroll` and `state_saver`. TODO(eiderman): Remove shape. Args: sequence_input: A SequenceInput or StateSavingSequenceInput shape: The shape of each item in the sequence (including batch). save_state: If true, use the sequence_input's state and save_state methods. Returns: 2 Layers: inputs, targets """ inputs = prettytensor.wrap_sequence(sequence_input.inputs, tensor_shape=shape) targets = prettytensor.wrap_sequence(sequence_input.targets) if save_state: bookkeeper.set_recurrent_state_saver(sequence_input) return inputs, targets
[ "def", "create_sequence_pretty_tensor", "(", "sequence_input", ",", "shape", "=", "None", ",", "save_state", "=", "True", ")", ":", "inputs", "=", "prettytensor", ".", "wrap_sequence", "(", "sequence_input", ".", "inputs", ",", "tensor_shape", "=", "shape", ")",...
Creates a PrettyTensor object for the given sequence. The first dimension is treated as a time-dimension * batch and a default is set for `unroll` and `state_saver`. TODO(eiderman): Remove shape. Args: sequence_input: A SequenceInput or StateSavingSequenceInput shape: The shape of each item in the sequence (including batch). save_state: If true, use the sequence_input's state and save_state methods. Returns: 2 Layers: inputs, targets
[ "Creates", "a", "PrettyTensor", "object", "for", "the", "given", "sequence", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/models/abstract_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/abstract_state.py#L466-L522
def load_meta_data(self, path=None): """Load meta data of state model from the file system The meta data of the state model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of all state elements (data ports, outcomes, etc) are loaded, as those stored in the same file as the meta data of the state. This is either called on the __init__ of a new state model or if a state model for a container state is created, which then calls load_meta_data for all its children. :param str path: Optional file system path to the meta data file. If not given, the path will be derived from the state's path on the filesystem :return: if meta data file was loaded True otherwise False :rtype: bool """ # TODO: for an Execution state this method is called for each hierarchy level again and again, still?? check it! # print("1AbstractState_load_meta_data: ", path, not path) if not path: path = self.state.file_system_path # print("2AbstractState_load_meta_data: ", path) if path is None: self.meta = Vividict({}) return False path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA) # TODO: Should be removed with next minor release if not os.path.exists(path_meta_data): logger.debug("Because meta data was not found in {0} use backup option {1}" "".format(path_meta_data, os.path.join(path, storage.FILE_NAME_META_DATA_OLD))) path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA_OLD) # TODO use the following logger message to debug meta data load process and to avoid maybe repetitive loads # if not os.path.exists(path_meta_data): # logger.info("path not found {0}".format(path_meta_data)) try: # print("try to load meta data from {0} for state {1}".format(path_meta_data, self.state)) tmp_meta = storage.load_data_file(path_meta_data) except ValueError as e: # if no element which is newly generated log a warning # if os.path.exists(os.path.dirname(path)): # logger.debug("Because '{1}' meta data of {0} was not 
loaded properly.".format(self, e)) if not path.startswith(constants.RAFCON_TEMP_PATH_STORAGE) and not os.path.exists(os.path.dirname(path)): logger.debug("Because '{1}' meta data of {0} was not loaded properly.".format(self, e)) tmp_meta = {} # JSON returns a dict, which must be converted to a Vividict tmp_meta = Vividict(tmp_meta) if tmp_meta: self._parse_for_element_meta_data(tmp_meta) # assign the meta data to the state self.meta = tmp_meta self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True)) return True else: # print("nothing to parse", tmp_meta) return False
[ "def", "load_meta_data", "(", "self", ",", "path", "=", "None", ")", ":", "# TODO: for an Execution state this method is called for each hierarchy level again and again, still?? check it!", "# print(\"1AbstractState_load_meta_data: \", path, not path)", "if", "not", "path", ":", "pat...
Load meta data of state model from the file system The meta data of the state model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of all state elements (data ports, outcomes, etc) are loaded, as those stored in the same file as the meta data of the state. This is either called on the __init__ of a new state model or if a state model for a container state is created, which then calls load_meta_data for all its children. :param str path: Optional file system path to the meta data file. If not given, the path will be derived from the state's path on the filesystem :return: if meta data file was loaded True otherwise False :rtype: bool
[ "Load", "meta", "data", "of", "state", "model", "from", "the", "file", "system" ]
python
train
sergiocorreia/panflute
panflute/tools.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L173-L207
def stringify(element, newlines=True): """ Return the raw text version of an elements (and its children element). Example: >>> from panflute import * >>> e1 = Emph(Str('Hello'), Space, Str('world!')) >>> e2 = Strong(Str('Bye!')) >>> para = Para(e1, Space, e2) >>> stringify(para) 'Hello world! Bye!\n\n' :param newlines: add a new line after a paragraph (default True) :type newlines: :class:`bool` :rtype: :class:`str` """ def attach_str(e, doc, answer): if hasattr(e, 'text'): ans = e.text elif isinstance(e, HorizontalSpaces): ans = ' ' elif isinstance(e, VerticalSpaces) and newlines: ans = '\n\n' elif type(e) == Citation: ans = '' else: ans = '' answer.append(ans) answer = [] f = partial(attach_str, answer=answer) element.walk(f) return ''.join(answer)
[ "def", "stringify", "(", "element", ",", "newlines", "=", "True", ")", ":", "def", "attach_str", "(", "e", ",", "doc", ",", "answer", ")", ":", "if", "hasattr", "(", "e", ",", "'text'", ")", ":", "ans", "=", "e", ".", "text", "elif", "isinstance", ...
Return the raw text version of an elements (and its children element). Example: >>> from panflute import * >>> e1 = Emph(Str('Hello'), Space, Str('world!')) >>> e2 = Strong(Str('Bye!')) >>> para = Para(e1, Space, e2) >>> stringify(para) 'Hello world! Bye!\n\n' :param newlines: add a new line after a paragraph (default True) :type newlines: :class:`bool` :rtype: :class:`str`
[ "Return", "the", "raw", "text", "version", "of", "an", "elements", "(", "and", "its", "children", "element", ")", "." ]
python
train
apache/incubator-heron
heron/tools/ui/src/python/handlers/api/topology.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/ui/src/python/handlers/api/topology.py#L148-L167
def get(self, cluster, environ, topology): ''' :param cluster: :param environ: :param topology: :return: ''' start_time = time.time() pplan = yield access.get_physical_plan(cluster, environ, topology) result_map = dict( status="success", message="", version=common.VERSION, executiontime=time.time() - start_time, result=pplan ) self.write(result_map)
[ "def", "get", "(", "self", ",", "cluster", ",", "environ", ",", "topology", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "pplan", "=", "yield", "access", ".", "get_physical_plan", "(", "cluster", ",", "environ", ",", "topology", ")", "re...
:param cluster: :param environ: :param topology: :return:
[ ":", "param", "cluster", ":", ":", "param", "environ", ":", ":", "param", "topology", ":", ":", "return", ":" ]
python
valid
facebook/watchman
python/pywatchman_aio/__init__.py
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman_aio/__init__.py#L291-L297
def close(self): """Close the underlying connection.""" self._closed = True if self.receive_task: self.receive_task.cancel() if self.connection: self.connection.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "_closed", "=", "True", "if", "self", ".", "receive_task", ":", "self", ".", "receive_task", ".", "cancel", "(", ")", "if", "self", ".", "connection", ":", "self", ".", "connection", ".", "close", "...
Close the underlying connection.
[ "Close", "the", "underlying", "connection", "." ]
python
train
gem/oq-engine
openquake/calculators/base.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L65-L79
def fix_ones(pmap): """ Physically, an extremely small intensity measure level can have an extremely large probability of exceedence, however that probability cannot be exactly 1 unless the level is exactly 0. Numerically, the PoE can be 1 and this give issues when calculating the damage (there is a log(0) in :class:`openquake.risklib.scientific.annual_frequency_of_exceedence`). Here we solve the issue by replacing the unphysical probabilities 1 with .9999999999999999 (the float64 closest to 1). """ for sid in pmap: array = pmap[sid].array array[array == 1.] = .9999999999999999 return pmap
[ "def", "fix_ones", "(", "pmap", ")", ":", "for", "sid", "in", "pmap", ":", "array", "=", "pmap", "[", "sid", "]", ".", "array", "array", "[", "array", "==", "1.", "]", "=", ".9999999999999999", "return", "pmap" ]
Physically, an extremely small intensity measure level can have an extremely large probability of exceedence, however that probability cannot be exactly 1 unless the level is exactly 0. Numerically, the PoE can be 1 and this give issues when calculating the damage (there is a log(0) in :class:`openquake.risklib.scientific.annual_frequency_of_exceedence`). Here we solve the issue by replacing the unphysical probabilities 1 with .9999999999999999 (the float64 closest to 1).
[ "Physically", "an", "extremely", "small", "intensity", "measure", "level", "can", "have", "an", "extremely", "large", "probability", "of", "exceedence", "however", "that", "probability", "cannot", "be", "exactly", "1", "unless", "the", "level", "is", "exactly", ...
python
train
pyrogram/pyrogram
pyrogram/client/types/messages_and_media/message.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/types/messages_and_media/message.py#L652-L727
def reply( self, text: str, quote: bool = None, parse_mode: str = "", disable_web_page_preview: bool = None, disable_notification: bool = None, reply_to_message_id: int = None, reply_markup=None ) -> "Message": """Bound method *reply* of :obj:`Message <pyrogram.Message>`. Use as a shortcut for: .. code-block:: python client.send_message( chat_id=message.chat.id, text="hello", reply_to_message_id=message.message_id ) Example: .. code-block:: python message.reply("hello", quote=True) Args: text (``str``): Text of the message to be sent. quote (``bool``, *optional*): If ``True``, the message will be sent as a reply to this message. If *reply_to_message_id* is passed, this parameter will be ignored. Defaults to ``True`` in group chats and ``False`` in private chats. parse_mode (``str``, *optional*): Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message. Defaults to Markdown. disable_web_page_preview (``bool``, *optional*): Disables link previews for links in this message. disable_notification (``bool``, *optional*): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (``int``, *optional*): If the message is a reply, ID of the original message. reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*): Additional interface options. An object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. Returns: On success, the sent Message is returned. 
Raises: :class:`RPCError <pyrogram.RPCError>` """ if quote is None: quote = self.chat.type != "private" if reply_to_message_id is None and quote: reply_to_message_id = self.message_id return self._client.send_message( chat_id=self.chat.id, text=text, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup )
[ "def", "reply", "(", "self", ",", "text", ":", "str", ",", "quote", ":", "bool", "=", "None", ",", "parse_mode", ":", "str", "=", "\"\"", ",", "disable_web_page_preview", ":", "bool", "=", "None", ",", "disable_notification", ":", "bool", "=", "None", ...
Bound method *reply* of :obj:`Message <pyrogram.Message>`. Use as a shortcut for: .. code-block:: python client.send_message( chat_id=message.chat.id, text="hello", reply_to_message_id=message.message_id ) Example: .. code-block:: python message.reply("hello", quote=True) Args: text (``str``): Text of the message to be sent. quote (``bool``, *optional*): If ``True``, the message will be sent as a reply to this message. If *reply_to_message_id* is passed, this parameter will be ignored. Defaults to ``True`` in group chats and ``False`` in private chats. parse_mode (``str``, *optional*): Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message. Defaults to Markdown. disable_web_page_preview (``bool``, *optional*): Disables link previews for links in this message. disable_notification (``bool``, *optional*): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (``int``, *optional*): If the message is a reply, ID of the original message. reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*): Additional interface options. An object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. Returns: On success, the sent Message is returned. Raises: :class:`RPCError <pyrogram.RPCError>`
[ "Bound", "method", "*", "reply", "*", "of", ":", "obj", ":", "Message", "<pyrogram", ".", "Message", ">", "." ]
python
train
pyapi-gitlab/pyapi-gitlab
gitlab/__init__.py
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L2175-L2193
def editlabel(self, project_id, name, new_name=None, color=None): """ Updates an existing label with new name or now color. At least one parameter is required, to update the label. :param project_id: The ID of a project :param name: The name of the label :return: True if succeed """ data = {'name': name, 'new_name': new_name, 'color': color} request = requests.put( '{0}/{1}/labels'.format(self.projects_url, project_id), data=data, verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout) if request.status_code == 200: return request.json() else: return False
[ "def", "editlabel", "(", "self", ",", "project_id", ",", "name", ",", "new_name", "=", "None", ",", "color", "=", "None", ")", ":", "data", "=", "{", "'name'", ":", "name", ",", "'new_name'", ":", "new_name", ",", "'color'", ":", "color", "}", "reque...
Updates an existing label with new name or now color. At least one parameter is required, to update the label. :param project_id: The ID of a project :param name: The name of the label :return: True if succeed
[ "Updates", "an", "existing", "label", "with", "new", "name", "or", "now", "color", ".", "At", "least", "one", "parameter", "is", "required", "to", "update", "the", "label", "." ]
python
train
cocaine/cocaine-tools
cocaine/tools/actions/mql/__init__.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/actions/mql/__init__.py#L186-L205
def expr(self): """ expr ::= term ((AND | OR) term)* term ::= factor ((EQ | NE) factor)* factor ::= func | const | LPAREN expr RPAREN func ::= lit LPAREN expr (,expr)* RPAREN lit ::= alphanum """ node = self.term() while True: token = self.current_token if token.type == AND: self.eat(AND) elif token.type == OR: self.eat(OR) else: break node = Op(op=token, children=[node, self.term()]) return node
[ "def", "expr", "(", "self", ")", ":", "node", "=", "self", ".", "term", "(", ")", "while", "True", ":", "token", "=", "self", ".", "current_token", "if", "token", ".", "type", "==", "AND", ":", "self", ".", "eat", "(", "AND", ")", "elif", "token"...
expr ::= term ((AND | OR) term)* term ::= factor ((EQ | NE) factor)* factor ::= func | const | LPAREN expr RPAREN func ::= lit LPAREN expr (,expr)* RPAREN lit ::= alphanum
[ "expr", "::", "=", "term", "((", "AND", "|", "OR", ")", "term", ")", "*", "term", "::", "=", "factor", "((", "EQ", "|", "NE", ")", "factor", ")", "*", "factor", "::", "=", "func", "|", "const", "|", "LPAREN", "expr", "RPAREN", "func", "::", "="...
python
train
yychen/twd97
twd97/converter.py
https://github.com/yychen/twd97/blob/2fe05dbca335be425a1f451e0ef8f210ec864de1/twd97/converter.py#L80-L86
def tomindecstr(origin): """ Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM.MMMM' """ degrees, minutes = tomindec(origin) return u'%d°%f\'' % (degrees, minutes)
[ "def", "tomindecstr", "(", "origin", ")", ":", "degrees", ",", "minutes", "=", "tomindec", "(", "origin", ")", "return", "u'%d°%f\\'' ", " ", "d", "egrees,", " ", "inutes)", "" ]
Convert [+/-]DDD.DDDDD to [+/-]DDD°MMM.MMMM'
[ "Convert", "[", "+", "/", "-", "]", "DDD", ".", "DDDDD", "to", "[", "+", "/", "-", "]", "DDD°MMM", ".", "MMMM" ]
python
train
thomasjiangcy/django-rest-mock
rest_mock_server/builder.py
https://github.com/thomasjiangcy/django-rest-mock/blob/09e91de20d1a5efd5c47c6e3d7fe979443012e2c/rest_mock_server/builder.py#L177-L245
def build(port=8000, fixtures=None): """ Builds a server file. 1. Extract mock response details from all valid docstrings in existing views 2. Parse and generate mock values 3. Create a store of all endpoints and data 4. Construct server file """ extractor = Extractor() parser = Parser(extractor.url_details, fixtures) parser.parse() url_details = parser.results _store = get_store(url_details) store = json.dumps(_store) variables = str(Variable('let', 'store', store)) functions = DATA_FINDER + GET_HANDLER + MODIFY_HANDLER + POST_HANDLER endpoints = [] endpoint_uris = [] for u in parser.results: endpoint = Endpoint() if u['method'].lower() in ['get', 'post']: method = u['method'].lower() else: method = 'modify' response = str(ResponseBody(method)) # Check in store if the base url has individual instances u['url'], list_url = clean_url(u['full_url'], _store, u['method'].lower()) if list_url is not None and u['method'].lower() == 'get': list_endpoint = Endpoint() list_endpoint.construct('get', list_url, response) if str(list_endpoint) not in endpoints: endpoints.append(str(list_endpoint)) if list_endpoint.uri not in endpoint_uris: endpoint_uris.append(list_endpoint.uri) if method == 'modify': without_prefix = re.sub(r'\/(\w+)\_\_', '', u['url']) for k, v in _store.items(): if without_prefix in k: options = v.get('options', '{}') options = ast.literal_eval(options) modifiers = [] if options is not None: modifiers = options.get('modifiers', []) if modifiers: for mod in modifiers: if u['method'].lower() == mod: mod_endpoint = Endpoint() uri = without_prefix if v.get('position') is not None and v['position'] == 'url': uri = re.sub(r'\/?\_\_key', '/:id', u['full_url']) mod_endpoint.construct(u['method'].lower(), uri, response) if str(mod_endpoint) not in endpoints: endpoints.append(str(mod_endpoint)) if mod_endpoint.uri not in endpoint_uris: endpoint_uris.append(mod_endpoint.uri) else: endpoint.construct(u['method'], u['url'], response) if str(endpoint) not in endpoints: 
endpoints.append(str(endpoint)) if endpoint.uri not in endpoint_uris: endpoint_uris.append(endpoint.uri) endpoints = ''.join(endpoints) express = ExpressServer() express.construct(variables, functions, endpoints, port) return express
[ "def", "build", "(", "port", "=", "8000", ",", "fixtures", "=", "None", ")", ":", "extractor", "=", "Extractor", "(", ")", "parser", "=", "Parser", "(", "extractor", ".", "url_details", ",", "fixtures", ")", "parser", ".", "parse", "(", ")", "url_detai...
Builds a server file. 1. Extract mock response details from all valid docstrings in existing views 2. Parse and generate mock values 3. Create a store of all endpoints and data 4. Construct server file
[ "Builds", "a", "server", "file", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L689-L699
def stop_request(self, stop_now=False): """Send a stop request to the daemon :param stop_now: stop now or go to stop wait mode :type stop_now: bool :return: the daemon response (True) """ logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now) res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'}) return res
[ "def", "stop_request", "(", "self", ",", "stop_now", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Sending stop request to %s, stop now: %s\"", ",", "self", ".", "name", ",", "stop_now", ")", "res", "=", "self", ".", "con", ".", "get", "(", "'sto...
Send a stop request to the daemon :param stop_now: stop now or go to stop wait mode :type stop_now: bool :return: the daemon response (True)
[ "Send", "a", "stop", "request", "to", "the", "daemon" ]
python
train
trentm/cmdln
bin/mkmanpage.py
https://github.com/trentm/cmdln/blob/55e980cf52c9b03e62d2349a7e62c9101d08ae10/bin/mkmanpage.py#L18-L24
def mkmanpage(name): """Return man page content for the given `cmdln.Cmdln` subclass name.""" mod_name, class_name = name.rsplit('.', 1) mod = __import__(mod_name) inst = getattr(mod, class_name)() sections = cmdln.man_sections_from_cmdln(inst) sys.stdout.write(''.join(sections))
[ "def", "mkmanpage", "(", "name", ")", ":", "mod_name", ",", "class_name", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "mod", "=", "__import__", "(", "mod_name", ")", "inst", "=", "getattr", "(", "mod", ",", "class_name", ")", "(", ")", "...
Return man page content for the given `cmdln.Cmdln` subclass name.
[ "Return", "man", "page", "content", "for", "the", "given", "cmdln", ".", "Cmdln", "subclass", "name", "." ]
python
train
nabetama/slacky
slacky/rest/rest.py
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L377-L385
def info(self, file, **kwargs): """ https://slack.com/api/files.info """ self.params.update({ 'file': file, }) if kwargs: self.params.update(kwargs) return FromUrl('https://slack.com/api/files.info', self._requests)(data=self.params).get()
[ "def", "info", "(", "self", ",", "file", ",", "*", "*", "kwargs", ")", ":", "self", ".", "params", ".", "update", "(", "{", "'file'", ":", "file", ",", "}", ")", "if", "kwargs", ":", "self", ".", "params", ".", "update", "(", "kwargs", ")", "re...
https://slack.com/api/files.info
[ "https", ":", "//", "slack", ".", "com", "/", "api", "/", "files", ".", "info" ]
python
train
Cornices/cornice.ext.swagger
examples/minimalist.py
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/examples/minimalist.py#L46-L51
def set_value(request): """Set the value and returns *True* or *False*.""" key = request.matchdict['key'] _VALUES[key] = request.json_body return _VALUES.get(key)
[ "def", "set_value", "(", "request", ")", ":", "key", "=", "request", ".", "matchdict", "[", "'key'", "]", "_VALUES", "[", "key", "]", "=", "request", ".", "json_body", "return", "_VALUES", ".", "get", "(", "key", ")" ]
Set the value and returns *True* or *False*.
[ "Set", "the", "value", "and", "returns", "*", "True", "*", "or", "*", "False", "*", "." ]
python
valid
aaugustin/websockets
src/websockets/headers.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/headers.py#L207-L221
def parse_upgrade_protocol( header: str, pos: int, header_name: str ) -> Tuple[UpgradeProtocol, int]: """ Parse an Upgrade protocol from ``header`` at the given position. Return the protocol value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. """ match = _protocol_re.match(header, pos) if match is None: raise InvalidHeaderFormat(header_name, "expected protocol", header, pos) return cast(UpgradeProtocol, match.group()), match.end()
[ "def", "parse_upgrade_protocol", "(", "header", ":", "str", ",", "pos", ":", "int", ",", "header_name", ":", "str", ")", "->", "Tuple", "[", "UpgradeProtocol", ",", "int", "]", ":", "match", "=", "_protocol_re", ".", "match", "(", "header", ",", "pos", ...
Parse an Upgrade protocol from ``header`` at the given position. Return the protocol value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
[ "Parse", "an", "Upgrade", "protocol", "from", "header", "at", "the", "given", "position", "." ]
python
train
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/admin.py
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/admin.py#L106-L114
def get_reporters(self): """ Converts the report_generators list to a dictionary, and caches the result. :return: A dictionary with such references. """ if not hasattr(self, '_report_generators_by_key'): self._report_generators_by_key = {r.key: r for r in self.report_generators} return self._report_generators_by_key
[ "def", "get_reporters", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_report_generators_by_key'", ")", ":", "self", ".", "_report_generators_by_key", "=", "{", "r", ".", "key", ":", "r", "for", "r", "in", "self", ".", "report_generato...
Converts the report_generators list to a dictionary, and caches the result. :return: A dictionary with such references.
[ "Converts", "the", "report_generators", "list", "to", "a", "dictionary", "and", "caches", "the", "result", ".", ":", "return", ":", "A", "dictionary", "with", "such", "references", "." ]
python
train
Gandi/gandi.cli
gandi/cli/commands/mail.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/mail.py#L140-L158
def purge(gandi, email, background, force, alias): """Purge a mailbox.""" login, domain = email if alias: if not force: proceed = click.confirm('Are you sure to purge all aliases for ' 'mailbox %s@%s ?' % (login, domain)) if not proceed: return result = gandi.mail.set_alias(domain, login, []) else: if not force: proceed = click.confirm('Are you sure to purge mailbox %s@%s ?' % (login, domain)) if not proceed: return result = gandi.mail.purge(domain, login, background) return result
[ "def", "purge", "(", "gandi", ",", "email", ",", "background", ",", "force", ",", "alias", ")", ":", "login", ",", "domain", "=", "email", "if", "alias", ":", "if", "not", "force", ":", "proceed", "=", "click", ".", "confirm", "(", "'Are you sure to pu...
Purge a mailbox.
[ "Purge", "a", "mailbox", "." ]
python
train
projectatomic/atomic-reactor
atomic_reactor/inner.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/inner.py#L531-L578
def build_inside(input_method, input_args=None, substitutions=None): """ use requested input plugin to load configuration and then initiate build """ def process_keyvals(keyvals): """ ["key=val", "x=y"] -> {"key": "val", "x": "y"} """ keyvals = keyvals or [] processed_keyvals = {} for arg in keyvals: key, value = arg.split("=", 1) processed_keyvals[key] = value return processed_keyvals main = __name__.split('.', 1)[0] log_encoding = get_logging_encoding(main) logger.info("log encoding: %s", log_encoding) if not input_method: raise RuntimeError("No input method specified!") logger.debug("getting build json from input %s", input_method) cleaned_input_args = process_keyvals(input_args) cleaned_input_args['substitutions'] = process_keyvals(substitutions) input_runner = InputPluginsRunner([{'name': input_method, 'args': cleaned_input_args}]) build_json = input_runner.run()[input_method] if isinstance(build_json, Exception): raise RuntimeError("Input plugin raised exception: {}".format(build_json)) logger.debug("build json: %s", build_json) if not build_json: raise RuntimeError("No valid build json!") if not isinstance(build_json, dict): raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json)) dbw = DockerBuildWorkflow(**build_json) try: build_result = dbw.build_docker_image() except Exception as e: logger.error('image build failed: %s', e) raise else: if not build_result or build_result.is_failed(): raise RuntimeError("no image built") else: logger.info("build has finished successfully \\o/")
[ "def", "build_inside", "(", "input_method", ",", "input_args", "=", "None", ",", "substitutions", "=", "None", ")", ":", "def", "process_keyvals", "(", "keyvals", ")", ":", "\"\"\" [\"key=val\", \"x=y\"] -> {\"key\": \"val\", \"x\": \"y\"} \"\"\"", "keyvals", "=", "keyv...
use requested input plugin to load configuration and then initiate build
[ "use", "requested", "input", "plugin", "to", "load", "configuration", "and", "then", "initiate", "build" ]
python
train
timknip/pyswf
swf/export.py
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/export.py#L879-L886
def export(self, swf, frame, **export_opts): """ Exports a frame of the specified SWF to SVG. @param swf The SWF. @param frame Which frame to export, by 0-based index (int) """ self.wanted_frame = frame return super(FrameSVGExporterMixin, self).export(swf, *export_opts)
[ "def", "export", "(", "self", ",", "swf", ",", "frame", ",", "*", "*", "export_opts", ")", ":", "self", ".", "wanted_frame", "=", "frame", "return", "super", "(", "FrameSVGExporterMixin", ",", "self", ")", ".", "export", "(", "swf", ",", "*", "export_o...
Exports a frame of the specified SWF to SVG. @param swf The SWF. @param frame Which frame to export, by 0-based index (int)
[ "Exports", "a", "frame", "of", "the", "specified", "SWF", "to", "SVG", "." ]
python
train
google/tangent
tangent/optimization.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/optimization.py#L62-L88
def dead_code_elimination(node): """Perform a simple form of dead code elimination on a Python AST. This method performs reaching definitions analysis on all function definitions. It then looks for the definition of variables that are not used elsewhere and removes those definitions. This function takes into consideration push and pop statements; if a pop statement is removed, it will also try to remove the accompanying push statement. Note that this *requires dead code elimination to be performed on the primal and adjoint simultaneously*. Args: node: The AST to optimize. Returns: The optimized AST. """ to_remove = set(def_[1] for def_ in annotate.unused(node) if not isinstance(def_[1], (gast.arguments, gast.For))) for n in list(to_remove): for succ in gast.walk(n): if anno.getanno(succ, 'push', False): to_remove.add(anno.getanno(succ, 'push')) transformers.Remove(to_remove).visit(node) anno.clearanno(node) return node
[ "def", "dead_code_elimination", "(", "node", ")", ":", "to_remove", "=", "set", "(", "def_", "[", "1", "]", "for", "def_", "in", "annotate", ".", "unused", "(", "node", ")", "if", "not", "isinstance", "(", "def_", "[", "1", "]", ",", "(", "gast", "...
Perform a simple form of dead code elimination on a Python AST. This method performs reaching definitions analysis on all function definitions. It then looks for the definition of variables that are not used elsewhere and removes those definitions. This function takes into consideration push and pop statements; if a pop statement is removed, it will also try to remove the accompanying push statement. Note that this *requires dead code elimination to be performed on the primal and adjoint simultaneously*. Args: node: The AST to optimize. Returns: The optimized AST.
[ "Perform", "a", "simple", "form", "of", "dead", "code", "elimination", "on", "a", "Python", "AST", "." ]
python
train
racitup/static-ranges
static_ranges.py
https://github.com/racitup/static-ranges/blob/a15c2e2bd6f643279ae046494b8714634dd380a4/static_ranges.py#L125-L136
def convert_ranges(cls, ranges, length): """Converts to valid byte ranges""" result = [] for start, end in ranges: if end is None: result.append( (start, length-1) ) elif start is None: s = length - end result.append( (0 if s < 0 else s, length-1) ) else: result.append( (start, end if end < length else length-1) ) return result
[ "def", "convert_ranges", "(", "cls", ",", "ranges", ",", "length", ")", ":", "result", "=", "[", "]", "for", "start", ",", "end", "in", "ranges", ":", "if", "end", "is", "None", ":", "result", ".", "append", "(", "(", "start", ",", "length", "-", ...
Converts to valid byte ranges
[ "Converts", "to", "valid", "byte", "ranges" ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/alias.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/alias.py#L150-L155
def soft_define_alias(self, name, cmd): """Define an alias, but don't raise on an AliasError.""" try: self.define_alias(name, cmd) except AliasError, e: error("Invalid alias: %s" % e)
[ "def", "soft_define_alias", "(", "self", ",", "name", ",", "cmd", ")", ":", "try", ":", "self", ".", "define_alias", "(", "name", ",", "cmd", ")", "except", "AliasError", ",", "e", ":", "error", "(", "\"Invalid alias: %s\"", "%", "e", ")" ]
Define an alias, but don't raise on an AliasError.
[ "Define", "an", "alias", "but", "don", "t", "raise", "on", "an", "AliasError", "." ]
python
test
rgmining/ria
ria/bipartite.py
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L412-L429
def retrieve_reviewers(self, product): """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """ if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product))
[ "def", "retrieve_reviewers", "(", "self", ",", "product", ")", ":", "if", "not", "isinstance", "(", "product", ",", "self", ".", "_product_cls", ")", ":", "raise", "TypeError", "(", "\"Type of given product isn't acceptable:\"", ",", "product", ",", "\", expected:...
Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed.
[ "Retrieve", "reviewers", "who", "reviewed", "a", "given", "product", "." ]
python
train
gem/oq-engine
openquake/hazardlib/geo/mesh.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/mesh.py#L297-L317
def _get_proj_convex_hull(self): """ Create a projection centered in the center of this mesh and define a convex polygon in that projection, enveloping all the points of the mesh. :returns: Tuple of two items: projection function and shapely 2d polygon. Note that the result geometry can be line or point depending on number of points in the mesh and their arrangement. """ # create a projection centered in the center of points collection proj = geo_utils.OrthographicProjection( *geo_utils.get_spherical_bounding_box(self.lons, self.lats)) # project all the points and create a shapely multipoint object. # need to copy an array because otherwise shapely misinterprets it coords = numpy.transpose(proj(self.lons.flat, self.lats.flat)).copy() multipoint = shapely.geometry.MultiPoint(coords) # create a 2d polygon from a convex hull around that multipoint return proj, multipoint.convex_hull
[ "def", "_get_proj_convex_hull", "(", "self", ")", ":", "# create a projection centered in the center of points collection", "proj", "=", "geo_utils", ".", "OrthographicProjection", "(", "*", "geo_utils", ".", "get_spherical_bounding_box", "(", "self", ".", "lons", ",", "s...
Create a projection centered in the center of this mesh and define a convex polygon in that projection, enveloping all the points of the mesh. :returns: Tuple of two items: projection function and shapely 2d polygon. Note that the result geometry can be line or point depending on number of points in the mesh and their arrangement.
[ "Create", "a", "projection", "centered", "in", "the", "center", "of", "this", "mesh", "and", "define", "a", "convex", "polygon", "in", "that", "projection", "enveloping", "all", "the", "points", "of", "the", "mesh", "." ]
python
train
panzarino/mlbgame
mlbgame/game.py
https://github.com/panzarino/mlbgame/blob/0a2d10540de793fdc3b8476aa18f5cf3b53d0b54/mlbgame/game.py#L332-L364
def overview(game_id): """Gets the overview information for the game with matching id.""" output = {} # get data overview = mlbgame.data.get_overview(game_id) # parse data overview_root = etree.parse(overview).getroot() try: output = add_raw_box_score_attributes(output, game_id) except ValueError: pass # get overview attributes for x in overview_root.attrib: output[x] = overview_root.attrib[x] # Get probable starter attributes if they exist home_pitcher_tree = overview_root.find('home_probable_pitcher') if home_pitcher_tree is not None: output.update(build_namespaced_attributes( 'home_probable_pitcher', home_pitcher_tree)) else: output.update(build_probable_starter_defaults('home')) away_pitcher_tree = overview_root.find('away_probable_pitcher') if away_pitcher_tree is not None: output.update(build_namespaced_attributes( 'away_probable_pitcher', away_pitcher_tree)) else: output.update(build_probable_starter_defaults('away')) return output
[ "def", "overview", "(", "game_id", ")", ":", "output", "=", "{", "}", "# get data", "overview", "=", "mlbgame", ".", "data", ".", "get_overview", "(", "game_id", ")", "# parse data", "overview_root", "=", "etree", ".", "parse", "(", "overview", ")", ".", ...
Gets the overview information for the game with matching id.
[ "Gets", "the", "overview", "information", "for", "the", "game", "with", "matching", "id", "." ]
python
train
gagneurlab/concise
concise/legacy/concise.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1062-L1098
def from_dict(cls, obj_dict): """ Load the object from a dictionary (produced with :py:func:`Concise.to_dict`) Returns: Concise: Loaded Concise object. """ # convert the output into a proper form obj_dict['output'] = helper.rec_dict_to_numpy_dict(obj_dict["output"]) helper.dict_to_numpy_dict(obj_dict['output']) if "trained_global_model" in obj_dict.keys(): raise Exception("Found trained_global_model feature in dictionary. Use ConciseCV.load to load this file.") dc = Concise(**obj_dict["param"]) # touch the hidden arguments dc._param = obj_dict["param"] if obj_dict["output"]["weights"] is None: dc._model_fitted = False else: dc._model_fitted = True dc._exec_time = obj_dict["execution_time"] dc.unused_param = obj_dict["unused_param"] dc._accuracy = obj_dict["output"]["accuracy"] dc._splines = obj_dict["output"]["splines"] weights = obj_dict["output"]["weights"] if weights is not None: # fix the dimensionality of X_feat in case it was 0 dimensional if weights["feature_weights"].shape == (0,): weights["feature_weights"].shape = (0, obj_dict["param"]["num_tasks"]) dc._set_var_res(weights) return dc
[ "def", "from_dict", "(", "cls", ",", "obj_dict", ")", ":", "# convert the output into a proper form", "obj_dict", "[", "'output'", "]", "=", "helper", ".", "rec_dict_to_numpy_dict", "(", "obj_dict", "[", "\"output\"", "]", ")", "helper", ".", "dict_to_numpy_dict", ...
Load the object from a dictionary (produced with :py:func:`Concise.to_dict`) Returns: Concise: Loaded Concise object.
[ "Load", "the", "object", "from", "a", "dictionary", "(", "produced", "with", ":", "py", ":", "func", ":", "Concise", ".", "to_dict", ")" ]
python
train
sdispater/orator
orator/migrations/migration_creator.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/migrations/migration_creator.py#L21-L50
def create(self, name, path, table=None, create=False): """ Create a new migration at the given path. :param name: The name of the migration :type name: str :param path: The path of the migrations :type path: str :param table: The table name :type table: str :param create: Whether it's a create migration or not :type create: bool :rtype: str """ path = self._get_path(name, path) if not os.path.exists(os.path.dirname(path)): mkdir_p(os.path.dirname(path)) parent = os.path.join(os.path.dirname(path), "__init__.py") if not os.path.exists(parent): with open(parent, "w"): pass stub = self._get_stub(table, create) with open(path, "w") as fh: fh.write(self._populate_stub(name, stub, table)) return path
[ "def", "create", "(", "self", ",", "name", ",", "path", ",", "table", "=", "None", ",", "create", "=", "False", ")", ":", "path", "=", "self", ".", "_get_path", "(", "name", ",", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", ...
Create a new migration at the given path. :param name: The name of the migration :type name: str :param path: The path of the migrations :type path: str :param table: The table name :type table: str :param create: Whether it's a create migration or not :type create: bool :rtype: str
[ "Create", "a", "new", "migration", "at", "the", "given", "path", "." ]
python
train
devopshq/youtrack
youtrack/connection.py
https://github.com/devopshq/youtrack/blob/c4ec19aca253ae30ac8eee7976a2f330e480a73b/youtrack/connection.py#L312-L315
def get_user(self, login): """ http://confluence.jetbrains.net/display/YTD2/GET+user """ return youtrack.User(self._get("/admin/user/" + urlquote(login.encode('utf8'))), self)
[ "def", "get_user", "(", "self", ",", "login", ")", ":", "return", "youtrack", ".", "User", "(", "self", ".", "_get", "(", "\"/admin/user/\"", "+", "urlquote", "(", "login", ".", "encode", "(", "'utf8'", ")", ")", ")", ",", "self", ")" ]
http://confluence.jetbrains.net/display/YTD2/GET+user
[ "http", ":", "//", "confluence", ".", "jetbrains", ".", "net", "/", "display", "/", "YTD2", "/", "GET", "+", "user" ]
python
train
fulfilio/python-magento
magento/sales.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/sales.py#L418-L432
def addcomment(self, invoice_increment_id, comment=None, email=False, include_comment=False): """ Add comment to invoice or change its state :param invoice_increment_id: Invoice ID """ if comment is None: comment = "" return bool( self.call( 'sales_order_invoice.addComment', [invoice_increment_id, comment, email, include_comment] ) )
[ "def", "addcomment", "(", "self", ",", "invoice_increment_id", ",", "comment", "=", "None", ",", "email", "=", "False", ",", "include_comment", "=", "False", ")", ":", "if", "comment", "is", "None", ":", "comment", "=", "\"\"", "return", "bool", "(", "se...
Add comment to invoice or change its state :param invoice_increment_id: Invoice ID
[ "Add", "comment", "to", "invoice", "or", "change", "its", "state" ]
python
train
trendels/rhino
rhino/mapper.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L405-L413
def add_property(self, name, fn, cached=True): """Adds a property to the Context. See `Mapper.add_ctx_property`, which uses this method to install the properties added on the Mapper level. """ if name in self.__properties: raise KeyError("Trying to add a property '%s' that already exists on this %s object." % (name, self.__class__.__name__)) self.__properties[name] = (fn, cached)
[ "def", "add_property", "(", "self", ",", "name", ",", "fn", ",", "cached", "=", "True", ")", ":", "if", "name", "in", "self", ".", "__properties", ":", "raise", "KeyError", "(", "\"Trying to add a property '%s' that already exists on this %s object.\"", "%", "(", ...
Adds a property to the Context. See `Mapper.add_ctx_property`, which uses this method to install the properties added on the Mapper level.
[ "Adds", "a", "property", "to", "the", "Context", "." ]
python
train
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/holder_prover.py
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L957-L1089
async def create_proof(self, proof_req: dict, creds: dict, requested_creds: dict) -> str: """ Create proof as HolderProver. Raise: * AbsentLinkSecret if link secret not set * CredentialFocus on attempt to create proof on no creds or multiple creds for a credential definition * AbsentTails if missing required tails file * BadRevStateTime if a timestamp for a revocation registry state in the proof request occurs before revocation registry creation * IndyError for any other indy-sdk error. * AbsentInterval if creds missing non-revocation interval, but cred def supports revocation :param proof_req: proof request as per get_creds() above :param creds: credentials to prove :param requested_creds: data structure with self-attested attribute info, requested attribute info and requested predicate info, assembled from get_creds() and filtered for content of interest. I.e., :: { 'self_attested_attributes': {}, 'requested_attributes': { 'attr0_uuid': { 'cred_id': string, 'timestamp': integer, # for revocation state 'revealed': bool }, ... 
}, 'requested_predicates': { 'predicate0_uuid': { 'cred_id': string, 'timestamp': integer # for revocation state } } } :return: proof json """ LOGGER.debug( 'HolderProver.create_proof >>> proof_req: %s, creds: %s, requested_creds: %s', proof_req, creds, requested_creds) self._assert_link_secret('create_proof') x_uuids = [attr_uuid for attr_uuid in creds['attrs'] if len(creds['attrs'][attr_uuid]) != 1] if x_uuids: LOGGER.debug('HolderProver.create_proof: <!< creds specification out of focus (non-uniqueness)') raise CredentialFocus('Proof request requires unique cred per attribute; violators: {}'.format(x_uuids)) s_id2schema = {} # schema identifier to schema cd_id2cred_def = {} # credential definition identifier to credential definition rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None) rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier for referents in {**creds['attrs'], **creds['predicates']}.values(): interval = referents[0].get('interval', None) cred_info = referents[0]['cred_info'] s_id = cred_info['schema_id'] if s_id not in s_id2schema: schema = json.loads(await self.get_schema(s_id)) # add to cache en passant if not schema: LOGGER.debug( 'HolderProver.create_proof: <!< absent schema %s, proof req may be for another ledger', s_id) raise AbsentSchema( 'Absent schema {}, proof req may be for another ledger'.format(s_id)) s_id2schema[s_id] = schema cd_id = cred_info['cred_def_id'] if cd_id not in cd_id2cred_def: cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant cd_id2cred_def[cd_id] = cred_def rr_id = cred_info['rev_reg_id'] if rr_id: await self._sync_revoc(rr_id) # link tails file to its rr_id if it's new if interval: if rr_id not in rr_id2timestamp: if interval['to'] > int(time()): LOGGER.debug( 'HolderProver.create_proof: <!< interval to %s for rev reg %s is in the future', interval['to'], rr_id) raise BadRevStateTime('Revocation registry {} timestamp {} 
is in the future'.format( rr_id, interval['to'])) rr_id2timestamp[rr_id] = interval['to'] elif 'revocation' in cd_id2cred_def[cd_id]['value']: LOGGER.debug( 'HolderProver.create_proof: <!< creds on cred def id %s missing non-revocation interval', cd_id) raise AbsentInterval('Creds on cred def id {} missing non-revocation interval'.format(cd_id)) if rr_id in rr_id2cr_id: continue rr_id2cr_id[rr_id] = cred_info['cred_rev_id'] rr_id2rev_state = {} # revocation registry identifier to its state with REVO_CACHE.lock: for rr_id in rr_id2timestamp: revo_cache_entry = REVO_CACHE.get(rr_id, None) tails = revo_cache_entry.tails if revo_cache_entry else None if tails is None: # missing tails file LOGGER.debug('HolderProver.create_proof: <!< missing tails file for rev reg id %s', rr_id) raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id)) rr_def_json = await self._get_rev_reg_def(rr_id) (rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json( self._build_rr_delta_json, rr_id2timestamp[rr_id], rr_id2timestamp[rr_id]) rr_state_json = await anoncreds.create_revocation_state( tails.reader_handle, rr_def_json, rr_delta_json, ledger_timestamp, rr_id2cr_id[rr_id]) rr_id2rev_state[rr_id] = { rr_id2timestamp[rr_id]: json.loads(rr_state_json) } rv = await anoncreds.prover_create_proof( self.wallet.handle, json.dumps(proof_req), json.dumps(requested_creds), self._link_secret, json.dumps(s_id2schema), json.dumps(cd_id2cred_def), json.dumps(rr_id2rev_state)) LOGGER.debug('HolderProver.create_proof <<< %s', rv) return rv
[ "async", "def", "create_proof", "(", "self", ",", "proof_req", ":", "dict", ",", "creds", ":", "dict", ",", "requested_creds", ":", "dict", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'HolderProver.create_proof >>> proof_req: %s, creds: %s, requested_creds:...
Create proof as HolderProver. Raise: * AbsentLinkSecret if link secret not set * CredentialFocus on attempt to create proof on no creds or multiple creds for a credential definition * AbsentTails if missing required tails file * BadRevStateTime if a timestamp for a revocation registry state in the proof request occurs before revocation registry creation * IndyError for any other indy-sdk error. * AbsentInterval if creds missing non-revocation interval, but cred def supports revocation :param proof_req: proof request as per get_creds() above :param creds: credentials to prove :param requested_creds: data structure with self-attested attribute info, requested attribute info and requested predicate info, assembled from get_creds() and filtered for content of interest. I.e., :: { 'self_attested_attributes': {}, 'requested_attributes': { 'attr0_uuid': { 'cred_id': string, 'timestamp': integer, # for revocation state 'revealed': bool }, ... }, 'requested_predicates': { 'predicate0_uuid': { 'cred_id': string, 'timestamp': integer # for revocation state } } } :return: proof json
[ "Create", "proof", "as", "HolderProver", "." ]
python
train
mitsei/dlkit
dlkit/services/grading.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/grading.py#L1401-L1409
def use_federated_gradebook_view(self): """Pass through to provider GradeSystemLookupSession.use_federated_gradebook_view""" self._gradebook_view = FEDERATED # self._get_provider_session('grade_system_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_federated_gradebook_view() except AttributeError: pass
[ "def", "use_federated_gradebook_view", "(", "self", ")", ":", "self", ".", "_gradebook_view", "=", "FEDERATED", "# self._get_provider_session('grade_system_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(",...
Pass through to provider GradeSystemLookupSession.use_federated_gradebook_view
[ "Pass", "through", "to", "provider", "GradeSystemLookupSession", ".", "use_federated_gradebook_view" ]
python
train
uw-it-aca/uw-restclients-canvas
uw_canvas/courses.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/courses.py#L98-L106
def update_sis_id(self, course_id, sis_course_id): """ Updates the SIS ID for the course identified by the passed course ID. https://canvas.instructure.com/doc/api/courses.html#method.courses.update """ url = COURSES_API.format(course_id) body = {"course": {"sis_course_id": sis_course_id}} return CanvasCourse(data=self._put_resource(url, body))
[ "def", "update_sis_id", "(", "self", ",", "course_id", ",", "sis_course_id", ")", ":", "url", "=", "COURSES_API", ".", "format", "(", "course_id", ")", "body", "=", "{", "\"course\"", ":", "{", "\"sis_course_id\"", ":", "sis_course_id", "}", "}", "return", ...
Updates the SIS ID for the course identified by the passed course ID. https://canvas.instructure.com/doc/api/courses.html#method.courses.update
[ "Updates", "the", "SIS", "ID", "for", "the", "course", "identified", "by", "the", "passed", "course", "ID", "." ]
python
test
LonamiWebs/Telethon
telethon/sessions/sqlite.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/sessions/sqlite.py#L251-L256
def list_sessions(cls): """Lists all the sessions of the users who have ever connected using this client and never logged out """ return [os.path.splitext(os.path.basename(f))[0] for f in os.listdir('.') if f.endswith(EXTENSION)]
[ "def", "list_sessions", "(", "cls", ")", ":", "return", "[", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "[", "0", "]", "for", "f", "in", "os", ".", "listdir", "(", "'.'", ")", "if", "f", "...
Lists all the sessions of the users who have ever connected using this client and never logged out
[ "Lists", "all", "the", "sessions", "of", "the", "users", "who", "have", "ever", "connected", "using", "this", "client", "and", "never", "logged", "out" ]
python
train
touilleMan/marshmallow-mongoengine
marshmallow_mongoengine/schema.py
https://github.com/touilleMan/marshmallow-mongoengine/blob/21223700ea1f1d0209c967761e5c22635ee721e7/marshmallow_mongoengine/schema.py#L49-L88
def get_declared_fields(mcs, klass, *args, **kwargs): """Updates declared fields with fields converted from the Mongoengine model passed as the `model` class Meta option. """ declared_fields = kwargs.get('dict_class', dict)() # Generate the fields provided through inheritance opts = klass.opts model = getattr(opts, 'model', None) if model: converter = opts.model_converter() declared_fields.update(converter.fields_for_model( model, fields=opts.fields )) # Generate the fields provided in the current class base_fields = super(SchemaMeta, mcs).get_declared_fields( klass, *args, **kwargs ) declared_fields.update(base_fields) # Customize fields with provided kwargs for field_name, field_kwargs in klass.opts.model_fields_kwargs.items(): field = declared_fields.get(field_name, None) if field: # Copy to prevent alteration of a possible parent class's field field = copy.copy(field) for key, value in field_kwargs.items(): setattr(field, key, value) declared_fields[field_name] = field if opts.model_dump_only_pk and opts.model: # If primary key is automatically generated (nominal case), we # must make sure this field is read-only if opts.model._auto_id_field is True: field_name = opts.model._meta['id_field'] id_field = declared_fields.get(field_name) if id_field: # Copy to prevent alteration of a possible parent class's field id_field = copy.copy(id_field) id_field.dump_only = True declared_fields[field_name] = id_field return declared_fields
[ "def", "get_declared_fields", "(", "mcs", ",", "klass", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "declared_fields", "=", "kwargs", ".", "get", "(", "'dict_class'", ",", "dict", ")", "(", ")", "# Generate the fields provided through inheritance", "o...
Updates declared fields with fields converted from the Mongoengine model passed as the `model` class Meta option.
[ "Updates", "declared", "fields", "with", "fields", "converted", "from", "the", "Mongoengine", "model", "passed", "as", "the", "model", "class", "Meta", "option", "." ]
python
train
pgxcentre/geneparse
geneparse/core.py
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/core.py#L139-L150
def complement_alleles(self): """Complement the alleles of this variant. This will call this module's `complement_alleles` function. Note that this will not create a new object, but modify the state of the current instance. """ self.alleles = self._encode_alleles( [complement_alleles(i) for i in self.alleles] )
[ "def", "complement_alleles", "(", "self", ")", ":", "self", ".", "alleles", "=", "self", ".", "_encode_alleles", "(", "[", "complement_alleles", "(", "i", ")", "for", "i", "in", "self", ".", "alleles", "]", ")" ]
Complement the alleles of this variant. This will call this module's `complement_alleles` function. Note that this will not create a new object, but modify the state of the current instance.
[ "Complement", "the", "alleles", "of", "this", "variant", "." ]
python
train
rgarcia-herrera/pyveplot
pyveplot/__init__.py
https://github.com/rgarcia-herrera/pyveplot/blob/57ceadcca47e79c94ee22efc9ba1e4962f462015/pyveplot/__init__.py#L74-L121
def connect(self, axis0, n0_index, source_angle, axis1, n1_index, target_angle, **kwargs): """Draw edges as Bézier curves. Start and end points map to the coordinates of the given nodes which in turn are set when adding nodes to an axis with the Axis.add_node() method, by using the placement information of the axis and a specified offset from its start point. Control points are set at the same distance from the start (or end) point of an axis as their corresponding nodes, but along an invisible axis that shares its origin but diverges by a given angle. Parameters ---------- axis0 : source Axis object n0_index : key of source node in nodes dictionary of axis0 source_angle : angle of departure for invisible axis that diverges from axis0 and holds first control points axis1 : target Axis object n1_index : key of target node in nodes dictionary of axis1 target_angle : angle of departure for invisible axis that diverges from axis1 and holds second control points kwargs : extra SVG attributes for path element, optional Set or change attributes using key=value """ n0 = axis0.nodes[n0_index] n1 = axis1.nodes[n1_index] pth = self.dwg.path(d="M %s %s" % (n0.x, n0.y), fill='none', **kwargs) # source # compute source control point alfa = axis0.angle() + radians(source_angle) length = sqrt( ((n0.x - axis0.start[0])**2) + ((n0.y-axis0.start[1])**2)) x = axis0.start[0] + length * cos(alfa); y = axis0.start[1] + length * sin(alfa); pth.push("C %s %s" % (x, y)) # first control point in path # compute target control point alfa = axis1.angle() + radians(target_angle) length = sqrt( ((n1.x - axis1.start[0])**2) + ((n1.y-axis1.start[1])**2)) x = axis1.start[0] + length * cos(alfa); y = axis1.start[1] + length * sin(alfa); pth.push("%s %s" % (x, y)) # second control point in path pth.push("%s %s" % (n1.x, n1.y)) # target self.dwg.add(pth)
[ "def", "connect", "(", "self", ",", "axis0", ",", "n0_index", ",", "source_angle", ",", "axis1", ",", "n1_index", ",", "target_angle", ",", "*", "*", "kwargs", ")", ":", "n0", "=", "axis0", ".", "nodes", "[", "n0_index", "]", "n1", "=", "axis1", ".",...
Draw edges as Bézier curves. Start and end points map to the coordinates of the given nodes which in turn are set when adding nodes to an axis with the Axis.add_node() method, by using the placement information of the axis and a specified offset from its start point. Control points are set at the same distance from the start (or end) point of an axis as their corresponding nodes, but along an invisible axis that shares its origin but diverges by a given angle. Parameters ---------- axis0 : source Axis object n0_index : key of source node in nodes dictionary of axis0 source_angle : angle of departure for invisible axis that diverges from axis0 and holds first control points axis1 : target Axis object n1_index : key of target node in nodes dictionary of axis1 target_angle : angle of departure for invisible axis that diverges from axis1 and holds second control points kwargs : extra SVG attributes for path element, optional Set or change attributes using key=value
[ "Draw", "edges", "as", "Bézier", "curves", "." ]
python
valid
jim-easterbrook/pywws
src/pywws/sqlite3data.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/sqlite3data.py#L516-L520
def items(self): """D.items() -> a set-like object providing a view on D's items""" keycol = self._keycol for row in self.__iter__(): yield (row[keycol], dict(row))
[ "def", "items", "(", "self", ")", ":", "keycol", "=", "self", ".", "_keycol", "for", "row", "in", "self", ".", "__iter__", "(", ")", ":", "yield", "(", "row", "[", "keycol", "]", ",", "dict", "(", "row", ")", ")" ]
D.items() -> a set-like object providing a view on D's items
[ "D", ".", "items", "()", "-", ">", "a", "set", "-", "like", "object", "providing", "a", "view", "on", "D", "s", "items" ]
python
train
litl/park
park.py
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L210-L215
def ibatch(iterable, size): """Yield a series of batches from iterable, each size elements long.""" source = iter(iterable) while True: batch = itertools.islice(source, size) yield itertools.chain([next(batch)], batch)
[ "def", "ibatch", "(", "iterable", ",", "size", ")", ":", "source", "=", "iter", "(", "iterable", ")", "while", "True", ":", "batch", "=", "itertools", ".", "islice", "(", "source", ",", "size", ")", "yield", "itertools", ".", "chain", "(", "[", "next...
Yield a series of batches from iterable, each size elements long.
[ "Yield", "a", "series", "of", "batches", "from", "iterable", "each", "size", "elements", "long", "." ]
python
train
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L2276-L2294
def toDict(self): """To Dict Returns the Parent as a dictionary in the same format as is used in constructing it Returns: dict """ # Get the parents dict as the starting point of our return dRet = super(Parent,self).toDict() # Go through each field and add it to the return for k,v in iteritems(self._nodes): dRet[k] = v.toDict() # Return return dRet
[ "def", "toDict", "(", "self", ")", ":", "# Get the parents dict as the starting point of our return", "dRet", "=", "super", "(", "Parent", ",", "self", ")", ".", "toDict", "(", ")", "# Go through each field and add it to the return", "for", "k", ",", "v", "in", "ite...
To Dict Returns the Parent as a dictionary in the same format as is used in constructing it Returns: dict
[ "To", "Dict" ]
python
train
ciena/afkak
afkak/consumer.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L554-L587
def _handle_offset_error(self, failure): """ Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors. """ # outstanding request got errback'd, clear it self._request_d = None if self._stopping and failure.check(CancelledError): # Not really an error return # Do we need to abort? if (self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts): log.debug( "%r: Exhausted attempts: %d fetching offset from kafka: %r", self, self.request_retry_max_attempts, failure) self._start_d.errback(failure) return # Decide how to log this failure... If we have retried so many times # we're at the retry_max_delay, then we log at warning every other time # debug otherwise if (self.retry_delay < self.retry_max_delay or 0 == (self._fetch_attempt_count % 2)): log.debug("%r: Failure fetching offset from kafka: %r", self, failure) else: # We've retried until we hit the max delay, log at warn log.warning("%r: Still failing fetching offset from kafka: %r", self, failure) self._retry_fetch()
[ "def", "_handle_offset_error", "(", "self", ",", "failure", ")", ":", "# outstanding request got errback'd, clear it", "self", ".", "_request_d", "=", "None", "if", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ":", "# Not r...
Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors.
[ "Retry", "the", "offset", "fetch", "request", "if", "appropriate", "." ]
python
train
NetEaseGame/ATX
atx/adbkit/device.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/adbkit/device.py#L153-L165
def packages(self): """ Show all packages """ pattern = re.compile(r'package:(/[^=]+\.apk)=([^\s]+)') packages = [] for line in self.shell('pm', 'list', 'packages', '-f').splitlines(): m = pattern.match(line) if not m: continue path, name = m.group(1), m.group(2) packages.append(self.Package(name, path)) return packages
[ "def", "packages", "(", "self", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'package:(/[^=]+\\.apk)=([^\\s]+)'", ")", "packages", "=", "[", "]", "for", "line", "in", "self", ".", "shell", "(", "'pm'", ",", "'list'", ",", "'packages'", ",", "'-...
Show all packages
[ "Show", "all", "packages" ]
python
train
neo4j-contrib/neomodel
neomodel/match.py
https://github.com/neo4j-contrib/neomodel/blob/cca5de4c4e90998293558b871b1b529095c91a38/neomodel/match.py#L129-L179
def process_filter_args(cls, kwargs): """ loop through properties in filter parameters check they match class definition deflate them and convert into something easy to generate cypher from """ output = {} for key, value in kwargs.items(): if '__' in key: prop, operator = key.rsplit('__') operator = OPERATOR_TABLE[operator] else: prop = key operator = '=' if prop not in cls.defined_properties(rels=False): raise ValueError("No such property {0} on {1}".format(prop, cls.__name__)) property_obj = getattr(cls, prop) if isinstance(property_obj, AliasProperty): prop = property_obj.aliased_to() deflated_value = getattr(cls, prop).deflate(value) else: # handle special operators if operator == _SPECIAL_OPERATOR_IN: if not isinstance(value, tuple) and not isinstance(value, list): raise ValueError('Value must be a tuple or list for IN operation {0}={1}'.format(key, value)) deflated_value = [property_obj.deflate(v) for v in value] elif operator == _SPECIAL_OPERATOR_ISNULL: if not isinstance(value, bool): raise ValueError('Value must be a bool for isnull operation on {0}'.format(key)) operator = 'IS NULL' if value else 'IS NOT NULL' deflated_value = None elif operator in _REGEX_OPERATOR_TABLE.values(): deflated_value = property_obj.deflate(value) if not isinstance(deflated_value, basestring): raise ValueError('Must be a string value for {0}'.format(key)) if operator in _STRING_REGEX_OPERATOR_TABLE.values(): deflated_value = re.escape(deflated_value) deflated_value = operator.format(deflated_value) operator = _SPECIAL_OPERATOR_REGEX else: deflated_value = property_obj.deflate(value) # map property to correct property name in the database db_property = cls.defined_properties(rels=False)[prop].db_property or prop output[db_property] = (operator, deflated_value) return output
[ "def", "process_filter_args", "(", "cls", ",", "kwargs", ")", ":", "output", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "'__'", "in", "key", ":", "prop", ",", "operator", "=", "key", ".", "rsplit",...
loop through properties in filter parameters check they match class definition deflate them and convert into something easy to generate cypher from
[ "loop", "through", "properties", "in", "filter", "parameters", "check", "they", "match", "class", "definition", "deflate", "them", "and", "convert", "into", "something", "easy", "to", "generate", "cypher", "from" ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/regression/linear_regression.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L427-L451
def export_coreml(self, filename): """ Export the model in Core ML format. Parameters ---------- filename: str A valid filename where the model can be saved. Examples -------- >>> model.export_coreml("MyModel.mlmodel") """ from turicreate.extensions import _linear_regression_export_as_model_asset from turicreate.toolkits import _coreml_utils display_name = "linear regression" short_description = _coreml_utils._mlmodel_short_description(display_name) context = {"class": self.__class__.__name__, "version": _turicreate.__version__, "short_description": short_description, 'user_defined':{ 'turicreate_version': _turicreate.__version__ } } _linear_regression_export_as_model_asset(self.__proxy__, filename, context)
[ "def", "export_coreml", "(", "self", ",", "filename", ")", ":", "from", "turicreate", ".", "extensions", "import", "_linear_regression_export_as_model_asset", "from", "turicreate", ".", "toolkits", "import", "_coreml_utils", "display_name", "=", "\"linear regression\"", ...
Export the model in Core ML format. Parameters ---------- filename: str A valid filename where the model can be saved. Examples -------- >>> model.export_coreml("MyModel.mlmodel")
[ "Export", "the", "model", "in", "Core", "ML", "format", "." ]
python
train
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L216-L231
def get_seconds_description(self): """Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description """ return self.get_segment_description( self._expression_parts[0], _("every second"), lambda s: s, lambda s: _("every {0} seconds").format(s), lambda s: _("seconds {0} through {1} past the minute"), lambda s: _("at {0} seconds past the minute") )
[ "def", "get_seconds_description", "(", "self", ")", ":", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "0", "]", ",", "_", "(", "\"every second\"", ")", ",", "lambda", "s", ":", "s", ",", "lambda", "s", ":", ...
Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description
[ "Generates", "a", "description", "for", "only", "the", "SECONDS", "portion", "of", "the", "expression" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/client_options.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/client_options.py#L101-L123
def _parse_pool_options(options): """Parse connection pool options.""" max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) max_idle_time_ms = options.get('maxidletimems', common.MAX_IDLE_TIME_MS) if max_pool_size is not None and min_pool_size > max_pool_size: raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT) socket_keepalive = options.get('socketkeepalive', True) socket_timeout = options.get('sockettimeoutms') wait_queue_timeout = options.get('waitqueuetimeoutms') wait_queue_multiple = options.get('waitqueuemultiple') event_listeners = options.get('event_listeners') appname = options.get('appname') ssl_context, ssl_match_hostname = _parse_ssl_options(options) return PoolOptions(max_pool_size, min_pool_size, max_idle_time_ms, connect_timeout, socket_timeout, wait_queue_timeout, wait_queue_multiple, ssl_context, ssl_match_hostname, socket_keepalive, _EventListeners(event_listeners), appname)
[ "def", "_parse_pool_options", "(", "options", ")", ":", "max_pool_size", "=", "options", ".", "get", "(", "'maxpoolsize'", ",", "common", ".", "MAX_POOL_SIZE", ")", "min_pool_size", "=", "options", ".", "get", "(", "'minpoolsize'", ",", "common", ".", "MIN_POO...
Parse connection pool options.
[ "Parse", "connection", "pool", "options", "." ]
python
train
sorgerlab/indra
indra/assemblers/sbgn/assembler.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L125-L136
def save_model(self, file_name='model.sbgn'): """Save the assembled SBGN model in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the SBGN network to. Default: model.sbgn """ model = self.print_model() with open(file_name, 'wb') as fh: fh.write(model)
[ "def", "save_model", "(", "self", ",", "file_name", "=", "'model.sbgn'", ")", ":", "model", "=", "self", ".", "print_model", "(", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "fh", ":", "fh", ".", "write", "(", "model", ")" ]
Save the assembled SBGN model in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the SBGN network to. Default: model.sbgn
[ "Save", "the", "assembled", "SBGN", "model", "in", "a", "file", "." ]
python
train
turicas/rows
rows/operations.py
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/operations.py#L26-L53
def join(keys, tables): """Merge a list of `Table` objects using `keys` to group rows""" # Make new (merged) Table fields fields = OrderedDict() for table in tables: fields.update(table.fields) # TODO: may raise an error if a same field is different in some tables # Check if all keys are inside merged Table's fields fields_keys = set(fields.keys()) for key in keys: if key not in fields_keys: raise ValueError('Invalid key: "{}"'.format(key)) # Group rows by key, without missing ordering none_fields = lambda: OrderedDict({field: None for field in fields.keys()}) data = OrderedDict() for table in tables: for row in table: row_key = tuple([getattr(row, key) for key in keys]) if row_key not in data: data[row_key] = none_fields() data[row_key].update(row._asdict()) merged = Table(fields=fields) merged.extend(data.values()) return merged
[ "def", "join", "(", "keys", ",", "tables", ")", ":", "# Make new (merged) Table fields", "fields", "=", "OrderedDict", "(", ")", "for", "table", "in", "tables", ":", "fields", ".", "update", "(", "table", ".", "fields", ")", "# TODO: may raise an error if a same...
Merge a list of `Table` objects using `keys` to group rows
[ "Merge", "a", "list", "of", "Table", "objects", "using", "keys", "to", "group", "rows" ]
python
train
UniversalDevicesInc/polyglot-v2-python-interface
polyinterface/polyinterface.py
https://github.com/UniversalDevicesInc/polyglot-v2-python-interface/blob/fe613135b762731a41a081222e43d2a8ae4fc53f/polyinterface/polyinterface.py#L349-L361
def send(self, message): """ Formatted Message to send to Polyglot. Connection messages are sent automatically from this module so this method is used to send commands to/from Polyglot and formats it for consumption """ if not isinstance(message, dict) and self.connected: warnings.warn('payload not a dictionary') return False try: message['node'] = self.profileNum self._mqttc.publish(self.topicInput, json.dumps(message), retain=False) except TypeError as err: LOGGER.error('MQTT Send Error: {}'.format(err), exc_info=True)
[ "def", "send", "(", "self", ",", "message", ")", ":", "if", "not", "isinstance", "(", "message", ",", "dict", ")", "and", "self", ".", "connected", ":", "warnings", ".", "warn", "(", "'payload not a dictionary'", ")", "return", "False", "try", ":", "mess...
Formatted Message to send to Polyglot. Connection messages are sent automatically from this module so this method is used to send commands to/from Polyglot and formats it for consumption
[ "Formatted", "Message", "to", "send", "to", "Polyglot", ".", "Connection", "messages", "are", "sent", "automatically", "from", "this", "module", "so", "this", "method", "is", "used", "to", "send", "commands", "to", "/", "from", "Polyglot", "and", "formats", ...
python
train
SmartTeleMax/iktomi
iktomi/forms/fields.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/fields.py#L202-L209
def accept(self): '''Extracts raw value from form's raw data and passes it to converter''' value = self.raw_value if not self._check_value_type(value): # XXX should this be silent or TypeError? value = [] if self.multiple else self._null_value self.clean_value = self.conv.accept(value) return {self.name: self.clean_value}
[ "def", "accept", "(", "self", ")", ":", "value", "=", "self", ".", "raw_value", "if", "not", "self", ".", "_check_value_type", "(", "value", ")", ":", "# XXX should this be silent or TypeError?", "value", "=", "[", "]", "if", "self", ".", "multiple", "else",...
Extracts raw value from form's raw data and passes it to converter
[ "Extracts", "raw", "value", "from", "form", "s", "raw", "data", "and", "passes", "it", "to", "converter" ]
python
train
kubernetes-client/python
kubernetes/client/apis/networking_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/networking_v1beta1_api.py#L997-L1020
def read_namespaced_ingress(self, name, namespace, **kwargs): """ read the specified Ingress This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_ingress(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Ingress (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. :return: NetworkingV1beta1Ingress If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_ingress_with_http_info(name, namespace, **kwargs) else: (data) = self.read_namespaced_ingress_with_http_info(name, namespace, **kwargs) return data
[ "def", "read_namespaced_ingress", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", ...
read the specified Ingress This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_ingress(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Ingress (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. :return: NetworkingV1beta1Ingress If the method is called asynchronously, returns the request thread.
[ "read", "the", "specified", "Ingress", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", "."...
python
train
Qiskit/qiskit-terra
qiskit/visualization/text.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/text.py#L60-L69
def bot(self): """ Constructs the bottom line of the element""" ret = self.bot_format % self.bot_connect.center( self.width, self.bot_pad) if self.right_fill: ret = ret.ljust(self.right_fill, self.bot_pad) if self.left_fill: ret = ret.rjust(self.left_fill, self.bot_pad) ret = ret.center(self.layer_width, self.bot_bck) return ret
[ "def", "bot", "(", "self", ")", ":", "ret", "=", "self", ".", "bot_format", "%", "self", ".", "bot_connect", ".", "center", "(", "self", ".", "width", ",", "self", ".", "bot_pad", ")", "if", "self", ".", "right_fill", ":", "ret", "=", "ret", ".", ...
Constructs the bottom line of the element
[ "Constructs", "the", "bottom", "line", "of", "the", "element" ]
python
test
sods/paramz
paramz/core/indexable.py
https://github.com/sods/paramz/blob/ae6fc6274b70fb723d91e48fc5026a9bc5a06508/paramz/core/indexable.py#L164-L184
def _raveled_index_for_transformed(self, param): """ get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns! """ ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi
[ "def", "_raveled_index_for_transformed", "(", "self", ",", "param", ")", ":", "ravi", "=", "self", ".", "_raveled_index_for", "(", "param", ")", "if", "self", ".", "_has_fixes", "(", ")", ":", "fixes", "=", "self", ".", "_fixes_", "### Transformed indices, han...
get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns!
[ "get", "the", "raveled", "index", "for", "a", "param", "for", "the", "transformed", "parameter", "array", "(", "optimizer", "array", ")", "." ]
python
train
rosshamish/catanlog
catanlog.py
https://github.com/rosshamish/catanlog/blob/6f204920d9b67fd53fc6ff6a1c7b6a756b009bf0/catanlog.py#L151-L156
def log_player_roll(self, player, roll): """ :param player: catan.game.Player :param roll: integer or string, the sum of the dice """ self._logln('{0} rolls {1}{2}'.format(player.color, roll, ' ...DEUCES!' if int(roll) == 2 else ''))
[ "def", "log_player_roll", "(", "self", ",", "player", ",", "roll", ")", ":", "self", ".", "_logln", "(", "'{0} rolls {1}{2}'", ".", "format", "(", "player", ".", "color", ",", "roll", ",", "' ...DEUCES!'", "if", "int", "(", "roll", ")", "==", "2", "els...
:param player: catan.game.Player :param roll: integer or string, the sum of the dice
[ ":", "param", "player", ":", "catan", ".", "game", ".", "Player", ":", "param", "roll", ":", "integer", "or", "string", "the", "sum", "of", "the", "dice" ]
python
train
inspirehep/inspire-schemas
inspire_schemas/builders/authors.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/authors.py#L102-L110
def add_name_variant(self, name): """Add name variant. Args: :param name: name variant for the current author. :type name: string """ self._ensure_field('name', {}) self.obj['name'].setdefault('name_variants', []).append(name)
[ "def", "add_name_variant", "(", "self", ",", "name", ")", ":", "self", ".", "_ensure_field", "(", "'name'", ",", "{", "}", ")", "self", ".", "obj", "[", "'name'", "]", ".", "setdefault", "(", "'name_variants'", ",", "[", "]", ")", ".", "append", "(",...
Add name variant. Args: :param name: name variant for the current author. :type name: string
[ "Add", "name", "variant", "." ]
python
train
saltstack/salt
salt/modules/infoblox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/infoblox.py#L58-L75
def _get_config(**api_opts): ''' Return configuration user passed api_opts override salt config.get vars ''' config = { 'api_sslverify': True, 'api_url': 'https://INFOBLOX/wapi/v1.2.1', 'api_user': '', 'api_key': '', } if '__salt__' in globals(): config_key = '{0}.config'.format(__virtualname__) config.update(__salt__['config.get'](config_key, {})) # pylint: disable=C0201 for k in set(config.keys()) & set(api_opts.keys()): config[k] = api_opts[k] return config
[ "def", "_get_config", "(", "*", "*", "api_opts", ")", ":", "config", "=", "{", "'api_sslverify'", ":", "True", ",", "'api_url'", ":", "'https://INFOBLOX/wapi/v1.2.1'", ",", "'api_user'", ":", "''", ",", "'api_key'", ":", "''", ",", "}", "if", "'__salt__'", ...
Return configuration user passed api_opts override salt config.get vars
[ "Return", "configuration", "user", "passed", "api_opts", "override", "salt", "config", ".", "get", "vars" ]
python
train
pypa/pipenv
pipenv/core.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/core.py#L318-L339
def find_a_system_python(line): """Find a Python installation from a given line. This tries to parse the line in various of ways: * Looks like an absolute path? Use it directly. * Looks like a py.exe call? Use py.exe to get the executable. * Starts with "py" something? Looks like a python command. Try to find it in PATH, and use it directly. * Search for "python" and "pythonX.Y" executables in PATH to find a match. * Nothing fits, return None. """ from .vendor.pythonfinder import Finder finder = Finder(system=False, global_search=True) if not line: return next(iter(finder.find_all_python_versions()), None) # Use the windows finder executable if (line.startswith("py ") or line.startswith("py.exe ")) and os.name == "nt": line = line.split(" ", 1)[1].lstrip("-") python_entry = find_python(finder, line) return python_entry
[ "def", "find_a_system_python", "(", "line", ")", ":", "from", ".", "vendor", ".", "pythonfinder", "import", "Finder", "finder", "=", "Finder", "(", "system", "=", "False", ",", "global_search", "=", "True", ")", "if", "not", "line", ":", "return", "next", ...
Find a Python installation from a given line. This tries to parse the line in various of ways: * Looks like an absolute path? Use it directly. * Looks like a py.exe call? Use py.exe to get the executable. * Starts with "py" something? Looks like a python command. Try to find it in PATH, and use it directly. * Search for "python" and "pythonX.Y" executables in PATH to find a match. * Nothing fits, return None.
[ "Find", "a", "Python", "installation", "from", "a", "given", "line", "." ]
python
train
klahnakoski/pyLibrary
jx_elasticsearch/meta.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_elasticsearch/meta.py#L631-L638
def query_paths(self): """ RETURN A LIST OF ALL NESTED COLUMNS """ output = self.namespace.alias_to_query_paths.get(self.name) if output: return output Log.error("Can not find index {{index|quote}}", index=self.name)
[ "def", "query_paths", "(", "self", ")", ":", "output", "=", "self", ".", "namespace", ".", "alias_to_query_paths", ".", "get", "(", "self", ".", "name", ")", "if", "output", ":", "return", "output", "Log", ".", "error", "(", "\"Can not find index {{index|quo...
RETURN A LIST OF ALL NESTED COLUMNS
[ "RETURN", "A", "LIST", "OF", "ALL", "NESTED", "COLUMNS" ]
python
train
spyder-ide/spyder
spyder/widgets/browser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/browser.py#L185-L198
def setHtml(self, html, baseUrl=QUrl()): """ Reimplement Qt method to prevent WebEngine to steal focus when setting html on the page Solution taken from https://bugreports.qt.io/browse/QTBUG-52999 """ if WEBENGINE: self.setEnabled(False) super(WebView, self).setHtml(html, baseUrl) self.setEnabled(True) else: super(WebView, self).setHtml(html, baseUrl)
[ "def", "setHtml", "(", "self", ",", "html", ",", "baseUrl", "=", "QUrl", "(", ")", ")", ":", "if", "WEBENGINE", ":", "self", ".", "setEnabled", "(", "False", ")", "super", "(", "WebView", ",", "self", ")", ".", "setHtml", "(", "html", ",", "baseUrl...
Reimplement Qt method to prevent WebEngine to steal focus when setting html on the page Solution taken from https://bugreports.qt.io/browse/QTBUG-52999
[ "Reimplement", "Qt", "method", "to", "prevent", "WebEngine", "to", "steal", "focus", "when", "setting", "html", "on", "the", "page", "Solution", "taken", "from", "https", ":", "//", "bugreports", ".", "qt", ".", "io", "/", "browse", "/", "QTBUG", "-", "5...
python
train
penguinmenac3/opendatalake
opendatalake/detection/utils.py
https://github.com/penguinmenac3/opendatalake/blob/77c888377095e1812a16982c8efbd2f6b1697a33/opendatalake/detection/utils.py#L771-L784
def move_detections(label, dy, dx): """ Move detections in direction dx, dy. :param label: The label dict containing all detection lists. :param dy: The delta in y direction as a number. :param dx: The delta in x direction as a number. :return: """ for k in label.keys(): if k.startswith("detection"): detections = label[k] for detection in detections: detection.move_image(-dx, -dy)
[ "def", "move_detections", "(", "label", ",", "dy", ",", "dx", ")", ":", "for", "k", "in", "label", ".", "keys", "(", ")", ":", "if", "k", ".", "startswith", "(", "\"detection\"", ")", ":", "detections", "=", "label", "[", "k", "]", "for", "detectio...
Move detections in direction dx, dy. :param label: The label dict containing all detection lists. :param dy: The delta in y direction as a number. :param dx: The delta in x direction as a number. :return:
[ "Move", "detections", "in", "direction", "dx", "dy", "." ]
python
test