repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
monero-ecosystem/monero-python
monero/wallet.py
https://github.com/monero-ecosystem/monero-python/blob/64149f6323af57a3924f45ed87997d64387c5ee0/monero/wallet.py#L104-L119
def confirmations(self, txn_or_pmt):
    """
    Returns the number of confirmations for given
    :class:`Transaction <monero.transaction.Transaction>` or
    :class:`Payment <monero.transaction.Payment>` object.

    :rtype: int
    """
    # A Payment wraps its transaction; unwrap it first.
    txn = txn_or_pmt.transaction if isinstance(txn_or_pmt, Payment) else txn_or_pmt
    try:
        return max(0, self.height() - txn.height)
    except TypeError:
        # txn.height is presumably None for not-yet-mined transactions,
        # which makes the subtraction raise TypeError.
        return 0
[ "def", "confirmations", "(", "self", ",", "txn_or_pmt", ")", ":", "if", "isinstance", "(", "txn_or_pmt", ",", "Payment", ")", ":", "txn", "=", "txn_or_pmt", ".", "transaction", "else", ":", "txn", "=", "txn_or_pmt", "try", ":", "return", "max", "(", "0",...
Returns the number of confirmations for given :class:`Transaction <monero.transaction.Transaction>` or :class:`Payment <monero.transaction.Payment>` object. :rtype: int
[ "Returns", "the", "number", "of", "confirmations", "for", "given", ":", "class", ":", "Transaction", "<monero", ".", "transaction", ".", "Transaction", ">", "or", ":", "class", ":", "Payment", "<monero", ".", "transaction", ".", "Payment", ">", "object", "."...
python
valid
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L29858-L29890
def set_network_settings(self, mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv):
    """Sets network configuration of the NAT engine.

    in mtu of type int
        MTU (maximum transmission unit) of the NAT engine in bytes.

    in sock_snd of type int
        Capacity of the socket send buffer in bytes when creating a
        new socket.

    in sock_rcv of type int
        Capacity of the socket receive buffer in bytes when creating
        a new socket.

    in tcp_wnd_snd of type int
        Initial size of the NAT engine's sending TCP window in bytes
        when establishing a new TCP connection.

    in tcp_wnd_rcv of type int
        Initial size of the NAT engine's receiving TCP window in
        bytes when establishing a new TCP connection.
    """
    # Validate all arguments uniformly; error messages match the
    # "<name> can only be an instance of type baseinteger" convention.
    arguments = [
        ("mtu", mtu),
        ("sock_snd", sock_snd),
        ("sock_rcv", sock_rcv),
        ("tcp_wnd_snd", tcp_wnd_snd),
        ("tcp_wnd_rcv", tcp_wnd_rcv),
    ]
    for arg_name, arg_value in arguments:
        if not isinstance(arg_value, baseinteger):
            raise TypeError("%s can only be an instance of type baseinteger" % arg_name)
    self._call("setNetworkSettings",
               in_p=[mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv])
[ "def", "set_network_settings", "(", "self", ",", "mtu", ",", "sock_snd", ",", "sock_rcv", ",", "tcp_wnd_snd", ",", "tcp_wnd_rcv", ")", ":", "if", "not", "isinstance", "(", "mtu", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"mtu can only be an in...
Sets network configuration of the NAT engine. in mtu of type int MTU (maximum transmission unit) of the NAT engine in bytes. in sock_snd of type int Capacity of the socket send buffer in bytes when creating a new socket. in sock_rcv of type int Capacity of the socket receive buffer in bytes when creating a new socket. in tcp_wnd_snd of type int Initial size of the NAT engine's sending TCP window in bytes when establishing a new TCP connection. in tcp_wnd_rcv of type int Initial size of the NAT engine's receiving TCP window in bytes when establishing a new TCP connection.
[ "Sets", "network", "configuration", "of", "the", "NAT", "engine", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/sessions.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/sessions.py#L198-L222
def get_cookie_domain(self, app):
    """Helpful helper method that returns the cookie domain that should
    be used for the session cookie if session cookies are used.
    """
    configured = app.config['SESSION_COOKIE_DOMAIN']
    if configured is not None:
        return configured
    if app.config['SERVER_NAME'] is None:
        return None
    # chop of the port which is usually not supported by browsers
    domain = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
    # Google chrome does not like cookies set to .localhost, so
    # we just go with no domain then.  Flask documents anyways that
    # cross domain cookies need a fully qualified domain name
    if domain == '.localhost':
        domain = None
    # If we infer the cookie domain from the server name we need
    # to check if we are in a subpath.  In that case we can't
    # set a cross domain cookie.
    if domain is not None and self.get_cookie_path(app) != '/':
        domain = domain.lstrip('.')
    return domain
[ "def", "get_cookie_domain", "(", "self", ",", "app", ")", ":", "if", "app", ".", "config", "[", "'SESSION_COOKIE_DOMAIN'", "]", "is", "not", "None", ":", "return", "app", ".", "config", "[", "'SESSION_COOKIE_DOMAIN'", "]", "if", "app", ".", "config", "[", ...
Helpful helper method that returns the cookie domain that should be used for the session cookie if session cookies are used.
[ "Helpful", "helper", "method", "that", "returns", "the", "cookie", "domain", "that", "should", "be", "used", "for", "the", "session", "cookie", "if", "session", "cookies", "are", "used", "." ]
python
test
Chilipp/model-organization
model_organization/__init__.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L1584-L1589
def get_parser(cls):
    """Function returning the command line parser for this class"""
    # Build a throwaway instance purely to assemble its parser.
    instance = cls()
    instance.setup_parser()
    instance._finish_parser()
    return instance.parser
[ "def", "get_parser", "(", "cls", ")", ":", "organizer", "=", "cls", "(", ")", "organizer", ".", "setup_parser", "(", ")", "organizer", ".", "_finish_parser", "(", ")", "return", "organizer", ".", "parser" ]
Function returning the command line parser for this class
[ "Function", "returning", "the", "command", "line", "parser", "for", "this", "class" ]
python
train
nion-software/nionswift
nion/swift/model/Utility.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/Utility.py#L127-L136
def clean_tuple(t0, clean_item_fn=None):
    """ Return a json-clean tuple. Will log info message for failures.

    :param t0: the tuple (or any iterable) whose items are cleaned.
    :param clean_item_fn: optional per-item cleaning function; defaults to
        the module-level ``clean_item``.
    :return: a tuple of the cleaned items.
    """
    # Fall back to the module-level clean_item when no cleaner is supplied.
    clean_item_fn = clean_item_fn if clean_item_fn else clean_item
    # The original loop used enumerate() but never used the index; a
    # generator expression states the per-item mapping directly.
    return tuple(clean_item_fn(item) for item in t0)
[ "def", "clean_tuple", "(", "t0", ",", "clean_item_fn", "=", "None", ")", ":", "clean_item_fn", "=", "clean_item_fn", "if", "clean_item_fn", "else", "clean_item", "l", "=", "list", "(", ")", "for", "index", ",", "item", "in", "enumerate", "(", "t0", ")", ...
Return a json-clean tuple. Will log info message for failures.
[ "Return", "a", "json", "-", "clean", "tuple", ".", "Will", "log", "info", "message", "for", "failures", "." ]
python
train
bodylabs/lace
lace/serialization/dae.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/serialization/dae.py#L14-L25
def dumps(mesh):
    '''
    Generates a UTF-8 XML string containing the mesh, in collada format.
    '''
    from lxml import etree
    collada_doc = mesh_to_collada(mesh)
    # Saving refreshes the document's xmlnode before serialization.
    collada_doc.save()
    return etree.tostring(collada_doc.xmlnode, encoding='UTF-8')
[ "def", "dumps", "(", "mesh", ")", ":", "from", "lxml", "import", "etree", "dae", "=", "mesh_to_collada", "(", "mesh", ")", "# Update the xmlnode.", "dae", ".", "save", "(", ")", "return", "etree", ".", "tostring", "(", "dae", ".", "xmlnode", ",", "encodi...
Generates a UTF-8 XML string containing the mesh, in collada format.
[ "Generates", "a", "UTF", "-", "8", "XML", "string", "containing", "the", "mesh", "in", "collada", "format", "." ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/layout.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/layout.py#L607-L651
def isom(self, coolingFactor=None, EdgeAttribute=None, initialAdaptation=None,
         maxEpoch=None, minAdaptation=None, minRadius=None, network=None,
         NodeAttribute=None, nodeList=None, radius=None, radiusConstantTime=None,
         singlePartition=None, sizeFactor=None, verbose=None):
    """Execute the Inverted Self-Organizing Map Layout on a network.

    :param coolingFactor (string, optional): Cooling factor, in numeric value
    :param EdgeAttribute (string, optional): Name of the edge column containing
        numeric values used as weights in the layout algorithm.
    :param initialAdaptation (string, optional): Initial adaptation, in numeric
        value
    :param maxEpoch (string, optional): Number of iterations, in numeric value
    :param minAdaptation (string, optional): Minimum adaptation value, in
        numeric value
    :param minRadius (string, optional): Minimum radius, in numeric value
    :param network (string, optional): Specifies a network by name, or by SUID
        if the prefix SUID: is used. The keyword CURRENT, or a blank value, can
        also be used to specify the current network.
    :param NodeAttribute (string, optional): Name of the node column containing
        numeric values used as weights in the layout algorithm.
    :param nodeList (string, optional): Specifies a list of nodes. The keywords
        all, selected, or unselected can be used; COLUMN:VALUE patterns select
        by column value (NAME column by default) and may be comma-separated.
    :param radius (string, optional): Radius, in numeric value
    :param radiusConstantTime (string, optional): Radius constant, in numeric
        value
    :param singlePartition (string, optional): Don't partition graph before
        layout; boolean values only, true or false; defaults to false
    :param sizeFactor (string, optional): Size factor, in numeric value
    """
    network = check_network(self, network, verbose=verbose)
    param_names = ['coolingFactor', 'EdgeAttribute', 'initialAdaptation',
                   'maxEpoch', 'minAdaptation', 'minRadius', 'network',
                   'NodeAttribute', 'nodeList', 'radius', 'radiusConstantTime',
                   'singlePartition', 'sizeFactor']
    param_values = [coolingFactor, EdgeAttribute, initialAdaptation, maxEpoch,
                    minAdaptation, minRadius, network, NodeAttribute, nodeList,
                    radius, radiusConstantTime, singlePartition, sizeFactor]
    PARAMS = set_param(param_names, param_values)
    return api(url=self.__url + "/isom", PARAMS=PARAMS, method="POST",
               verbose=verbose)
[ "def", "isom", "(", "self", ",", "coolingFactor", "=", "None", ",", "EdgeAttribute", "=", "None", ",", "initialAdaptation", "=", "None", ",", "maxEpoch", "=", "None", ",", "minAdaptation", "=", "None", ",", "minRadius", "=", "None", ",", "network", "=", ...
Execute the Inverted Self-Organizing Map Layout on a network. :param coolingFactor (string, optional): Cooling factor, in numeric value :param EdgeAttribute (string, optional): The name of the edge column contai ning numeric values that will be used as weights in the layout algor ithm. Only columns containing numeric values are shown :param initialAdaptation (string, optional): Initial adaptation, in numeric value :param maxEpoch (string, optional): Number of iterations, in numeric value :param minAdaptation (string, optional): Minimum adaptation value, in numer ic value :param minRadius (string, optional): Minimum radius, in numeric value :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value c an also be used to specify the current network. :param NodeAttribute (string, optional): The name of the node column contai ning numeric values that will be used as weights in the layout algor ithm. Only columns containing numeric values are shown :param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN :VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values. :param radius (string, optional): Radius, in numeric value :param radiusConstantTime (string, optional): Radius constant, in numeric v alue :param singlePartition (string, optional): Don't partition graph before lay out; boolean values only, true or false; defaults to false :param sizeFactor (string, optional): Size factor, in numeric value
[ "Execute", "the", "Inverted", "Self", "-", "Organizing", "Map", "Layout", "on", "a", "network", "." ]
python
train
twidi/django-extended-choices
extended_choices/helpers.py
https://github.com/twidi/django-extended-choices/blob/bb310c5da4d53685c69173541172e4b813a6afb2/extended_choices/helpers.py#L200-L222
def create_choice_attribute(creator_type, value, choice_entry):
    """Create an instance of a subclass of ChoiceAttributeMixin for the given value.

    Parameters
    ----------
    creator_type : type
        ``ChoiceAttributeMixin`` or a subclass, whose ``get_class_for_value``
        class-method picks the concrete class for *value*.
    value : ?
        The value to wrap in an instance of the chosen subclass.
    choice_entry: ChoiceEntry
        The ``ChoiceEntry`` instance holding the current value, used to access
        its constant, value and display name.

    Returns
    -------
    ChoiceAttributeMixin
        An instance of a subclass of ``creator_type`` for the given value

    """
    attribute_class = creator_type.get_class_for_value(value)
    return attribute_class(value, choice_entry)
[ "def", "create_choice_attribute", "(", "creator_type", ",", "value", ",", "choice_entry", ")", ":", "klass", "=", "creator_type", ".", "get_class_for_value", "(", "value", ")", "return", "klass", "(", "value", ",", "choice_entry", ")" ]
Create an instance of a subclass of ChoiceAttributeMixin for the given value. Parameters ---------- creator_type : type ``ChoiceAttributeMixin`` or a subclass, from which we'll call the ``get_class_for_value`` class-method. value : ? The value for which we want to create an instance of a new subclass of ``creator_type``. choice_entry: ChoiceEntry The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. Returns ------- ChoiceAttributeMixin An instance of a subclass of ``creator_type`` for the given value
[ "Create", "an", "instance", "of", "a", "subclass", "of", "ChoiceAttributeMixin", "for", "the", "given", "value", "." ]
python
train
GeorgeArgyros/sfalearn
sfalearn/observationtableinit.py
https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/observationtableinit.py#L155-L198
def _init_smi(self, graph, access_strings_map):
    """
    Build the initial SMI (access string + symbol) transition table.

    Args:
        graph (DFA): The DFA states
        access_strings_map (list): a dict containing all the access strings for
            each state
    Return:
        list: SMI transition table
    """
    smi = []
    # Sorting on the boolean 'initial' attribute (reverse) visits the
    # initial state(s) first.
    for selected_state in sorted(graph.states, key=attrgetter('initial'), reverse=True):
        # Initially gather all transitions of the state into a dictionary
        # mapping destination state id -> list of characters reaching it.
        transitions_map = defaultdict(list)
        for character in self.alphabet:
            destination_state = self._delta(graph, selected_state, character)
            transitions_map[destination_state.stateid].append(character)
        chars_in_smi = []
        # Order destinations by how many characters lead to them (ascending),
        # so the last entry is the candidate "sink" transition.
        sorted_transitions = sorted(
            transitions_map.items(),
            key=lambda x: len(
                x[1]))
        if len(sorted_transitions) == 1:
            # Just put 1 symbol is enough all other'input_string will be generalized
            # by the guardgen algorithm
            chars_in_smi.append(self.alphabet[0])
        else:
            # Otherwise insert in smi_vector all transitions as explicit except
            # the one from the sink transition where we add just enough
            # explicity transitions to make sure that this state will be
            # selected as the sink state.
            #
            # If no transition has a clear advantage in terms of symbols then
            # just add all transitions in explicit form because it may be the
            # case the guardgen() will generalize in the wrong transition.
            for (_, char_list) in sorted_transitions[:-1]:
                chars_in_smi += char_list
            # One more character than the runner-up guarantees the sink wins.
            sink_chars = len(sorted_transitions[-2][1]) + 1
            chars_in_smi.extend(sorted_transitions[-1][1][:sink_chars])
        # Prefix each selected character with the state's access string.
        access_string = access_strings_map[selected_state.stateid]
        smi.extend([access_string + character for character in chars_in_smi])
    return smi
[ "def", "_init_smi", "(", "self", ",", "graph", ",", "access_strings_map", ")", ":", "smi", "=", "[", "]", "for", "selected_state", "in", "sorted", "(", "graph", ".", "states", ",", "key", "=", "attrgetter", "(", "'initial'", ")", ",", "reverse", "=", "...
Args: graph (DFA): The DFA states access_strings_map (list): a dict containing all the access strings for each state Return: list: SMI transition table
[ "Args", ":", "graph", "(", "DFA", ")", ":", "The", "DFA", "states", "access_strings_map", "(", "list", ")", ":", "a", "dict", "containing", "all", "the", "access", "strings", "for", "each", "state", "Return", ":", "list", ":", "SMI", "transition", "table...
python
train
QunarOPS/qg.core
qg/core/timeutils.py
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L96-L103
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is not None:
        # Time has been overridden (e.g. for tests): honor the override.
        return calendar.timegm(utcnow().timetuple())
    # NOTE(kgriffs): This is several times faster
    # than going through calendar.timegm(...)
    return int(time.time())
[ "def", "utcnow_ts", "(", ")", ":", "if", "utcnow", ".", "override_time", "is", "None", ":", "# NOTE(kgriffs): This is several times faster", "# than going through calendar.timegm(...)", "return", "int", "(", "time", ".", "time", "(", ")", ")", "return", "calendar", ...
Timestamp version of our utcnow function.
[ "Timestamp", "version", "of", "our", "utcnow", "function", "." ]
python
train
bitlabstudio/django-document-library
document_library/south_migrations/0022_move_from_simple_trans_to_hvad.py
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/south_migrations/0022_move_from_simple_trans_to_hvad.py#L27-L49
def forwards(self, orm):
    "Write your forwards methods here."
    # Copy simple-translation category titles into hvad translations.
    category_trans_model = orm['document_library.DocumentCategoryTranslation']
    for category in orm['document_library.DocumentCategory'].objects.all():
        old_titles = orm['document_library.DocumentCategoryTitle'].objects.filter(
            category=category)
        for trans_old in old_titles:
            category_trans_model.objects.create(
                master=category,
                language_code=trans_old.language,
                title=trans_old.title,
            )
    # Same migration for documents, carrying over all translated fields.
    document_trans_model = orm['document_library.DocumentTranslation']
    for document in orm['document_library.Document'].objects.all():
        old_titles = orm['document_library.DocumentTitle'].objects.filter(
            document=document)
        for trans_old in old_titles:
            document_trans_model.objects.create(
                master=document,
                language_code=trans_old.language,
                title=trans_old.title,
                description=trans_old.description,
                filer_file=trans_old.filer_file,
                thumbnail=trans_old.thumbnail,
                copyright_notice=trans_old.copyright_notice,
                is_published=trans_old.is_published,
                meta_description=trans_old.meta_description,
            )
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "for", "category", "in", "orm", "[", "'document_library.DocumentCategory'", "]", ".", "objects", ".", "all", "(", ")", ":", "for", "trans_old", "in", "orm", "[", "'document_library.DocumentCategoryTitle'", ...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
python
train
vertexproject/synapse
synapse/lib/crypto/tinfoil.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/crypto/tinfoil.py#L53-L75
def dec(self, byts):
    '''
    Decode an envelope dict and decrypt the given bytes.

    Args:
        byts (bytes): Bytes to decrypt.

    Returns:
        bytes: Decrypted message.
    '''
    envelope = s_msgpack.un(byts)
    init_vector = envelope.get('iv', b'')
    associated = envelope.get('asscd', b'')
    ciphertext = envelope.get('data', b'')
    decryptor = AESGCM(self.ekey)
    try:
        return decryptor.decrypt(init_vector, ciphertext, associated)
    except Exception:
        # Decryption/authentication failure: log and signal with None.
        logger.exception('Error decrypting data')
        return None
[ "def", "dec", "(", "self", ",", "byts", ")", ":", "envl", "=", "s_msgpack", ".", "un", "(", "byts", ")", "iv", "=", "envl", ".", "get", "(", "'iv'", ",", "b''", ")", "asscd", "=", "envl", ".", "get", "(", "'asscd'", ",", "b''", ")", "data", "...
Decode an envelope dict and decrypt the given bytes. Args: byts (bytes): Bytes to decrypt. Returns: bytes: Decrypted message.
[ "Decode", "an", "envelope", "dict", "and", "decrypt", "the", "given", "bytes", "." ]
python
train
log2timeline/dfvfs
dfvfs/vfs/os_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/os_file_entry.py#L209-L215
def access_time(self):
    """dfdatetime.DateTimeValues: access time or None if not available."""
    stat_info = self._stat_info
    if stat_info is None:
        return None
    # Truncate the (possibly fractional) st_atime to whole seconds.
    return dfdatetime_posix_time.PosixTime(timestamp=int(stat_info.st_atime))
[ "def", "access_time", "(", "self", ")", ":", "if", "self", ".", "_stat_info", "is", "None", ":", "return", "None", "timestamp", "=", "int", "(", "self", ".", "_stat_info", ".", "st_atime", ")", "return", "dfdatetime_posix_time", ".", "PosixTime", "(", "tim...
dfdatetime.DateTimeValues: access time or None if not available.
[ "dfdatetime", ".", "DateTimeValues", ":", "access", "time", "or", "None", "if", "not", "available", "." ]
python
train
loli/medpy
medpy/features/intensity.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L59-L96
def centerdistance(image, voxelspacing = None, mask = slice(None)):
    r"""
    Takes a simple or multi-spectral image and returns its voxel-wise center
    distance in mm. A multi-spectral image must be supplied as a list or tuple
    of its spectra.

    Optionally a binary mask can be supplied to select the voxels for which the
    feature should be extracted.

    The center distance is the exact euclidean distance in mm of each voxels
    center to the central point of the overal image volume.

    Note that this feature is independent of the actual image content, but
    depends solely on its shape. Therefore always a one-dimensional feature is
    returned, even if a multi-spectral image has been supplied.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    centerdistance : ndarray
        The distance of each voxel to the images center.

    See Also
    --------
    centerdistance_xdminus1
    """
    # The feature depends only on shape, so for multi-spectral input the first
    # spectrum suffices.  isinstance replaces the original
    # `type(image) == tuple or type(image) == list` anti-pattern and also
    # accepts subclasses of tuple/list.
    if isinstance(image, (tuple, list)):
        image = image[0]

    return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing)
[ "def", "centerdistance", "(", "image", ",", "voxelspacing", "=", "None", ",", "mask", "=", "slice", "(", "None", ")", ")", ":", "if", "type", "(", "image", ")", "==", "tuple", "or", "type", "(", "image", ")", "==", "list", ":", "image", "=", "image...
r""" Takes a simple or multi-spectral image and returns its voxel-wise center distance in mm. A multi-spectral image must be supplied as a list or tuple of its spectra. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. The center distance is the exact euclidean distance in mm of each voxels center to the central point of the overal image volume. Note that this feature is independent of the actual image content, but depends solely on its shape. Therefore always a one-dimensional feature is returned, even if a multi-spectral image has been supplied. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- centerdistance : ndarray The distance of each voxel to the images center. See Also -------- centerdistance_xdminus1
[ "r", "Takes", "a", "simple", "or", "multi", "-", "spectral", "image", "and", "returns", "its", "voxel", "-", "wise", "center", "distance", "in", "mm", ".", "A", "multi", "-", "spectral", "image", "must", "be", "supplied", "as", "a", "list", "or", "tupl...
python
train
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L143-L153
def get_template_folder():
    """Get path to the folder where the HTML templates are."""
    cfg = get_project_configuration()
    if 'templates' in cfg:
        return cfg['templates']
    # First use: resolve the packaged template directory and persist it
    # in the user's ~/.hwrtrc configuration file.
    rcfile = os.path.join(os.path.expanduser("~"), ".hwrtrc")
    cfg['templates'] = pkg_resources.resource_filename('hwrt', 'templates/')
    with open(rcfile, 'w') as f:
        yaml.dump(cfg, f, default_flow_style=False)
    return cfg['templates']
[ "def", "get_template_folder", "(", ")", ":", "cfg", "=", "get_project_configuration", "(", ")", "if", "'templates'", "not", "in", "cfg", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "rcfile", "=", "os", ".", "path", ".", "...
Get path to the folder where th HTML templates are.
[ "Get", "path", "to", "the", "folder", "where", "th", "HTML", "templates", "are", "." ]
python
train
Dallinger/Dallinger
dallinger/heroku/tools.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/heroku/tools.py#L262-L270
def scale_down_dynos(self):
    """Turn off web and worker dynos, plus clock process if there
    is one and it's active.
    """
    process_names = ["web", "worker"] + (["clock"] if self.clock_is_on else [])
    for process_name in process_names:
        self.scale_down_dyno(process_name)
[ "def", "scale_down_dynos", "(", "self", ")", ":", "processes", "=", "[", "\"web\"", ",", "\"worker\"", "]", "if", "self", ".", "clock_is_on", ":", "processes", ".", "append", "(", "\"clock\"", ")", "for", "process", "in", "processes", ":", "self", ".", "...
Turn off web and worker dynos, plus clock process if there is one and it's active.
[ "Turn", "off", "web", "and", "worker", "dynos", "plus", "clock", "process", "if", "there", "is", "one", "and", "it", "s", "active", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin.py#L96-L106
def db_update_record(self, table_name, column, value):
    """Update records in the DB.

    The original docstring said "Insert", but the statement is an UPDATE.
    The value is now passed as a bound parameter instead of being
    interpolated into the SQL string, which avoids broken statements and
    injection when the value contains quotes.  Table and column names
    cannot be bound parameters and are still interpolated; they are
    expected to come from trusted app metadata, not user input.

    Args:
        table_name (str): The name of the table.
        column (str): The column name in which the value is to be updated.
        value (str): The value to update in the column.
    """
    sql = 'UPDATE {} SET {} = ?'.format(table_name, column)
    cur = self.db_conn.cursor()
    cur.execute(sql, (value,))
[ "def", "db_update_record", "(", "self", ",", "table_name", ",", "column", ",", "value", ")", ":", "sql", "=", "'UPDATE {} SET {} = \\'{}\\''", ".", "format", "(", "table_name", ",", "column", ",", "value", ")", "cur", "=", "self", ".", "db_conn", ".", "cur...
Insert records into DB. Args: table_name (str): The name of the table. column (str): The column name in which the value is to be updated. value (str): The value to update in the column.
[ "Insert", "records", "into", "DB", "." ]
python
train
projecthamster/hamster
src/hamster/widgets/facttree.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/widgets/facttree.py#L258-L261
def current_fact_index(self):
    """Current fact index in the self.facts list."""
    current_id = self.current_fact.id
    # list.index raises ValueError if the id is absent, same as before.
    return [fact.id for fact in self.facts].index(current_id)
[ "def", "current_fact_index", "(", "self", ")", ":", "facts_ids", "=", "[", "fact", ".", "id", "for", "fact", "in", "self", ".", "facts", "]", "return", "facts_ids", ".", "index", "(", "self", ".", "current_fact", ".", "id", ")" ]
Current fact index in the self.facts list.
[ "Current", "fact", "index", "in", "the", "self", ".", "facts", "list", "." ]
python
train
johntruckenbrodt/spatialist
spatialist/raster.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/raster.py#L1037-L1254
def stack(srcfiles, dstfile, resampling, targetres, dstnodata, srcnodata=None, shapefile=None, layernames=None, sortfun=None, separate=False, overwrite=False, compress=True, cores=4): """ function for mosaicking, resampling and stacking of multiple raster files into a 3D data cube Parameters ---------- srcfiles: list a list of file names or a list of lists; each sub-list is treated as a task to mosaic its containing files dstfile: str the destination file or a directory (if `separate` is True) resampling: {near, bilinear, cubic, cubicspline, lanczos, average, mode, max, min, med, Q1, Q3} the resampling method; see `documentation of gdalwarp <https://www.gdal.org/gdalwarp.html>`_. targetres: tuple or list two entries for x and y spatial resolution in units of the source CRS srcnodata: int, float or None the nodata value of the source files; if left at the default (None), the nodata values are read from the files dstnodata: int or float the nodata value of the destination file(s) shapefile: str, Vector or None a shapefile for defining the spatial extent of the destination files layernames: list the names of the output layers; if `None`, the basenames of the input files are used; overrides sortfun sortfun: function a function for sorting the input files; not used if layernames is not None. This is first used for sorting the items in each sub-list of srcfiles; the basename of the first item in a sub-list will then be used as the name for the mosaic of this group. After mosaicing, the function is again used for sorting the names in the final output (only relevant if `separate` is False) separate: bool should the files be written to a single raster stack (ENVI format) or separate files (GTiff format)? overwrite: bool overwrite the file if it already exists? compress: bool compress the geotiff files? 
cores: int the number of CPU threads to use; this is only relevant if `separate` is True, in which case each mosaicing/resampling job is passed to a different CPU Returns ------- Notes ----- This function does not reproject any raster files. Thus, the CRS must be the same for all input raster files. This is checked prior to executing gdalwarp. In case a shapefile is defined, it is internally reprojected to the raster CRS prior to retrieving its extent. Examples -------- .. code-block:: python from pyroSAR.ancillary import groupbyTime, find_datasets, seconds from spatialist.raster import stack # find pyroSAR files by metadata attributes archive_s1 = '/.../sentinel1/GRD/processed' scenes_s1 = find_datasets(archive_s1, sensor=('S1A', 'S1B'), acquisition_mode='IW') # group images by acquisition time groups = groupbyTime(images=scenes_s1, function=seconds, time=30) # mosaic individual groups and stack the mosaics to a single ENVI file # only files overlapping with the shapefile are selected and resampled to its extent stack(srcfiles=groups, dstfile='stack', resampling='bilinear', targetres=(20, 20), srcnodata=-99, dstnodata=-99, shapefile='site.shp', separate=False) """ # perform some checks on the input data if len(dissolve(srcfiles)) == 0: raise RuntimeError('no input files provided to function raster.stack') if layernames is not None: if len(layernames) != len(srcfiles): raise RuntimeError('mismatch between number of source file groups and layernames') if not isinstance(targetres, (list, tuple)) or len(targetres) != 2: raise RuntimeError('targetres must be a list or tuple with two entries for x and y resolution') if len(srcfiles) == 1 and not isinstance(srcfiles[0], list): raise RuntimeError('only one file specified; nothing to be done') if resampling not in ['near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'average', 'mode', 'max', 'min', 'med', 'Q1', 'Q3']: raise RuntimeError('resampling method not supported') projections = list() for x in dissolve(srcfiles): 
try: projection = Raster(x).projection except RuntimeError as e: print('cannot read file: {}'.format(x)) raise e projections.append(projection) projections = list(set(projections)) if len(projections) > 1: raise RuntimeError('raster projection mismatch') elif projections[0] == '': raise RuntimeError('could not retrieve the projection from any of the {} input images'.format(len(srcfiles))) else: srs = projections[0] ########################################################################################## # read shapefile bounding coordinates and reduce list of rasters to those overlapping with the shapefile if shapefile is not None: shp = shapefile.clone() if isinstance(shapefile, Vector) else Vector(shapefile) shp.reproject(srs) ext = shp.extent arg_ext = (ext['xmin'], ext['ymin'], ext['xmax'], ext['ymax']) for i, item in enumerate(srcfiles): group = item if isinstance(item, list) else [item] if layernames is None and sortfun is not None: group = sorted(group, key=sortfun) group = [x for x in group if intersect(shp, Raster(x).bbox())] if len(group) > 1: srcfiles[i] = group elif len(group) == 1: srcfiles[i] = group[0] else: srcfiles[i] = None shp.close() srcfiles = list(filter(None, srcfiles)) else: arg_ext = None ########################################################################################## # set general options and parametrization dst_base = os.path.splitext(dstfile)[0] options_warp = {'options': ['-q'], 'format': 'GTiff' if separate else 'ENVI', 'outputBounds': arg_ext, 'multithread': True, 'dstNodata': dstnodata, 'xRes': targetres[0], 'yRes': targetres[1], 'resampleAlg': resampling} if overwrite: options_warp['options'] += ['-overwrite'] if separate and compress: options_warp['options'] += ['-co', 'COMPRESS=DEFLATE', '-co', 'PREDICTOR=2'] options_buildvrt = {'outputBounds': arg_ext} if srcnodata is not None: options_warp['srcNodata'] = srcnodata options_buildvrt['srcNodata'] = srcnodata 
########################################################################################## # create VRT files for mosaicing for i, group in enumerate(srcfiles): if isinstance(group, list): if len(group) > 1: base = group[0] # in-memory VRT files cannot be shared between multiple processes on Windows # this has to do with different process forking behaviour # see function spatialist.ancillary.multicore and this link: # https://stackoverflow.com/questions/38236211/why-multiprocessing-process-behave-differently-on-windows-and-linux-for-global-o vrt_base = os.path.splitext(os.path.basename(base))[0] + '.vrt' if platform.system() == 'Windows': vrt = os.path.join(tempfile.gettempdir(), vrt_base) else: vrt = '/vsimem/' + vrt_base gdalbuildvrt(group, vrt, options_buildvrt) srcfiles[i] = vrt else: srcfiles[i] = group[0] else: srcfiles[i] = group ########################################################################################## # define the output band names # if no specific layernames are defined, sort files by custom function if layernames is None and sortfun is not None: srcfiles = sorted(srcfiles, key=sortfun) # use the file basenames without extension as band names if none are defined bandnames = [os.path.splitext(os.path.basename(x))[0] for x in srcfiles] if layernames is None else layernames if len(list(set(bandnames))) != len(bandnames): raise RuntimeError('output bandnames are not unique') ########################################################################################## # create the actual image files if separate: if not os.path.isdir(dstfile): os.makedirs(dstfile) dstfiles = [os.path.join(dstfile, x) + '.tif' for x in bandnames] jobs = [x for x in zip(srcfiles, dstfiles)] if not overwrite: jobs = [x for x in jobs if not os.path.isfile(x[1])] if len(jobs) == 0: print('all target tiff files already exist, nothing to be done') return srcfiles, dstfiles = map(list, zip(*jobs)) multicore(gdalwarp, cores=cores, multiargs={'src': srcfiles, 'dst': 
dstfiles}, options=options_warp) else: if len(srcfiles) == 1: options_warp['format'] = 'GTiff' if not dstfile.endswith('.tif'): dstfile = os.path.splitext(dstfile)[0] + '.tif' gdalwarp(srcfiles[0], dstfile, options_warp) else: # create VRT for stacking vrt = '/vsimem/' + os.path.basename(dst_base) + '.vrt' options_buildvrt['options'] = ['-separate'] gdalbuildvrt(srcfiles, vrt, options_buildvrt) # warp files gdalwarp(vrt, dstfile, options_warp) # edit ENVI HDR files to contain specific layer names with envi.HDRobject(dstfile + '.hdr') as hdr: hdr.band_names = bandnames hdr.write()
[ "def", "stack", "(", "srcfiles", ",", "dstfile", ",", "resampling", ",", "targetres", ",", "dstnodata", ",", "srcnodata", "=", "None", ",", "shapefile", "=", "None", ",", "layernames", "=", "None", ",", "sortfun", "=", "None", ",", "separate", "=", "Fals...
function for mosaicking, resampling and stacking of multiple raster files into a 3D data cube Parameters ---------- srcfiles: list a list of file names or a list of lists; each sub-list is treated as a task to mosaic its containing files dstfile: str the destination file or a directory (if `separate` is True) resampling: {near, bilinear, cubic, cubicspline, lanczos, average, mode, max, min, med, Q1, Q3} the resampling method; see `documentation of gdalwarp <https://www.gdal.org/gdalwarp.html>`_. targetres: tuple or list two entries for x and y spatial resolution in units of the source CRS srcnodata: int, float or None the nodata value of the source files; if left at the default (None), the nodata values are read from the files dstnodata: int or float the nodata value of the destination file(s) shapefile: str, Vector or None a shapefile for defining the spatial extent of the destination files layernames: list the names of the output layers; if `None`, the basenames of the input files are used; overrides sortfun sortfun: function a function for sorting the input files; not used if layernames is not None. This is first used for sorting the items in each sub-list of srcfiles; the basename of the first item in a sub-list will then be used as the name for the mosaic of this group. After mosaicing, the function is again used for sorting the names in the final output (only relevant if `separate` is False) separate: bool should the files be written to a single raster stack (ENVI format) or separate files (GTiff format)? overwrite: bool overwrite the file if it already exists? compress: bool compress the geotiff files? cores: int the number of CPU threads to use; this is only relevant if `separate` is True, in which case each mosaicing/resampling job is passed to a different CPU Returns ------- Notes ----- This function does not reproject any raster files. Thus, the CRS must be the same for all input raster files. This is checked prior to executing gdalwarp. 
In case a shapefile is defined, it is internally reprojected to the raster CRS prior to retrieving its extent. Examples -------- .. code-block:: python from pyroSAR.ancillary import groupbyTime, find_datasets, seconds from spatialist.raster import stack # find pyroSAR files by metadata attributes archive_s1 = '/.../sentinel1/GRD/processed' scenes_s1 = find_datasets(archive_s1, sensor=('S1A', 'S1B'), acquisition_mode='IW') # group images by acquisition time groups = groupbyTime(images=scenes_s1, function=seconds, time=30) # mosaic individual groups and stack the mosaics to a single ENVI file # only files overlapping with the shapefile are selected and resampled to its extent stack(srcfiles=groups, dstfile='stack', resampling='bilinear', targetres=(20, 20), srcnodata=-99, dstnodata=-99, shapefile='site.shp', separate=False)
[ "function", "for", "mosaicking", "resampling", "and", "stacking", "of", "multiple", "raster", "files", "into", "a", "3D", "data", "cube" ]
python
train
quantumlib/Cirq
cirq/protocols/resolve_parameters.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/resolve_parameters.py#L65-L99
def resolve_parameters( val: Any, param_resolver: 'cirq.ParamResolverOrSimilarType') -> Any: """Resolves symbol parameters in the effect using the param resolver. This function will use the `_resolve_parameters_` magic method of `val` to resolve any Symbols with concrete values from the given parameter resolver. Args: val: The object to resolve (e.g. the gate, operation, etc) param_resolver: the object to use for resolving all symbols Returns: a gate or operation of the same type, but with all Symbols replaced with floats according to the given ParamResolver. If `val` has no `_resolve_parameters_` method or if it returns NotImplemented, `val` itself is returned. """ if not param_resolver: return val # Ensure its a dictionary wrapped in a ParamResolver. from cirq import ParamResolver # HACK: break cycle. param_resolver = ParamResolver(param_resolver) if isinstance(val, sympy.Basic): return param_resolver.value_of(val) getter = getattr(val, '_resolve_parameters_', None) result = NotImplemented if getter is None else getter(param_resolver) if result is not NotImplemented: return result else: return val
[ "def", "resolve_parameters", "(", "val", ":", "Any", ",", "param_resolver", ":", "'cirq.ParamResolverOrSimilarType'", ")", "->", "Any", ":", "if", "not", "param_resolver", ":", "return", "val", "# Ensure its a dictionary wrapped in a ParamResolver.", "from", "cirq", "im...
Resolves symbol parameters in the effect using the param resolver. This function will use the `_resolve_parameters_` magic method of `val` to resolve any Symbols with concrete values from the given parameter resolver. Args: val: The object to resolve (e.g. the gate, operation, etc) param_resolver: the object to use for resolving all symbols Returns: a gate or operation of the same type, but with all Symbols replaced with floats according to the given ParamResolver. If `val` has no `_resolve_parameters_` method or if it returns NotImplemented, `val` itself is returned.
[ "Resolves", "symbol", "parameters", "in", "the", "effect", "using", "the", "param", "resolver", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L2370-L2426
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000): """ Tries to read an ASCII or Unicode string from the address space of the process. @see: L{read_string} @type lpBaseAddress: int @param lpBaseAddress: Memory address to begin reading. @type fUnicode: bool @param fUnicode: C{True} is the string is expected to be Unicode, C{False} if it's expected to be ANSI. @type dwMaxSize: int @param dwMaxSize: Maximum allowed string length to read, in bytes. @rtype: str, compat.unicode @return: String read from the process memory space. It B{doesn't} include the terminating null character. Returns an empty string on failure. """ # Validate the parameters. if not lpBaseAddress or dwMaxSize == 0: if fUnicode: return u'' return '' if not dwMaxSize: dwMaxSize = 0x1000 # Read the string. szString = self.peek(lpBaseAddress, dwMaxSize) # If the string is Unicode... if fUnicode: # Decode the string. szString = compat.unicode(szString, 'U16', 'replace') ## try: ## szString = compat.unicode(szString, 'U16') ## except UnicodeDecodeError: ## szString = struct.unpack('H' * (len(szString) / 2), szString) ## szString = [ unichr(c) for c in szString ] ## szString = u''.join(szString) # Truncate the string when the first null char is found. szString = szString[ : szString.find(u'\0') ] # If the string is ANSI... else: # Truncate the string when the first null char is found. szString = szString[ : szString.find('\0') ] # Return the decoded string. return szString
[ "def", "peek_string", "(", "self", ",", "lpBaseAddress", ",", "fUnicode", "=", "False", ",", "dwMaxSize", "=", "0x1000", ")", ":", "# Validate the parameters.", "if", "not", "lpBaseAddress", "or", "dwMaxSize", "==", "0", ":", "if", "fUnicode", ":", "return", ...
Tries to read an ASCII or Unicode string from the address space of the process. @see: L{read_string} @type lpBaseAddress: int @param lpBaseAddress: Memory address to begin reading. @type fUnicode: bool @param fUnicode: C{True} is the string is expected to be Unicode, C{False} if it's expected to be ANSI. @type dwMaxSize: int @param dwMaxSize: Maximum allowed string length to read, in bytes. @rtype: str, compat.unicode @return: String read from the process memory space. It B{doesn't} include the terminating null character. Returns an empty string on failure.
[ "Tries", "to", "read", "an", "ASCII", "or", "Unicode", "string", "from", "the", "address", "space", "of", "the", "process", "." ]
python
train
matousc89/padasip
padasip/filters/gngd.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/gngd.py#L158-L175
def adapt(self, d, x): """ Adapt weights according one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array) """ y = np.dot(self.w, x) e = d - y self.eps = self.eps - self.ro * self.mu * e * self.last_e * \ np.dot(x, self.last_x) / \ (np.dot(self.last_x, self.last_x) + self.eps)**2 nu = self.mu / (self.eps + np.dot(x, x)) self.w += nu * e * x self.last_e = e
[ "def", "adapt", "(", "self", ",", "d", ",", "x", ")", ":", "y", "=", "np", ".", "dot", "(", "self", ".", "w", ",", "x", ")", "e", "=", "d", "-", "y", "self", ".", "eps", "=", "self", ".", "eps", "-", "self", ".", "ro", "*", "self", ".",...
Adapt weights according one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array)
[ "Adapt", "weights", "according", "one", "desired", "value", "and", "its", "input", "." ]
python
train
ffcalculator/fantasydata-python
fantasy_data/FantasyData.py
https://github.com/ffcalculator/fantasydata-python/blob/af90cac1e80d8356cffaa80621ee513201f6c661/fantasy_data/FantasyData.py#L38-L70
def _method_call(self, method, category, **kwargs): """ Call API method. Generate request. Parse response. Process errors `method` str API method url for request. Contains parameters `params` dict parameters for method url """ session = requests.Session() try: response = session.get("http://" + self._api_address) except requests.exceptions.ConnectionError: raise FantasyDataError('Error: Cannot connect to the FantasyData API') method = method.format(format=self._response_format, **kwargs) request_url = "/v3/{game_type}/{category}/{format}/{method}?{get_params}".format( game_type=self.game_type, category=category, format=self._response_format, method=method, get_params=self._get_params) response = session.get(self._api_schema + self._api_address + request_url, headers=self._headers) result = response.json() if isinstance(result, dict) and response.status_code: if response.status_code == 401: raise FantasyDataError('Error: Invalid API key') elif response.status_code == 200: # for NBA everything is ok here. pass else: raise FantasyDataError('Error: Failed to get response') return result
[ "def", "_method_call", "(", "self", ",", "method", ",", "category", ",", "*", "*", "kwargs", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "try", ":", "response", "=", "session", ".", "get", "(", "\"http://\"", "+", "self", ".", "_a...
Call API method. Generate request. Parse response. Process errors `method` str API method url for request. Contains parameters `params` dict parameters for method url
[ "Call", "API", "method", ".", "Generate", "request", ".", "Parse", "response", ".", "Process", "errors", "method", "str", "API", "method", "url", "for", "request", ".", "Contains", "parameters", "params", "dict", "parameters", "for", "method", "url" ]
python
train
hubo1016/vlcp
vlcp/utils/http.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/http.py#L567-L572
def outputjson(self, obj): """ Serialize `obj` with JSON and output to the client """ self.header('Content-Type', 'application/json') self.outputdata(json.dumps(obj).encode('ascii'))
[ "def", "outputjson", "(", "self", ",", "obj", ")", ":", "self", ".", "header", "(", "'Content-Type'", ",", "'application/json'", ")", "self", ".", "outputdata", "(", "json", ".", "dumps", "(", "obj", ")", ".", "encode", "(", "'ascii'", ")", ")" ]
Serialize `obj` with JSON and output to the client
[ "Serialize", "obj", "with", "JSON", "and", "output", "to", "the", "client" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1025-L1032
def _init_map(self, record_types=None, **kwargs): """Initialize form map""" osid_objects.OsidObjectForm._init_map(self, record_types=record_types) self._my_map['rubricId'] = self._rubric_default self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])] self._my_map['levelId'] = self._level_default if self._supports_simple_sequencing(): self._my_map['childIds'] = []
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidObjectForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "self", ".", "_my_map", "[", "'rubric...
Initialize form map
[ "Initialize", "form", "map" ]
python
train
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L156-L167
def strand_unknown(db, transcript): """ for unstranded data with novel transcripts single exon genes will have no strand information. single exon novel genes are also a source of noise in the Cufflinks assembly so this removes them """ features = list(db.children(transcript)) strand = features[0].strand if strand == ".": return True else: return False
[ "def", "strand_unknown", "(", "db", ",", "transcript", ")", ":", "features", "=", "list", "(", "db", ".", "children", "(", "transcript", ")", ")", "strand", "=", "features", "[", "0", "]", ".", "strand", "if", "strand", "==", "\".\"", ":", "return", ...
for unstranded data with novel transcripts single exon genes will have no strand information. single exon novel genes are also a source of noise in the Cufflinks assembly so this removes them
[ "for", "unstranded", "data", "with", "novel", "transcripts", "single", "exon", "genes", "will", "have", "no", "strand", "information", ".", "single", "exon", "novel", "genes", "are", "also", "a", "source", "of", "noise", "in", "the", "Cufflinks", "assembly", ...
python
train
alejandroautalan/pygubu
pygubudesigner/util/selecttool.py
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L75-L89
def _finish_selecting(self, event): """Finaliza la seleccion. Marca como seleccionados todos los objetos que se encuentran dentro del recuadro de seleccion.""" self._selecting = False canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) canvas.coords(self._sobject, -1, -1, -1, -1) canvas.itemconfigure(self._sobject, state=tk.HIDDEN) sel_region = self._sstart[0], self._sstart[1], x, y canvas.region_selected = sel_region canvas.event_generate('<<RegionSelected>>')
[ "def", "_finish_selecting", "(", "self", ",", "event", ")", ":", "self", ".", "_selecting", "=", "False", "canvas", "=", "self", ".", "_canvas", "x", "=", "canvas", ".", "canvasx", "(", "event", ".", "x", ")", "y", "=", "canvas", ".", "canvasy", "(",...
Finaliza la seleccion. Marca como seleccionados todos los objetos que se encuentran dentro del recuadro de seleccion.
[ "Finaliza", "la", "seleccion", ".", "Marca", "como", "seleccionados", "todos", "los", "objetos", "que", "se", "encuentran", "dentro", "del", "recuadro", "de", "seleccion", "." ]
python
train
Capitains/MyCapytain
MyCapytain/resources/prototypes/metadata.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/metadata.py#L125-L140
def get_label(self, lang=None): """ Return label for given lang or any default :param lang: Language to request :return: Label value :rtype: Literal """ x = None if lang is None: for obj in self.graph.objects(self.asNode(), RDFS.label): return obj for obj in self.graph.objects(self.asNode(), RDFS.label): x = obj if x.language == lang: return x return x
[ "def", "get_label", "(", "self", ",", "lang", "=", "None", ")", ":", "x", "=", "None", "if", "lang", "is", "None", ":", "for", "obj", "in", "self", ".", "graph", ".", "objects", "(", "self", ".", "asNode", "(", ")", ",", "RDFS", ".", "label", "...
Return label for given lang or any default :param lang: Language to request :return: Label value :rtype: Literal
[ "Return", "label", "for", "given", "lang", "or", "any", "default" ]
python
train
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L5767-L5777
def create(self, create_missing=None): """Do extra work to fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1232855 <https://bugzilla.redhat.com/show_bug.cgi?id=1232855>`_. """ return Realm( self._server_config, id=self.create_json(create_missing)['id'], ).read()
[ "def", "create", "(", "self", ",", "create_missing", "=", "None", ")", ":", "return", "Realm", "(", "self", ".", "_server_config", ",", "id", "=", "self", ".", "create_json", "(", "create_missing", ")", "[", "'id'", "]", ",", ")", ".", "read", "(", "...
Do extra work to fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1232855 <https://bugzilla.redhat.com/show_bug.cgi?id=1232855>`_.
[ "Do", "extra", "work", "to", "fetch", "a", "complete", "set", "of", "attributes", "for", "this", "entity", "." ]
python
train
assemblerflow/flowcraft
flowcraft/generator/engine.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L1589-L1643
def build(self): """Main pipeline builder This method is responsible for building the :py:attr:`NextflowGenerator.template` attribute that will contain the nextflow code of the pipeline. First it builds the header, then sets the main channels, the secondary inputs, secondary channels and finally the status channels. When the pipeline is built, is writes the code to a nextflow file. """ logger.info(colored_print( "\tSuccessfully connected {} process(es) with {} " "fork(s) across {} lane(s) \u2713".format( len(self.processes[1:]), len(self._fork_tree), self.lanes))) # Generate regular nextflow header that sets up the shebang, imports # and all possible initial channels self._build_header() self._set_channels() self._set_init_process() self._set_secondary_channels() logger.info(colored_print( "\tSuccessfully set {} secondary channel(s) \u2713".format( len(self.secondary_channels)))) self._set_compiler_channels() self._set_configurations() logger.info(colored_print( "\tFinished configurations \u2713")) for p in self.processes: self.template += "\n{}".format(p.template_str) self._build_footer() project_root = dirname(self.nf_file) # Write configs self.write_configs(project_root) # Write pipeline file with open(self.nf_file, "w") as fh: fh.write(self.template) logger.info(colored_print( "\tPipeline written into {} \u2713".format(self.nf_file)))
[ "def", "build", "(", "self", ")", ":", "logger", ".", "info", "(", "colored_print", "(", "\"\\tSuccessfully connected {} process(es) with {} \"", "\"fork(s) across {} lane(s) \\u2713\"", ".", "format", "(", "len", "(", "self", ".", "processes", "[", "1", ":", "]", ...
Main pipeline builder This method is responsible for building the :py:attr:`NextflowGenerator.template` attribute that will contain the nextflow code of the pipeline. First it builds the header, then sets the main channels, the secondary inputs, secondary channels and finally the status channels. When the pipeline is built, is writes the code to a nextflow file.
[ "Main", "pipeline", "builder" ]
python
test
Capitains/MyCapytain
MyCapytain/resources/prototypes/metadata.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/metadata.py#L162-L172
def parents(self) -> List["Collection"]: """ Iterator to find parents of current collection, from closest to furthest :rtype: Generator[:class:`Collection`] """ p = self.parent parents = [] while p is not None: parents.append(p) p = p.parent return parents
[ "def", "parents", "(", "self", ")", "->", "List", "[", "\"Collection\"", "]", ":", "p", "=", "self", ".", "parent", "parents", "=", "[", "]", "while", "p", "is", "not", "None", ":", "parents", ".", "append", "(", "p", ")", "p", "=", "p", ".", "...
Iterator to find parents of current collection, from closest to furthest :rtype: Generator[:class:`Collection`]
[ "Iterator", "to", "find", "parents", "of", "current", "collection", "from", "closest", "to", "furthest" ]
python
train
pyviz/holoviews
holoviews/util/parser.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/parser.py#L44-L47
def _strip_commas(cls, kw): "Strip out any leading/training commas from the token" kw = kw[:-1] if kw[-1]==',' else kw return kw[1:] if kw[0]==',' else kw
[ "def", "_strip_commas", "(", "cls", ",", "kw", ")", ":", "kw", "=", "kw", "[", ":", "-", "1", "]", "if", "kw", "[", "-", "1", "]", "==", "','", "else", "kw", "return", "kw", "[", "1", ":", "]", "if", "kw", "[", "0", "]", "==", "','", "els...
Strip out any leading/training commas from the token
[ "Strip", "out", "any", "leading", "/", "training", "commas", "from", "the", "token" ]
python
train
treyhunner/django-simple-history
simple_history/admin.py
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/admin.py#L203-L206
def save_model(self, request, obj, form, change): """Set special model attribute to user for reference after save""" obj._history_user = request.user super(SimpleHistoryAdmin, self).save_model(request, obj, form, change)
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "obj", ".", "_history_user", "=", "request", ".", "user", "super", "(", "SimpleHistoryAdmin", ",", "self", ")", ".", "save_model", "(", "request", ",", ...
Set special model attribute to user for reference after save
[ "Set", "special", "model", "attribute", "to", "user", "for", "reference", "after", "save" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/app_builder.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L140-L165
def is_link_local(link_target): """ :param link_target: The target of a symbolic link, as given by os.readlink() :type link_target: string :returns: A boolean indicating the link is local to the current directory. This is defined to mean that os.path.isabs(link_target) == False and the link NEVER references the parent directory, so "./foo/../../curdir/foo" would return False. :rtype: boolean """ is_local=(not os.path.isabs(link_target)) if is_local: # make sure that the path NEVER extends outside the resources directory! d,l = os.path.split(link_target) link_parts = [] while l: link_parts.append(l) d,l = os.path.split(d) curr_path = os.sep for p in reversed(link_parts): is_local = (is_local and not (curr_path == os.sep and p == os.pardir) ) curr_path = os.path.abspath(os.path.join(curr_path, p)) return is_local
[ "def", "is_link_local", "(", "link_target", ")", ":", "is_local", "=", "(", "not", "os", ".", "path", ".", "isabs", "(", "link_target", ")", ")", "if", "is_local", ":", "# make sure that the path NEVER extends outside the resources directory!", "d", ",", "l", "=",...
:param link_target: The target of a symbolic link, as given by os.readlink() :type link_target: string :returns: A boolean indicating the link is local to the current directory. This is defined to mean that os.path.isabs(link_target) == False and the link NEVER references the parent directory, so "./foo/../../curdir/foo" would return False. :rtype: boolean
[ ":", "param", "link_target", ":", "The", "target", "of", "a", "symbolic", "link", "as", "given", "by", "os", ".", "readlink", "()", ":", "type", "link_target", ":", "string", ":", "returns", ":", "A", "boolean", "indicating", "the", "link", "is", "local"...
python
train
Yelp/yelp-python
yelp/endpoint/business.py
https://github.com/Yelp/yelp-python/blob/12d611bc2344bbc1c93c83775aa71b7b01b36ad6/yelp/endpoint/business.py#L13-L29
def get_by_id(self, business_id, **url_params): """Make a request to the business details endpoint. More info at https://www.yelp.com/developers/documentation/v3/business Args: business_id (str): The business alias (i.e. yelp-san-francisco) or ID (i.e. 4kMBvIEWPxWkWKFN__8SxQ. **url_params: Dict corresponding to business API params https://www.yelp.com/developers/documentation/v3/business Returns: yelp.obj.business.Business object that wraps the response. """ business_path = BUSINESS_PATH.format(business_id=business_id) response = self.client._make_request(business_path, url_params=url_params) return Business(response)
[ "def", "get_by_id", "(", "self", ",", "business_id", ",", "*", "*", "url_params", ")", ":", "business_path", "=", "BUSINESS_PATH", ".", "format", "(", "business_id", "=", "business_id", ")", "response", "=", "self", ".", "client", ".", "_make_request", "(", ...
Make a request to the business details endpoint. More info at https://www.yelp.com/developers/documentation/v3/business Args: business_id (str): The business alias (i.e. yelp-san-francisco) or ID (i.e. 4kMBvIEWPxWkWKFN__8SxQ. **url_params: Dict corresponding to business API params https://www.yelp.com/developers/documentation/v3/business Returns: yelp.obj.business.Business object that wraps the response.
[ "Make", "a", "request", "to", "the", "business", "details", "endpoint", ".", "More", "info", "at", "https", ":", "//", "www", ".", "yelp", ".", "com", "/", "developers", "/", "documentation", "/", "v3", "/", "business" ]
python
train
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3084-L3187
def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. 
>>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs)
[ "def", "eval", "(", "self", ",", "expr", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "eval", "import", "eval", "as", "_eval", "inplace", "=", "validate_bool_kwarg", "(", "inpl...
Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7
[ "Evaluate", "a", "string", "describing", "operations", "on", "DataFrame", "columns", "." ]
python
train
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L1110-L1121
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
[ "def", "post", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", ":", "assert", "wait_for_completion", "is", "True", "# async not supported yet", "cpc_oid", "=", "uri_parms", "[", "0", ...
Operation: Stop CPC (requires DPM mode).
[ "Operation", ":", "Stop", "CPC", "(", "requires", "DPM", "mode", ")", "." ]
python
train
joelfrederico/SciSalt
scisalt/facettools/logbookForm.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/facettools/logbookForm.py#L359-L390
def submitEntry(self): """Process user inputs and subit logbook entry when user clicks Submit button""" # logType = self.logui.logType.currentText() mcclogs, physlogs = self.selectedLogs() success = True if mcclogs != []: if not self.acceptedUser("MCC"): QMessageBox().warning(self, "Invalid User", "Please enter a valid user name!") return fileName = self.xmlSetup("MCC", mcclogs) if fileName is None: return if not self.imagePixmap.isNull(): self.prepareImages(fileName, "MCC") success = self.sendToLogbook(fileName, "MCC") if physlogs != []: for i in range(len(physlogs)): fileName = self.xmlSetup("Physics", physlogs[i]) if fileName is None: return if not self.imagePixmap.isNull(): self.prepareImages(fileName, "Physics") success_phys = self.sendToLogbook(fileName, "Physics", physlogs[i]) success = success and success_phys self.done(success)
[ "def", "submitEntry", "(", "self", ")", ":", "# logType = self.logui.logType.currentText()", "mcclogs", ",", "physlogs", "=", "self", ".", "selectedLogs", "(", ")", "success", "=", "True", "if", "mcclogs", "!=", "[", "]", ":", "if", "not", "self", ".", "acce...
Process user inputs and subit logbook entry when user clicks Submit button
[ "Process", "user", "inputs", "and", "subit", "logbook", "entry", "when", "user", "clicks", "Submit", "button" ]
python
valid
pantsbuild/pants
src/python/pants/reporting/json_reporter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/reporting/json_reporter.py#L83-L86
def handle_output(self, workunit, label, stream): """Implementation of Reporter callback.""" self._root_id_to_workunit_stack[str(workunit.root().id)][-1]['outputs'][label] += stream
[ "def", "handle_output", "(", "self", ",", "workunit", ",", "label", ",", "stream", ")", ":", "self", ".", "_root_id_to_workunit_stack", "[", "str", "(", "workunit", ".", "root", "(", ")", ".", "id", ")", "]", "[", "-", "1", "]", "[", "'outputs'", "]"...
Implementation of Reporter callback.
[ "Implementation", "of", "Reporter", "callback", "." ]
python
train
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L746-L795
def shp2array(shp_fn, r_ds=None, res=None, extent=None, t_srs=None): """Rasterize input shapefile to match existing raster Dataset (or specified res/extent/t_srs) """ shp_ds = ogr.Open(shp_fn) lyr = shp_ds.GetLayer() #This returns xmin, ymin, xmax, ymax shp_extent = lyr_extent(lyr) shp_srs = lyr.GetSpatialRef() # dst_dt = gdal.GDT_Byte ndv = 0 if r_ds is not None: r_extent = ds_extent(r_ds) res = get_res(r_ds, square=True)[0] if extent is None: extent = r_extent r_srs = get_ds_srs(r_ds) r_geom = ds_geom(r_ds) # dst_ns = r_ds.RasterXSize # dst_nl = r_ds.RasterYSize #Convert raster extent to shp_srs cT = osr.CoordinateTransformation(r_srs, shp_srs) r_geom_reproj = geom_dup(r_geom) r_geom_reproj.Transform(cT) r_geom_reproj.AssignSpatialReference(t_srs) lyr.SetSpatialFilter(r_geom_reproj) #lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt)) else: #TODO: clean this up if res is None: sys.exit("Must specify input res") if extent is None: print("Using input shp extent") extent = shp_extent if t_srs is None: t_srs = r_srs if not shp_srs.IsSame(t_srs): print("Input shp srs: %s" % shp_srs.ExportToProj4()) print("Specified output srs: %s" % t_srs.ExportToProj4()) out_ds = lyr_proj(lyr, t_srs) outlyr = out_ds.GetLayer() else: outlyr = lyr #outlyr.SetSpatialFilter(r_geom) m_ds = mem_ds(res, extent, srs=t_srs, dtype=gdal.GDT_Byte) b = m_ds.GetRasterBand(1) b.SetNoDataValue(ndv) gdal.RasterizeLayer(m_ds, [1], outlyr, burn_values=[1]) a = b.ReadAsArray() a = ~(a.astype('Bool')) return a
[ "def", "shp2array", "(", "shp_fn", ",", "r_ds", "=", "None", ",", "res", "=", "None", ",", "extent", "=", "None", ",", "t_srs", "=", "None", ")", ":", "shp_ds", "=", "ogr", ".", "Open", "(", "shp_fn", ")", "lyr", "=", "shp_ds", ".", "GetLayer", "...
Rasterize input shapefile to match existing raster Dataset (or specified res/extent/t_srs)
[ "Rasterize", "input", "shapefile", "to", "match", "existing", "raster", "Dataset", "(", "or", "specified", "res", "/", "extent", "/", "t_srs", ")" ]
python
train
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3806-L3814
def use_comparative_sequence_rule_view(self): """Pass through to provider SequenceRuleLookupSession.use_comparative_sequence_rule_view""" self._object_views['sequence_rule'] = COMPARATIVE # self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_sequence_rule_view() except AttributeError: pass
[ "def", "use_comparative_sequence_rule_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'sequence_rule'", "]", "=", "COMPARATIVE", "# self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self",...
Pass through to provider SequenceRuleLookupSession.use_comparative_sequence_rule_view
[ "Pass", "through", "to", "provider", "SequenceRuleLookupSession", ".", "use_comparative_sequence_rule_view" ]
python
train
quantopian/zipline
zipline/data/bundles/quandl.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L183-L250
def quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): """ quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication """ api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes adjustment_writer.write( splits=parse_splits( raw_data[[ 'sid', 'date', 'split_ratio', ]].loc[raw_data.split_ratio != 1], show_progress=show_progress ), dividends=parse_dividends( raw_data[[ 'sid', 'date', 'ex_dividend', ]].loc[raw_data.ex_dividend != 0], show_progress=show_progress ) )
[ "def", "quandl_bundle", "(", "environ", ",", "asset_db_writer", ",", "minute_bar_writer", ",", "daily_bar_writer", ",", "adjustment_writer", ",", "calendar", ",", "start_session", ",", "end_session", ",", "cache", ",", "show_progress", ",", "output_dir", ")", ":", ...
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication
[ "quandl_bundle", "builds", "a", "daily", "dataset", "using", "Quandl", "s", "WIKI", "Prices", "dataset", "." ]
python
train
LettError/MutatorMath
Lib/mutatorMath/ufo/instance.py
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/instance.py#L390-L418
def save(self): """ Save the UFO.""" # handle glyphs that were muted for name in self.mutedGlyphsNames: if name not in self.font: continue if self.logger: self.logger.info("removing muted glyph %s", name) del self.font[name] # XXX housekeeping: # remove glyph from groups / kerning as well? # remove components referencing this glyph? # fontTools.ufoLib no longer calls os.makedirs for us if the # parent directories of the Font we are saving do not exist. # We want to keep backward compatibility with the previous # MutatorMath behavior, so we create the instance' parent # directories if they do not exist. We assume that the users # knows what they are doing... directory = os.path.dirname(os.path.normpath(self.path)) if directory and not os.path.exists(directory): os.makedirs(directory) try: self.font.save(os.path.abspath(self.path), self.ufoVersion) except defcon.DefconError as error: if self.logger: self.logger.exception("Error generating.") return False, error.report return True, None
[ "def", "save", "(", "self", ")", ":", "# handle glyphs that were muted", "for", "name", "in", "self", ".", "mutedGlyphsNames", ":", "if", "name", "not", "in", "self", ".", "font", ":", "continue", "if", "self", ".", "logger", ":", "self", ".", "logger", ...
Save the UFO.
[ "Save", "the", "UFO", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/lib/mp_util.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/mp_util.py#L237-L246
def dot_mavproxy(name=None): '''return a path to store mavproxy data''' if 'HOME' not in os.environ: dir = os.path.join(os.environ['LOCALAPPDATA'], '.mavproxy') else: dir = os.path.join(os.environ['HOME'], '.mavproxy') mkdir_p(dir) if name is None: return dir return os.path.join(dir, name)
[ "def", "dot_mavproxy", "(", "name", "=", "None", ")", ":", "if", "'HOME'", "not", "in", "os", ".", "environ", ":", "dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'LOCALAPPDATA'", "]", ",", "'.mavproxy'", ")", "else", "...
return a path to store mavproxy data
[ "return", "a", "path", "to", "store", "mavproxy", "data" ]
python
train
osrg/ryu
ryu/services/protocols/bgp/speaker.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/speaker.py#L359-L379
def send_notification(self, code, subcode): """Utility to send notification message. Closes the socket after sending the message. :Parameters: - `socket`: (socket) - socket over which to send notification message. - `code`: (int) - BGP Notification code - `subcode`: (int) - BGP Notification sub-code RFC ref: http://tools.ietf.org/html/rfc4486 http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml """ notification = BGPNotification(code, subcode) reason = notification.reason self._send_with_lock(notification) self._signal_bus.bgp_error(self._peer, code, subcode, reason) if len(self._localname): LOG.error('Sent notification to %r >> %s', self._localname, notification) self._socket.close()
[ "def", "send_notification", "(", "self", ",", "code", ",", "subcode", ")", ":", "notification", "=", "BGPNotification", "(", "code", ",", "subcode", ")", "reason", "=", "notification", ".", "reason", "self", ".", "_send_with_lock", "(", "notification", ")", ...
Utility to send notification message. Closes the socket after sending the message. :Parameters: - `socket`: (socket) - socket over which to send notification message. - `code`: (int) - BGP Notification code - `subcode`: (int) - BGP Notification sub-code RFC ref: http://tools.ietf.org/html/rfc4486 http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
[ "Utility", "to", "send", "notification", "message", "." ]
python
train
dakrauth/django-swingtime
swingtime/views.py
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L43-L88
def event_view( request, pk, template='swingtime/event_detail.html', event_form_class=forms.EventForm, recurrence_form_class=forms.MultipleOccurrenceForm ): ''' View an ``Event`` instance and optionally update either the event or its occurrences. Context parameters: ``event`` the event keyed by ``pk`` ``event_form`` a form object for updating the event ``recurrence_form`` a form object for adding occurrences ''' event = get_object_or_404(Event, pk=pk) event_form = recurrence_form = None if request.method == 'POST': if '_update' in request.POST: event_form = event_form_class(request.POST, instance=event) if event_form.is_valid(): event_form.save(event) return http.HttpResponseRedirect(request.path) elif '_add' in request.POST: recurrence_form = recurrence_form_class(request.POST) if recurrence_form.is_valid(): recurrence_form.save(event) return http.HttpResponseRedirect(request.path) else: return http.HttpResponseBadRequest('Bad Request') data = { 'event': event, 'event_form': event_form or event_form_class(instance=event), 'recurrence_form': recurrence_form or recurrence_form_class( initial={'dtstart': datetime.now()} ) } return render(request, template, data)
[ "def", "event_view", "(", "request", ",", "pk", ",", "template", "=", "'swingtime/event_detail.html'", ",", "event_form_class", "=", "forms", ".", "EventForm", ",", "recurrence_form_class", "=", "forms", ".", "MultipleOccurrenceForm", ")", ":", "event", "=", "get_...
View an ``Event`` instance and optionally update either the event or its occurrences. Context parameters: ``event`` the event keyed by ``pk`` ``event_form`` a form object for updating the event ``recurrence_form`` a form object for adding occurrences
[ "View", "an", "Event", "instance", "and", "optionally", "update", "either", "the", "event", "or", "its", "occurrences", "." ]
python
train
python-rope/rope
rope/base/pycore.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pycore.py#L169-L185
def run_module(self, resource, args=None, stdin=None, stdout=None): """Run `resource` module Returns a `rope.base.oi.doa.PythonFileRunner` object for controlling the process. """ perform_doa = self.project.prefs.get('perform_doi', True) perform_doa = self.project.prefs.get('perform_doa', perform_doa) receiver = self.object_info.doa_data_received if not perform_doa: receiver = None runner = rope.base.oi.doa.PythonFileRunner( self, resource, args, stdin, stdout, receiver) runner.add_finishing_observer(self.module_cache.forget_all_data) runner.run() return runner
[ "def", "run_module", "(", "self", ",", "resource", ",", "args", "=", "None", ",", "stdin", "=", "None", ",", "stdout", "=", "None", ")", ":", "perform_doa", "=", "self", ".", "project", ".", "prefs", ".", "get", "(", "'perform_doi'", ",", "True", ")"...
Run `resource` module Returns a `rope.base.oi.doa.PythonFileRunner` object for controlling the process.
[ "Run", "resource", "module" ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/_signing.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/_signing.py#L399-L591
def generate_signed_url_v4( credentials, resource, expiration, api_access_endpoint=DEFAULT_ENDPOINT, method="GET", content_md5=None, content_type=None, response_type=None, response_disposition=None, generation=None, headers=None, query_parameters=None, _request_timestamp=None, # for testing only ): """Generate a V4 signed URL to provide query-string auth'n to a resource. .. note:: Assumes ``credentials`` implements the :class:`google.auth.credentials.Signing` interface. Also assumes ``credentials`` has a ``service_account_email`` property which identifies the credentials. .. note:: If you are on Google Compute Engine, you can't generate a signed URL. Follow `Issue 922`_ for updates on this. If you'd like to be able to generate a signed URL from GCE, you can use a standard service account from a JSON file rather than a GCE service account. See headers `reference`_ for more details on optional arguments. .. _Issue 922: https://github.com/GoogleCloudPlatform/\ google-cloud-python/issues/922 .. _reference: https://cloud.google.com/storage/docs/reference-headers :type credentials: :class:`google.auth.credentials.Signing` :param credentials: Credentials object with an associated private key to sign text. :type resource: str :param resource: A pointer to a specific resource (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :type api_access_endpoint: str :param api_access_endpoint: Optional URI base. Defaults to "https://storage.googleapis.com/" :type method: str :param method: The HTTP verb that will be used when requesting the URL. Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the signature will additionally contain the `x-goog-resumable` header, and the method changed to POST. 
See the signed URL docs regarding this flow: https://cloud.google.com/storage/docs/access-control/signed-urls :type content_md5: str :param content_md5: (Optional) The MD5 hash of the object referenced by ``resource``. :type content_type: str :param content_type: (Optional) The content type of the object referenced by ``resource``. :type response_type: str :param response_type: (Optional) Content type of responses to requests for the signed URL. Used to over-ride the content type of the underlying resource. :type response_disposition: str :param response_disposition: (Optional) Content disposition of responses to requests for the signed URL. :type generation: str :param generation: (Optional) A value that indicates which generation of the resource to fetch. :type headers: dict :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :type query_parameters: dict :param query_parameters: (Optional) Additional query paramtersto be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers#query :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: str :returns: A signed URL you can use to access the resource until expiration. 
""" ensure_signed_credentials(credentials) expiration_seconds = get_expiration_seconds_v4(expiration) if _request_timestamp is None: now = NOW() request_timestamp = now.strftime("%Y%m%dT%H%M%SZ") datestamp = now.date().strftime("%Y%m%d") else: request_timestamp = _request_timestamp datestamp = _request_timestamp[:8] client_email = credentials.signer_email credential_scope = "{}/auto/storage/goog4_request".format(datestamp) credential = "{}/{}".format(client_email, credential_scope) if headers is None: headers = {} if content_type is not None: headers["Content-Type"] = content_type if content_md5 is not None: headers["Content-MD5"] = content_md5 header_names = [key.lower() for key in headers] if "host" not in header_names: headers["Host"] = "storage.googleapis.com" if method.upper() == "RESUMABLE": method = "POST" headers["x-goog-resumable"] = "start" canonical_headers, ordered_headers = get_canonical_headers(headers) canonical_header_string = ( "\n".join(canonical_headers) + "\n" ) # Yes, Virginia, the extra newline is part of the spec. 
signed_headers = ";".join([key for key, _ in ordered_headers]) if query_parameters is None: query_parameters = {} else: query_parameters = {key: value or "" for key, value in query_parameters.items()} query_parameters["X-Goog-Algorithm"] = "GOOG4-RSA-SHA256" query_parameters["X-Goog-Credential"] = credential query_parameters["X-Goog-Date"] = request_timestamp query_parameters["X-Goog-Expires"] = expiration_seconds query_parameters["X-Goog-SignedHeaders"] = signed_headers if response_type is not None: query_parameters["response-content-type"] = response_type if response_disposition is not None: query_parameters["response-content-disposition"] = response_disposition if generation is not None: query_parameters["generation"] = generation ordered_query_parameters = sorted(query_parameters.items()) canonical_query_string = six.moves.urllib.parse.urlencode(ordered_query_parameters) canonical_elements = [ method, resource, canonical_query_string, canonical_header_string, signed_headers, "UNSIGNED-PAYLOAD", ] canonical_request = "\n".join(canonical_elements) canonical_request_hash = hashlib.sha256( canonical_request.encode("ascii") ).hexdigest() string_elements = [ "GOOG4-RSA-SHA256", request_timestamp, credential_scope, canonical_request_hash, ] string_to_sign = "\n".join(string_elements) signature_bytes = credentials.sign_bytes(string_to_sign.encode("ascii")) signature = binascii.hexlify(signature_bytes).decode("ascii") return "{}{}?{}&X-Goog-Signature={}".format( api_access_endpoint, resource, canonical_query_string, signature )
[ "def", "generate_signed_url_v4", "(", "credentials", ",", "resource", ",", "expiration", ",", "api_access_endpoint", "=", "DEFAULT_ENDPOINT", ",", "method", "=", "\"GET\"", ",", "content_md5", "=", "None", ",", "content_type", "=", "None", ",", "response_type", "=...
Generate a V4 signed URL to provide query-string auth'n to a resource. .. note:: Assumes ``credentials`` implements the :class:`google.auth.credentials.Signing` interface. Also assumes ``credentials`` has a ``service_account_email`` property which identifies the credentials. .. note:: If you are on Google Compute Engine, you can't generate a signed URL. Follow `Issue 922`_ for updates on this. If you'd like to be able to generate a signed URL from GCE, you can use a standard service account from a JSON file rather than a GCE service account. See headers `reference`_ for more details on optional arguments. .. _Issue 922: https://github.com/GoogleCloudPlatform/\ google-cloud-python/issues/922 .. _reference: https://cloud.google.com/storage/docs/reference-headers :type credentials: :class:`google.auth.credentials.Signing` :param credentials: Credentials object with an associated private key to sign text. :type resource: str :param resource: A pointer to a specific resource (typically, ``/bucket-name/path/to/blob.txt``). :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :type api_access_endpoint: str :param api_access_endpoint: Optional URI base. Defaults to "https://storage.googleapis.com/" :type method: str :param method: The HTTP verb that will be used when requesting the URL. Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the signature will additionally contain the `x-goog-resumable` header, and the method changed to POST. See the signed URL docs regarding this flow: https://cloud.google.com/storage/docs/access-control/signed-urls :type content_md5: str :param content_md5: (Optional) The MD5 hash of the object referenced by ``resource``. :type content_type: str :param content_type: (Optional) The content type of the object referenced by ``resource``. :type response_type: str :param response_type: (Optional) Content type of responses to requests for the signed URL. 
Used to over-ride the content type of the underlying resource. :type response_disposition: str :param response_disposition: (Optional) Content disposition of responses to requests for the signed URL. :type generation: str :param generation: (Optional) A value that indicates which generation of the resource to fetch. :type headers: dict :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :type query_parameters: dict :param query_parameters: (Optional) Additional query paramtersto be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers#query :raises: :exc:`TypeError` when expiration is not a valid type. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: str :returns: A signed URL you can use to access the resource until expiration.
[ "Generate", "a", "V4", "signed", "URL", "to", "provide", "query", "-", "string", "auth", "n", "to", "a", "resource", "." ]
python
train
Devoxin/Lavalink.py
lavalink/PlayerManager.py
https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/PlayerManager.py#L153-L156
async def set_pause(self, pause: bool): """ Sets the player's paused state. """ await self._lavalink.ws.send(op='pause', guildId=self.guild_id, pause=pause) self.paused = pause
[ "async", "def", "set_pause", "(", "self", ",", "pause", ":", "bool", ")", ":", "await", "self", ".", "_lavalink", ".", "ws", ".", "send", "(", "op", "=", "'pause'", ",", "guildId", "=", "self", ".", "guild_id", ",", "pause", "=", "pause", ")", "sel...
Sets the player's paused state.
[ "Sets", "the", "player", "s", "paused", "state", "." ]
python
valid
wickerwaka/russound_rio
russound_rio/rio.py
https://github.com/wickerwaka/russound_rio/blob/e331985fd1544abec6a1da3637090550d6f93f76/russound_rio/rio.py#L360-L369
def get_cached_source_variable(self, source_id, variable, default=None): """ Get the cached value of a source variable. If the variable is not cached return the default value. """ source_id = int(source_id) try: return self._retrieve_cached_source_variable( source_id, variable) except UncachedVariable: return default
[ "def", "get_cached_source_variable", "(", "self", ",", "source_id", ",", "variable", ",", "default", "=", "None", ")", ":", "source_id", "=", "int", "(", "source_id", ")", "try", ":", "return", "self", ".", "_retrieve_cached_source_variable", "(", "source_id", ...
Get the cached value of a source variable. If the variable is not cached return the default value.
[ "Get", "the", "cached", "value", "of", "a", "source", "variable", ".", "If", "the", "variable", "is", "not", "cached", "return", "the", "default", "value", "." ]
python
train
tanghaibao/jcvi
jcvi/utils/taxonomy.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/taxonomy.py#L179-L196
def newick(args): """ %prog newick idslist Query a list of IDs to retrieve phylogeny. """ p = OptionParser(newick.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) idsfile, = args mylist = [x.strip() for x in open(idsfile) if x.strip()] print(get_taxids(mylist)) t = TaxIDTree(mylist) print(t)
[ "def", "newick", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "newick", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "n...
%prog newick idslist Query a list of IDs to retrieve phylogeny.
[ "%prog", "newick", "idslist" ]
python
train
aws/sagemaker-containers
src/sagemaker_containers/_files.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_files.py#L140-L157
def s3_download(url, dst): # type: (str, str) -> None """Download a file from S3. Args: url (str): the s3 url of the file. dst (str): the destination where the file will be saved. """ url = parse.urlparse(url) if url.scheme != 's3': raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url)) bucket, key = url.netloc, url.path.lstrip('/') region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV)) s3 = boto3.resource('s3', region_name=region) s3.Bucket(bucket).download_file(key, dst)
[ "def", "s3_download", "(", "url", ",", "dst", ")", ":", "# type: (str, str) -> None", "url", "=", "parse", ".", "urlparse", "(", "url", ")", "if", "url", ".", "scheme", "!=", "'s3'", ":", "raise", "ValueError", "(", "\"Expecting 's3' scheme, got: %s in %s\"", ...
Download a file from S3. Args: url (str): the s3 url of the file. dst (str): the destination where the file will be saved.
[ "Download", "a", "file", "from", "S3", "." ]
python
train
dedupeio/dedupe
dedupe/core.py
https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/core.py#L37-L62
def randomPairs(n_records, sample_size): """ Return random combinations of indices for a square matrix of size n records. For a discussion of how this works see http://stackoverflow.com/a/14839010/98080 """ n = int(n_records * (n_records - 1) / 2) if sample_size >= n: random_pairs = numpy.arange(n, dtype='uint') else: try: random_pairs = numpy.array(random.sample(range(n), sample_size), dtype='uint') except OverflowError: return randomPairsWithReplacement(n_records, sample_size) b = 1 - 2 * n_records root = (-b - 2 * numpy.sqrt(2 * (n - random_pairs) + 0.25)) / 2 i = numpy.floor(root).astype('uint') j = numpy.rint(random_pairs + i * (b + i + 2) / 2 + 1).astype('uint') return zip(i, j)
[ "def", "randomPairs", "(", "n_records", ",", "sample_size", ")", ":", "n", "=", "int", "(", "n_records", "*", "(", "n_records", "-", "1", ")", "/", "2", ")", "if", "sample_size", ">=", "n", ":", "random_pairs", "=", "numpy", ".", "arange", "(", "n", ...
Return random combinations of indices for a square matrix of size n records. For a discussion of how this works see http://stackoverflow.com/a/14839010/98080
[ "Return", "random", "combinations", "of", "indices", "for", "a", "square", "matrix", "of", "size", "n", "records", ".", "For", "a", "discussion", "of", "how", "this", "works", "see", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "1483901...
python
train
celiao/tmdbsimple
tmdbsimple/tv.py
https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/tv.py#L481-L492
def credits(self, **kwargs): """ Get the TV episode credits by combination of season and episode number. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_series_id_season_number_episode_number_path('credits') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "credits", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_series_id_season_number_episode_number_path", "(", "'credits'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set...
Get the TV episode credits by combination of season and episode number. Returns: A dict respresentation of the JSON returned from the API.
[ "Get", "the", "TV", "episode", "credits", "by", "combination", "of", "season", "and", "episode", "number", "." ]
python
test
log2timeline/dfvfs
dfvfs/vfs/apfs_container_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/apfs_container_file_entry.py#L16-L40
def _EntriesGenerator(self): """Retrieves directory entries. Since a directory can contain a vast number of entries using a generator is more memory efficient. Yields: APFSContainerPathSpec: a path specification. """ # Only the virtual root file has directory entries. volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex( self.path_spec) if volume_index is not None: return location = getattr(self.path_spec, 'location', None) if location is None or location != self._file_system.LOCATION_ROOT: return fsapfs_container = self._file_system.GetAPFSContainer() for volume_index in range(0, fsapfs_container.number_of_volumes): yield apfs_container_path_spec.APFSContainerPathSpec( location='/apfs{0:d}'.format(volume_index + 1), volume_index=volume_index, parent=self.path_spec.parent)
[ "def", "_EntriesGenerator", "(", "self", ")", ":", "# Only the virtual root file has directory entries.", "volume_index", "=", "apfs_helper", ".", "APFSContainerPathSpecGetVolumeIndex", "(", "self", ".", "path_spec", ")", "if", "volume_index", "is", "not", "None", ":", ...
Retrieves directory entries. Since a directory can contain a vast number of entries using a generator is more memory efficient. Yields: APFSContainerPathSpec: a path specification.
[ "Retrieves", "directory", "entries", "." ]
python
train
synw/dataswim
dataswim/charts/seaborn.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/seaborn.py#L153-L176
def _get_ticks(self, opts): """ Check if xticks and yticks are set """ opts, _ = self._get_opts(opts, None) if "xticks" not in opts: if "xticks" not in self.chart_opts: self.err(self.dlinear_, "Please set the xticks option for this chart to work") return else: xticks = self.chart_opts["xticks"] else: xticks = opts["xticks"] if "yticks" not in opts: if "yticks"not in self.chart_opts: self.err(self.dlinear_, "Please set the yticks option for this chart to work") return else: yticks = self.chart_opts["yticks"] else: yticks = opts["yticks"] return xticks, yticks
[ "def", "_get_ticks", "(", "self", ",", "opts", ")", ":", "opts", ",", "_", "=", "self", ".", "_get_opts", "(", "opts", ",", "None", ")", "if", "\"xticks\"", "not", "in", "opts", ":", "if", "\"xticks\"", "not", "in", "self", ".", "chart_opts", ":", ...
Check if xticks and yticks are set
[ "Check", "if", "xticks", "and", "yticks", "are", "set" ]
python
train
openid/python-openid
openid/extensions/draft/pape5.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape5.py#L172-L184
def fromOpenIDRequest(cls, request): """Instantiate a Request object from the arguments in a C{checkid_*} OpenID message """ self = cls() args = request.message.getArgs(self.ns_uri) is_openid1 = request.message.isOpenID1() if args == {}: return None self.parseExtensionArgs(args, is_openid1) return self
[ "def", "fromOpenIDRequest", "(", "cls", ",", "request", ")", ":", "self", "=", "cls", "(", ")", "args", "=", "request", ".", "message", ".", "getArgs", "(", "self", ".", "ns_uri", ")", "is_openid1", "=", "request", ".", "message", ".", "isOpenID1", "("...
Instantiate a Request object from the arguments in a C{checkid_*} OpenID message
[ "Instantiate", "a", "Request", "object", "from", "the", "arguments", "in", "a", "C", "{", "checkid_", "*", "}", "OpenID", "message" ]
python
train
DBuildService/dockerfile-parse
dockerfile_parse/parser.py
https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L578-L592
def _add_instruction(self, instruction, value): """ :param instruction: instruction name to be added :param value: instruction value """ if (instruction == 'LABEL' or instruction == 'ENV') and len(value) == 2: new_line = instruction + ' ' + '='.join(map(quote, value)) + '\n' else: new_line = '{0} {1}\n'.format(instruction, value) if new_line: lines = self.lines if not lines[len(lines) - 1].endswith('\n'): new_line = '\n' + new_line lines += new_line self.lines = lines
[ "def", "_add_instruction", "(", "self", ",", "instruction", ",", "value", ")", ":", "if", "(", "instruction", "==", "'LABEL'", "or", "instruction", "==", "'ENV'", ")", "and", "len", "(", "value", ")", "==", "2", ":", "new_line", "=", "instruction", "+", ...
:param instruction: instruction name to be added :param value: instruction value
[ ":", "param", "instruction", ":", "instruction", "name", "to", "be", "added", ":", "param", "value", ":", "instruction", "value" ]
python
train
saltstack/salt
salt/modules/xapi_virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xapi_virt.py#L76-L102
def _get_xapi_session(): ''' Get a session to XenAPI. By default, use the local UNIX socket. ''' _xenapi = _check_xenapi() xapi_uri = __salt__['config.option']('xapi.uri') xapi_login = __salt__['config.option']('xapi.login') xapi_password = __salt__['config.option']('xapi.password') if not xapi_uri: # xend local UNIX socket xapi_uri = 'httpu:///var/run/xend/xen-api.sock' if not xapi_login: xapi_login = '' if not xapi_password: xapi_password = '' try: session = _xenapi.Session(xapi_uri) session.xenapi.login_with_password(xapi_login, xapi_password) yield session.xenapi except Exception: raise CommandExecutionError('Failed to connect to XenAPI socket.') finally: session.xenapi.session.logout()
[ "def", "_get_xapi_session", "(", ")", ":", "_xenapi", "=", "_check_xenapi", "(", ")", "xapi_uri", "=", "__salt__", "[", "'config.option'", "]", "(", "'xapi.uri'", ")", "xapi_login", "=", "__salt__", "[", "'config.option'", "]", "(", "'xapi.login'", ")", "xapi_...
Get a session to XenAPI. By default, use the local UNIX socket.
[ "Get", "a", "session", "to", "XenAPI", ".", "By", "default", "use", "the", "local", "UNIX", "socket", "." ]
python
train
guaix-ucm/numina
numina/core/tagexpr.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L44-L47
def map_tree(visitor, tree): """Apply function to nodes""" newn = [map_tree(visitor, node) for node in tree.nodes] return visitor(tree, newn)
[ "def", "map_tree", "(", "visitor", ",", "tree", ")", ":", "newn", "=", "[", "map_tree", "(", "visitor", ",", "node", ")", "for", "node", "in", "tree", ".", "nodes", "]", "return", "visitor", "(", "tree", ",", "newn", ")" ]
Apply function to nodes
[ "Apply", "function", "to", "nodes" ]
python
train
limpyd/redis-limpyd-jobs
limpyd_jobs/models.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/models.py#L276-L283
def get_from_ident(self, ident): """ Take a string as returned by get_ident and return a job, based on the class representation and the job's pk from the ident """ model_repr, job_pk = ident.split(':', 1) klass = import_class(model_repr) return klass.get(job_pk)
[ "def", "get_from_ident", "(", "self", ",", "ident", ")", ":", "model_repr", ",", "job_pk", "=", "ident", ".", "split", "(", "':'", ",", "1", ")", "klass", "=", "import_class", "(", "model_repr", ")", "return", "klass", ".", "get", "(", "job_pk", ")" ]
Take a string as returned by get_ident and return a job, based on the class representation and the job's pk from the ident
[ "Take", "a", "string", "as", "returned", "by", "get_ident", "and", "return", "a", "job", "based", "on", "the", "class", "representation", "and", "the", "job", "s", "pk", "from", "the", "ident" ]
python
train
mandiant/ioc_writer
ioc_writer/ioc_common.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L919-L931
def make_serviceitem_servicedllsignatureexists(dll_sig_exists, condition='is', negate=False): """ Create a node for ServiceItem/serviceDLLSignatureExists :return: A IndicatorItem represented as an Element node """ document = 'ServiceItem' search = 'ServiceItem/serviceDLLSignatureExists' content_type = 'bool' content = dll_sig_exists ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content, negate=negate) return ii_node
[ "def", "make_serviceitem_servicedllsignatureexists", "(", "dll_sig_exists", ",", "condition", "=", "'is'", ",", "negate", "=", "False", ")", ":", "document", "=", "'ServiceItem'", "search", "=", "'ServiceItem/serviceDLLSignatureExists'", "content_type", "=", "'bool'", "...
Create a node for ServiceItem/serviceDLLSignatureExists :return: A IndicatorItem represented as an Element node
[ "Create", "a", "node", "for", "ServiceItem", "/", "serviceDLLSignatureExists", ":", "return", ":", "A", "IndicatorItem", "represented", "as", "an", "Element", "node" ]
python
train
django-admin-tools/django-admin-tools
admin_tools/utils.py
https://github.com/django-admin-tools/django-admin-tools/blob/ba6f46f51ebd84fcf84f2f79ec9487f45452d79b/admin_tools/utils.py#L166-L173
def _get_admin_change_url(self, model, context): """ Returns the admin change url. """ app_label = model._meta.app_label return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context), app_label, model.__name__.lower()))
[ "def", "_get_admin_change_url", "(", "self", ",", "model", ",", "context", ")", ":", "app_label", "=", "model", ".", "_meta", ".", "app_label", "return", "reverse", "(", "'%s:%s_%s_changelist'", "%", "(", "get_admin_site_name", "(", "context", ")", ",", "app_l...
Returns the admin change url.
[ "Returns", "the", "admin", "change", "url", "." ]
python
train
google/brotli
research/brotlidump.py
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1578-L1615
def processStream(self): """Process a brotli stream. """ print('addr hex{:{}s}binary context explanation'.format( '', self.width-10)) print('Stream header'.center(60, '-')) self.windowSize = self.verboseRead(WindowSizeAlphabet()) print('Metablock header'.center(60, '=')) self.ISLAST = False self.output = bytearray() while not self.ISLAST: self.ISLAST = self.verboseRead( BoolCode('LAST', description="Last block")) if self.ISLAST: if self.verboseRead( BoolCode('EMPTY', description="Empty block")): break if self.metablockLength(): continue if not self.ISLAST and self.uncompressed(): continue print('Block type descriptors'.center(60, '-')) self.numberOfBlockTypes = {} self.currentBlockCounts = {} self.blockTypeCodes = {} self.blockCountCodes = {} for blockType in (L,I,D): self.blockType(blockType) print('Distance code parameters'.center(60, '-')) self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet()) self.readLiteralContextModes() print('Context maps'.center(60, '-')) self.cmaps = {} #keep the number of each kind of prefix tree for the last loop numberOfTrees = {I: self.numberOfBlockTypes[I]} for blockType in (L,D): numberOfTrees[blockType] = self.contextMap(blockType) print('Prefix code lists'.center(60, '-')) self.prefixCodes = {} for blockType in (L,I,D): self.readPrefixArray(blockType, numberOfTrees[blockType]) self.metablock()
[ "def", "processStream", "(", "self", ")", ":", "print", "(", "'addr hex{:{}s}binary context explanation'", ".", "format", "(", "''", ",", "self", ".", "width", "-", "10", ")", ")", "print", "(", "'Stream header'", ".", "center", "(", "60", ",", "'-'", ")"...
Process a brotli stream.
[ "Process", "a", "brotli", "stream", "." ]
python
test
brocade/pynos
pynos/device.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/device.py#L251-L269
def firmware_version(self): """ Returns firmware version. Args: None Returns: Dictionary Raises: None """ namespace = "urn:brocade.com:mgmt:brocade-firmware-ext" request_ver = ET.Element("show-firmware-version", xmlns=namespace) ver = self._callback(request_ver, handler='get') return ver.find('.//*{%s}os-version' % namespace).text
[ "def", "firmware_version", "(", "self", ")", ":", "namespace", "=", "\"urn:brocade.com:mgmt:brocade-firmware-ext\"", "request_ver", "=", "ET", ".", "Element", "(", "\"show-firmware-version\"", ",", "xmlns", "=", "namespace", ")", "ver", "=", "self", ".", "_callback"...
Returns firmware version. Args: None Returns: Dictionary Raises: None
[ "Returns", "firmware", "version", "." ]
python
train
sveetch/boussole
boussole/inspector.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/inspector.py#L181-L200
def children(self, sourcepath, recursive=True): """ Recursively find all children that are imported from the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enabled recursive finding (if True). Default to True. Returns: set: List of finded parents path. """ return self._get_recursive_dependancies( self._CHILDREN_MAP, sourcepath, recursive=True )
[ "def", "children", "(", "self", ",", "sourcepath", ",", "recursive", "=", "True", ")", ":", "return", "self", ".", "_get_recursive_dependancies", "(", "self", ".", "_CHILDREN_MAP", ",", "sourcepath", ",", "recursive", "=", "True", ")" ]
Recursively find all children that are imported from the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enabled recursive finding (if True). Default to True. Returns: set: List of finded parents path.
[ "Recursively", "find", "all", "children", "that", "are", "imported", "from", "the", "given", "source", "path", "." ]
python
train
licenses/lice
lice/core.py
https://github.com/licenses/lice/blob/71635c2544d5edf9e93af4141467763916a86624/lice/core.py#L93-L100
def clean_path(p): """ Clean a path by expanding user and environment variables and ensuring absolute path. """ p = os.path.expanduser(p) p = os.path.expandvars(p) p = os.path.abspath(p) return p
[ "def", "clean_path", "(", "p", ")", ":", "p", "=", "os", ".", "path", ".", "expanduser", "(", "p", ")", "p", "=", "os", ".", "path", ".", "expandvars", "(", "p", ")", "p", "=", "os", ".", "path", ".", "abspath", "(", "p", ")", "return", "p" ]
Clean a path by expanding user and environment variables and ensuring absolute path.
[ "Clean", "a", "path", "by", "expanding", "user", "and", "environment", "variables", "and", "ensuring", "absolute", "path", "." ]
python
train
openstax/cnx-archive
cnxarchive/views/sitemap.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/sitemap.py#L39-L47
def notblocked(page): """Determine if given url is a page that should be in sitemap.""" for blocked in PAGES_TO_BLOCK: if blocked[0] != '*': blocked = '*' + blocked rx = re.compile(blocked.replace('*', '[^$]*')) if rx.match(page): return False return True
[ "def", "notblocked", "(", "page", ")", ":", "for", "blocked", "in", "PAGES_TO_BLOCK", ":", "if", "blocked", "[", "0", "]", "!=", "'*'", ":", "blocked", "=", "'*'", "+", "blocked", "rx", "=", "re", ".", "compile", "(", "blocked", ".", "replace", "(", ...
Determine if given url is a page that should be in sitemap.
[ "Determine", "if", "given", "url", "is", "a", "page", "that", "should", "be", "in", "sitemap", "." ]
python
train
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L1468-L1478
def update(self, obj, size): '''Update this profile. ''' self.number += 1 self.total += size if self.high < size: # largest self.high = size try: # prefer using weak ref self.objref, self.weak = Weakref.ref(obj), True except TypeError: self.objref, self.weak = obj, False
[ "def", "update", "(", "self", ",", "obj", ",", "size", ")", ":", "self", ".", "number", "+=", "1", "self", ".", "total", "+=", "size", "if", "self", ".", "high", "<", "size", ":", "# largest", "self", ".", "high", "=", "size", "try", ":", "# pref...
Update this profile.
[ "Update", "this", "profile", "." ]
python
train
dade-ai/snipy
snipy/io/fileutil.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L348-L359
def walkfolder(toppath, pred): """ walk folder if pred(foldername) is True :type toppath: str :type pred: function(str) => bool """ for entry in scandir.scandir(toppath): if not entry.is_dir() or not pred(entry.name): continue yield entry.path for p in walkfolder(entry.path, pred): yield p
[ "def", "walkfolder", "(", "toppath", ",", "pred", ")", ":", "for", "entry", "in", "scandir", ".", "scandir", "(", "toppath", ")", ":", "if", "not", "entry", ".", "is_dir", "(", ")", "or", "not", "pred", "(", "entry", ".", "name", ")", ":", "continu...
walk folder if pred(foldername) is True :type toppath: str :type pred: function(str) => bool
[ "walk", "folder", "if", "pred", "(", "foldername", ")", "is", "True", ":", "type", "toppath", ":", "str", ":", "type", "pred", ":", "function", "(", "str", ")", "=", ">", "bool" ]
python
valid
samuelcolvin/arq
arq/worker.py
https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L213-L225
async def run_check(self) -> int: """ Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs` if any jobs have failed. :return: number of completed jobs """ await self.async_run() if self.jobs_failed: failed_job_results = [r for r in await self.pool.all_job_results() if not r.success] raise FailedJobs(self.jobs_failed, failed_job_results) else: return self.jobs_complete
[ "async", "def", "run_check", "(", "self", ")", "->", "int", ":", "await", "self", ".", "async_run", "(", ")", "if", "self", ".", "jobs_failed", ":", "failed_job_results", "=", "[", "r", "for", "r", "in", "await", "self", ".", "pool", ".", "all_job_resu...
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs` if any jobs have failed. :return: number of completed jobs
[ "Run", ":", "func", ":", "arq", ".", "worker", ".", "Worker", ".", "async_run", "check", "for", "failed", "jobs", "and", "raise", ":", "class", ":", "arq", ".", "worker", ".", "FailedJobs", "if", "any", "jobs", "have", "failed", "." ]
python
train
captin411/ofxclient
ofxclient/institution.py
https://github.com/captin411/ofxclient/blob/4da2719f0ecbbf5eee62fb82c1b3b34ec955ee5e/ofxclient/institution.py#L134-L151
def accounts(self): """Ask the bank for the known :py:class:`ofxclient.Account` list. :rtype: list of :py:class:`ofxclient.Account` objects """ from ofxclient.account import Account client = self.client() query = client.account_list_query() resp = client.post(query) resp_handle = StringIO(resp) if IS_PYTHON_2: parsed = OfxParser.parse(resp_handle) else: parsed = OfxParser.parse(BytesIO(resp_handle.read().encode())) return [Account.from_ofxparse(a, institution=self) for a in parsed.accounts]
[ "def", "accounts", "(", "self", ")", ":", "from", "ofxclient", ".", "account", "import", "Account", "client", "=", "self", ".", "client", "(", ")", "query", "=", "client", ".", "account_list_query", "(", ")", "resp", "=", "client", ".", "post", "(", "q...
Ask the bank for the known :py:class:`ofxclient.Account` list. :rtype: list of :py:class:`ofxclient.Account` objects
[ "Ask", "the", "bank", "for", "the", "known", ":", "py", ":", "class", ":", "ofxclient", ".", "Account", "list", "." ]
python
train
tensorpack/tensorpack
examples/basics/export-model.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/basics/export-model.py#L106-L113
def export_serving(model_path): """Export trained model to use it in TensorFlow Serving or cloudML. """ pred_config = PredictConfig( session_init=get_model_loader(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes']) ModelExporter(pred_config).export_serving('/tmp/exported')
[ "def", "export_serving", "(", "model_path", ")", ":", "pred_config", "=", "PredictConfig", "(", "session_init", "=", "get_model_loader", "(", "model_path", ")", ",", "model", "=", "InferenceOnlyModel", "(", ")", ",", "input_names", "=", "[", "'input_img_bytes'", ...
Export trained model to use it in TensorFlow Serving or cloudML.
[ "Export", "trained", "model", "to", "use", "it", "in", "TensorFlow", "Serving", "or", "cloudML", "." ]
python
train
tornadoweb/tornado
tornado/auth.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/auth.py#L468-L490
async def _oauth_get_user_future( self, access_token: Dict[str, Any] ) -> Dict[str, Any]: """Subclasses must override this to get basic information about the user. Should be a coroutine whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. The access token will be added to the returned dictionary to make the result of `get_authenticated_user`. .. versionchanged:: 5.1 Subclasses may also define this method with ``async def``. .. versionchanged:: 6.0 A synchronous fallback to ``_oauth_get_user`` was removed. """ raise NotImplementedError()
[ "async", "def", "_oauth_get_user_future", "(", "self", ",", "access_token", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "raise", "NotImplementedError", "(", ")" ]
Subclasses must override this to get basic information about the user. Should be a coroutine whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. The access token will be added to the returned dictionary to make the result of `get_authenticated_user`. .. versionchanged:: 5.1 Subclasses may also define this method with ``async def``. .. versionchanged:: 6.0 A synchronous fallback to ``_oauth_get_user`` was removed.
[ "Subclasses", "must", "override", "this", "to", "get", "basic", "information", "about", "the", "user", "." ]
python
train
Alignak-monitoring/alignak
alignak/downtime.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/downtime.py#L379-L394
def get_raise_brok(self, host_name, service_name=''): """Get a start downtime brok :param host_name: host concerned by the downtime :type host_name :param service_name: service concerned by the downtime :type service_name :return: brok with wanted data :rtype: alignak.brok.Brok """ data = self.serialize() data['host'] = host_name if service_name != '': data['service'] = service_name return Brok({'type': 'downtime_raise', 'data': data})
[ "def", "get_raise_brok", "(", "self", ",", "host_name", ",", "service_name", "=", "''", ")", ":", "data", "=", "self", ".", "serialize", "(", ")", "data", "[", "'host'", "]", "=", "host_name", "if", "service_name", "!=", "''", ":", "data", "[", "'servi...
Get a start downtime brok :param host_name: host concerned by the downtime :type host_name :param service_name: service concerned by the downtime :type service_name :return: brok with wanted data :rtype: alignak.brok.Brok
[ "Get", "a", "start", "downtime", "brok" ]
python
train
dwavesystems/dwave_networkx
dwave_networkx/drawing/chimera_layout.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/drawing/chimera_layout.py#L295-L336
def draw_chimera_yield(G, **kwargs): """Draws the given graph G with highlighted faults, according to layout. Parameters ---------- G : NetworkX graph The graph to be parsed for faults unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not faults. If unused_color is None, these nodes and edges will not be shown at all. fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0)) A color to represent nodes absent from the graph G. Colors should be length-4 tuples of floats between 0 and 1 inclusive. fault_shape : string, optional (default='x') The shape of the fault nodes. Specification is as matplotlib.scatter marker, one of 'so^>v<dph8'. fault_style : string, optional (default='dashed') Edge fault line style (solid|dashed|dotted,dashdot) kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored. """ try: assert(G.graph["family"] == "chimera") m = G.graph["columns"] n = G.graph["rows"] t = G.graph["tile"] coordinates = G.graph["labels"] == "coordinate" except: raise ValueError("Target chimera graph needs to have columns, rows, \ tile, and label attributes to be able to identify faulty qubits.") perfect_graph = chimera_graph(m,n,t, coordinates=coordinates) draw_yield(G, chimera_layout(perfect_graph), perfect_graph, **kwargs)
[ "def", "draw_chimera_yield", "(", "G", ",", "*", "*", "kwargs", ")", ":", "try", ":", "assert", "(", "G", ".", "graph", "[", "\"family\"", "]", "==", "\"chimera\"", ")", "m", "=", "G", ".", "graph", "[", "\"columns\"", "]", "n", "=", "G", ".", "g...
Draws the given graph G with highlighted faults, according to layout. Parameters ---------- G : NetworkX graph The graph to be parsed for faults unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not faults. If unused_color is None, these nodes and edges will not be shown at all. fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0)) A color to represent nodes absent from the graph G. Colors should be length-4 tuples of floats between 0 and 1 inclusive. fault_shape : string, optional (default='x') The shape of the fault nodes. Specification is as matplotlib.scatter marker, one of 'so^>v<dph8'. fault_style : string, optional (default='dashed') Edge fault line style (solid|dashed|dotted,dashdot) kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored.
[ "Draws", "the", "given", "graph", "G", "with", "highlighted", "faults", "according", "to", "layout", "." ]
python
train
log2timeline/plaso
plaso/formatters/manager.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/manager.py#L37-L64
def GetFormatterObject(cls, data_type): """Retrieves the formatter object for a specific data type. Args: data_type (str): data type. Returns: EventFormatter: corresponding formatter or the default formatter if not available. """ data_type = data_type.lower() if data_type not in cls._formatter_objects: formatter_object = None if data_type in cls._formatter_classes: formatter_class = cls._formatter_classes[data_type] # TODO: remove the need to instantiate the Formatter classes # and use class methods only. formatter_object = formatter_class() if not formatter_object: logger.warning( 'Using default formatter for data type: {0:s}'.format(data_type)) formatter_object = default.DefaultFormatter() cls._formatter_objects[data_type] = formatter_object return cls._formatter_objects[data_type]
[ "def", "GetFormatterObject", "(", "cls", ",", "data_type", ")", ":", "data_type", "=", "data_type", ".", "lower", "(", ")", "if", "data_type", "not", "in", "cls", ".", "_formatter_objects", ":", "formatter_object", "=", "None", "if", "data_type", "in", "cls"...
Retrieves the formatter object for a specific data type. Args: data_type (str): data type. Returns: EventFormatter: corresponding formatter or the default formatter if not available.
[ "Retrieves", "the", "formatter", "object", "for", "a", "specific", "data", "type", "." ]
python
train
singularityhub/sregistry-cli
sregistry/main/s3/__init__.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/s3/__init__.py#L118-L141
def _update_secrets(self, base=None): '''update secrets will update/get the base for the server, along with the bucket name, defaulting to sregistry. ''' # We are required to have a base, either from environment or terminal self.base = self._get_and_update_setting('SREGISTRY_S3_BASE', self.base) self._id = self._required_get_and_update('AWS_ACCESS_KEY_ID') self._key = self._required_get_and_update('AWS_SECRET_ACCESS_KEY') # Get the desired S3 signature. Default is the current "s3v4" signature. # If specified, user can request "s3" (v2 old) signature self._signature = self._get_and_update_setting('SREGISTRY_S3_SIGNATURE') if self._signature == 's3': # Requested signature is S3 V2 self._signature = 's3' else: # self._signature is not set or not set to s3 (v2), default to s3v4 self._signature = 's3v4' # Define self.bucket_name, self.s3, then self.bucket self.get_bucket_name() self.get_resource() self.get_bucket()
[ "def", "_update_secrets", "(", "self", ",", "base", "=", "None", ")", ":", "# We are required to have a base, either from environment or terminal", "self", ".", "base", "=", "self", ".", "_get_and_update_setting", "(", "'SREGISTRY_S3_BASE'", ",", "self", ".", "base", ...
update secrets will update/get the base for the server, along with the bucket name, defaulting to sregistry.
[ "update", "secrets", "will", "update", "/", "get", "the", "base", "for", "the", "server", "along", "with", "the", "bucket", "name", "defaulting", "to", "sregistry", "." ]
python
test
muckamuck/stackility
stackility/command.py
https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L81-L102
def delete(stack, region, profile): """ Delete the given CloudFormation stack. """ ini_data = {} environment = {} environment['stack_name'] = stack if region: environment['region'] = region else: environment['region'] = find_myself() if profile: environment['profile'] = profile ini_data['environment'] = environment if start_smash(ini_data): sys.exit(0) else: sys.exit(1)
[ "def", "delete", "(", "stack", ",", "region", ",", "profile", ")", ":", "ini_data", "=", "{", "}", "environment", "=", "{", "}", "environment", "[", "'stack_name'", "]", "=", "stack", "if", "region", ":", "environment", "[", "'region'", "]", "=", "regi...
Delete the given CloudFormation stack.
[ "Delete", "the", "given", "CloudFormation", "stack", "." ]
python
train
Dentosal/python-sc2
sc2/game_data.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/game_data.py#L222-L234
def cost_zerg_corrected(self) -> "Cost": """ This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """ if self.race == Race.Zerg and Attribute.Structure.value in self.attributes: # a = self._game_data.units(UnitTypeId.ZERGLING) # print(a) # print(vars(a)) return Cost( self._proto.mineral_cost - 50, self._proto.vespene_cost, self._proto.build_time ) else: return self.cost
[ "def", "cost_zerg_corrected", "(", "self", ")", "->", "\"Cost\"", ":", "if", "self", ".", "race", "==", "Race", ".", "Zerg", "and", "Attribute", ".", "Structure", ".", "value", "in", "self", ".", "attributes", ":", "# a = self._game_data.units(UnitTypeId.ZERGLIN...
This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively
[ "This", "returns", "25", "for", "extractor", "and", "200", "for", "spawning", "pool", "instead", "of", "75", "and", "250", "respectively" ]
python
train
Bogdanp/anom-py
anom/conditions.py
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/conditions.py#L36-L38
def is_false(entity, prop, name): "bool: True if the value of a property is False." return is_not_empty(entity, prop, name) and name in entity._data and not bool(getattr(entity, name))
[ "def", "is_false", "(", "entity", ",", "prop", ",", "name", ")", ":", "return", "is_not_empty", "(", "entity", ",", "prop", ",", "name", ")", "and", "name", "in", "entity", ".", "_data", "and", "not", "bool", "(", "getattr", "(", "entity", ",", "name...
bool: True if the value of a property is False.
[ "bool", ":", "True", "if", "the", "value", "of", "a", "property", "is", "False", "." ]
python
train
swimlane/swimlane-python
swimlane/core/cache.py
https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/cache.py#L96-L103
def clear(self, *resource_types): """Clear cache for each provided APIResource class, or all resources if no classes are provided""" resource_types = resource_types or tuple(self.__caches.keys()) for cls in resource_types: # Clear and delete cache instances to guarantee no lingering references self.__caches[cls].clear() del self.__caches[cls]
[ "def", "clear", "(", "self", ",", "*", "resource_types", ")", ":", "resource_types", "=", "resource_types", "or", "tuple", "(", "self", ".", "__caches", ".", "keys", "(", ")", ")", "for", "cls", "in", "resource_types", ":", "# Clear and delete cache instances ...
Clear cache for each provided APIResource class, or all resources if no classes are provided
[ "Clear", "cache", "for", "each", "provided", "APIResource", "class", "or", "all", "resources", "if", "no", "classes", "are", "provided" ]
python
train
quantmind/dynts
dynts/api/timeseries.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/api/timeseries.py#L150-L155
def dates(self, desc=None): '''Returns an iterable over ``datetime.date`` instances in the timeseries.''' c = self.dateinverse for key in self.keys(desc=desc): yield c(key)
[ "def", "dates", "(", "self", ",", "desc", "=", "None", ")", ":", "c", "=", "self", ".", "dateinverse", "for", "key", "in", "self", ".", "keys", "(", "desc", "=", "desc", ")", ":", "yield", "c", "(", "key", ")" ]
Returns an iterable over ``datetime.date`` instances in the timeseries.
[ "Returns", "an", "iterable", "over", "datetime", ".", "date", "instances", "in", "the", "timeseries", "." ]
python
train
MolSSI-BSE/basis_set_exchange
basis_set_exchange/printing.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/printing.py#L90-L105
def ecp_pot_str(pot): '''Return a string representing the data for an ECP potential ''' am = pot['angular_momentum'] amchar = lut.amint_to_char(am) rexponents = pot['r_exponents'] gexponents = pot['gaussian_exponents'] coefficients = pot['coefficients'] point_places = [0, 10, 33] s = 'Potential: {} potential\n'.format(amchar) s += 'Type: {}\n'.format(pot['ecp_type']) s += write_matrix([rexponents, gexponents, *coefficients], point_places) return s
[ "def", "ecp_pot_str", "(", "pot", ")", ":", "am", "=", "pot", "[", "'angular_momentum'", "]", "amchar", "=", "lut", ".", "amint_to_char", "(", "am", ")", "rexponents", "=", "pot", "[", "'r_exponents'", "]", "gexponents", "=", "pot", "[", "'gaussian_exponen...
Return a string representing the data for an ECP potential
[ "Return", "a", "string", "representing", "the", "data", "for", "an", "ECP", "potential" ]
python
train
HEPData/hepdata-converter
hepdata_converter/parsers/oldhepdata_parser.py
https://github.com/HEPData/hepdata-converter/blob/354271448980efba86f2f3d27b99d818e75fd90d/hepdata_converter/parsers/oldhepdata_parser.py#L204-L406
def _parse_table_data(self, data): """Parse dataset data of the original HEPData format :param data: header of the table to be parsed :raise ValueError: """ header = data.split(':') self.current_table.data_header = header for i, h in enumerate(header): header[i] = h.strip() x_count = header.count('x') y_count = header.count('y') if not self.current_table.xheaders: raise BadFormat("*xheader line needs to appear before *data: %s" % data) if not self.current_table.yheaders: raise BadFormat("*yheader line needs to appear before *data: %s" % data) # use deepcopy to avoid references in yaml... may not be required, and will very probably be refactored # TODO - is this appropriate behavior, or are references in YAML files acceptable, they are certainly less human readable self.current_table.data = {'independent_variables': [{'header': self.current_table.xheaders[i] if i < len(self.current_table.xheaders) else copy.deepcopy(self.current_table.xheaders[-1]), 'values': []} for i in range(x_count)], 'dependent_variables': [{'header': self.current_table.yheaders[i] if i < len(self.current_table.yheaders) else copy.deepcopy(self.current_table.yheaders[-1]), 'qualifiers': [self.current_table.qualifiers[j][i] if i < len(self.current_table.qualifiers[j]) else copy.deepcopy(self.current_table.qualifiers[j][-1]) for j in range(len(self.current_table.qualifiers)) ], 'values': []} for i in range(y_count)]} xy_mapping = [] current_x_count = 0 current_y_count = 0 for h in header: if h == 'x': xy_mapping.append(current_x_count) current_x_count += 1 if h == 'y': xy_mapping.append(current_y_count) current_y_count += 1 last_index = self.current_file.tell() line = self._strip_comments(self.current_file.readline()) while line and not line.startswith('*'): data_entry_elements = line.split(';')[:-1] # split and also strip newline character at the end if len(data_entry_elements) == len(header): # this is kind of a big stretch... 
I assume that x is always first for i, h in enumerate(header): single_element = data_entry_elements[i].strip() # number patterns copied from old subs.pl parsing script pmnum1 = '[-+]?[\d]+\.?[\d]*' pmnum2 = '[-+]?\.[\d]+' pmnum3 = '[-+]?[\d]+\.?[\d]*\s*[eE]+\s*[+-]?\s*[\d]+' pmnum = '(' + pmnum1 + '|' + pmnum2 + '|' + pmnum3 + ')' # implement same regular expression matching as in old subs.pl parsing script if h == 'x': # independent variables r = re.search('^(?P<value>' + pmnum + ')$', single_element) if r: # "value" single_element = {'value': r.group('value')} else: r = re.search('^(?P<value>' + pmnum + ')\s*\(\s*BIN\s*=\s*(?P<low>' + pmnum + \ ')\s+TO\s+(?P<high>' + pmnum + ')\s*\)$', single_element) if r: # "value (BIN=low TO high)" single_element = {'value': float(r.group('value')), 'low': float(r.group('low')), 'high': float(r.group('high'))} else: r = re.search('^(?P<low>' + pmnum + ')\s+TO\s+(?P<high>' + pmnum + ')$', single_element) if r: # "low TO high" single_element = {'low': float(r.group('low')), 'high': float(r.group('high'))} else: # everything else: don't try to convert to float single_element = {'value': single_element} # TO DO: subs.pl also parses other formats such as "low high", "value low high" (sorted), # "value +- err", and "value -err_m, +err_p". Do we need to support these formats here? # Probably not: unsupported formats will just be written as a text string. self.current_table.data['independent_variables'][xy_mapping[i]]['values'].append(single_element) # extract energy if SQRT(S) is one of the 'x' variables xheader = self.current_table.data['independent_variables'][xy_mapping[i]]['header'] if xheader['name'].startswith('SQRT(S)') and lower(xheader['units']) in ('gev'): for energy in single_element.values(): try: energy = float(energy) self.set_of_energies.add(energy) except: pass elif h == 'y': # dependent variable pmnum_pct = pmnum + '(\s*PCT)?' 
# errors can possibly be given as percentages r = re.search('^(?P<value>' + pmnum + ')\s+(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' + pmnum_pct + '|-)\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element) element = {'errors': []} if r: # asymmetric first error element['value'] = r.group('value').strip() err_p = r.group('err_p').strip().lstrip('+') if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p err_m = r.group('err_m').strip().lstrip('+') if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%': err_p = err_p + '%' if not err_p and not err_m: raise ValueError("Both asymmetric errors cannot be '-': %s" % line) if r.group('err_sys'): element['errors'] += [{'label': 'stat', 'asymerror': {'plus': err_p, 'minus': err_m}}] else: element['errors'] += [{'asymerror': {'plus': err_p, 'minus': err_m}}] else: r = re.search('^(?P<value>' + pmnum + ')\s*(\+-\s*(?P<error>' + pmnum_pct + '))?\s*(?P<err_sys>\(\s*DSYS=[^()]+\s*\))?$', single_element) if r: # symmetric first error element['value'] = r.group('value').strip() if r.group('error'): error = r.group('error').strip().lstrip('+') error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error if r.group('err_sys'): element['errors'] += [{'label': 'stat', 'symerror': error}] else: element['errors'] += [{'symerror': error}] else: # everything else element['value'] = single_element err_sys = [] if r and r.group('err_sys'): err_sys = r.group('err_sys').strip(' \t()').split('DSYS=') for err in err_sys + self.current_table.dserrors: err = err.strip(' \t,') if not err: continue error = {} label = 'sys' r = re.search('^(\+-)?\s*(?P<error>' + pmnum_pct + ')\s*(\:\s*(?P<label>.+))?$', err) if r: # symmetric systematic error if r.group('label'): label += ',' + 
r.group('label') error = r.group('error').strip().lstrip('+') error = error[:-3].strip() + '%' if error[-3:] == 'PCT' else error error = {'symerror': error} else: r = re.search('^(?P<err_p>' + pmnum_pct + '|-)\s*,\s*(?P<err_m>' + pmnum_pct + '|-)\s*(\:\s*(?P<label>.+))?$', err) if r: # asymmetric systematic error if r.group('label'): label += ',' + r.group('label') err_p = r.group('err_p').strip().lstrip('+') if err_p == '-': err_p = '' # represent missing error as '-' in oldhepdata format err_p = err_p[:-3].strip() + '%' if err_p[-3:] == 'PCT' else err_p err_m = r.group('err_m').strip().lstrip('+') if err_m == '-': err_m = '' # represent missing error as '-' in oldhepdata format err_m = err_m[:-3].strip() + '%' if err_m[-3:] == 'PCT' else err_m if err_p and err_m and err_p[-1] != '%' and err_m[-1] == '%': err_p = err_p + '%' if not err_p and not err_m: raise ValueError("Both asymmetric errors cannot be '-': %s" % line) error = {'asymerror': {'plus': err_p, 'minus': err_m}} if not r: # error happened raise ValueError("Error while parsing data line: %s" % line) error['label'] = label if element['value'] != single_element: element['errors'].append(error) self.current_table.data['dependent_variables'][xy_mapping[i]]['values'].append(element) elif data_entry_elements: raise BadFormat("%s data entry elements but %s expected: %s" % (len(data_entry_elements), len(header), line)) last_index = self.current_file.tell() l = self.current_file.readline() line = self._strip_comments(l) self.current_file.seek(last_index) # extract minimum and maximum from set of energies if self.set_of_energies: energy_min = min(self.set_of_energies) energy_max = max(self.set_of_energies) if energy_max > energy_min: energy = str(energy_min) + '-' + str(energy_max) else: energy = energy_min self._parse_energies(energy) if self.current_table.description: if any(word in self.current_table.description.lower() for word in ['covariance', 'correlation', 'matrix']): reformatted = self._reformat_matrix()
[ "def", "_parse_table_data", "(", "self", ",", "data", ")", ":", "header", "=", "data", ".", "split", "(", "':'", ")", "self", ".", "current_table", ".", "data_header", "=", "header", "for", "i", ",", "h", "in", "enumerate", "(", "header", ")", ":", "...
Parse dataset data of the original HEPData format :param data: header of the table to be parsed :raise ValueError:
[ "Parse", "dataset", "data", "of", "the", "original", "HEPData", "format" ]
python
train
johntfoster/bspline
bspline/bspline.py
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L131-L133
def d(self, xi): """Convenience function to compute first derivative of basis functions. 'Memoized' for speed.""" return self.__basis(xi, self.p, compute_derivatives=True)
[ "def", "d", "(", "self", ",", "xi", ")", ":", "return", "self", ".", "__basis", "(", "xi", ",", "self", ".", "p", ",", "compute_derivatives", "=", "True", ")" ]
Convenience function to compute first derivative of basis functions. 'Memoized' for speed.
[ "Convenience", "function", "to", "compute", "first", "derivative", "of", "basis", "functions", ".", "Memoized", "for", "speed", "." ]
python
train
mehmetg/streak_client
streak_client/streak_client.py
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L1050-L1062
def get_box_files(self, box_key): '''Gets to file infos in a single box. Args: box_key key for the file return (status code, list of file info dicts) ''' uri = '/'.join([self.api_uri, self.boxes_suffix, box_key, self.files_suffix ]) return self._req('get', uri)
[ "def", "get_box_files", "(", "self", ",", "box_key", ")", ":", "uri", "=", "'/'", ".", "join", "(", "[", "self", ".", "api_uri", ",", "self", ".", "boxes_suffix", ",", "box_key", ",", "self", ".", "files_suffix", "]", ")", "return", "self", ".", "_re...
Gets to file infos in a single box. Args: box_key key for the file return (status code, list of file info dicts)
[ "Gets", "to", "file", "infos", "in", "a", "single", "box", ".", "Args", ":", "box_key", "key", "for", "the", "file", "return", "(", "status", "code", "list", "of", "file", "info", "dicts", ")" ]
python
train
theosysbio/means
src/means/inference/inference.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/inference/inference.py#L221-L241
def infer(self, number_of_processes=1, *args, **kwargs): """ :param number_of_processes: If set to more than 1, the inference routines will be paralellised using ``multiprocessing`` module :param args: arguments to pass to :meth:`Inference.infer` :param kwargs: keyword arguments to pass to :meth:`Inference.infer` :return: """ if number_of_processes == 1: results = map(lambda x: x.infer(*args, **kwargs), self._inference_objects) else: inference_objects = self._inference_objects results = raw_results_in_parallel(self._inference_objects, number_of_processes, *args, **kwargs) results = [inference._result_from_raw_result(raw_result) for inference, raw_result in zip(inference_objects, results)] results = sorted(results, key=lambda x: x.distance_at_minimum) return InferenceResultsCollection(results)
[ "def", "infer", "(", "self", ",", "number_of_processes", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "number_of_processes", "==", "1", ":", "results", "=", "map", "(", "lambda", "x", ":", "x", ".", "infer", "(", "*", "args"...
:param number_of_processes: If set to more than 1, the inference routines will be paralellised using ``multiprocessing`` module :param args: arguments to pass to :meth:`Inference.infer` :param kwargs: keyword arguments to pass to :meth:`Inference.infer` :return:
[ ":", "param", "number_of_processes", ":", "If", "set", "to", "more", "than", "1", "the", "inference", "routines", "will", "be", "paralellised", "using", "multiprocessing", "module", ":", "param", "args", ":", "arguments", "to", "pass", "to", ":", "meth", ":"...
python
train
basho/riak-python-client
riak/transports/tcp/connection.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L82-L91
def _init_security(self): """ Initialize a secure connection to the server. """ if not self._starttls(): raise SecurityError("Could not start TLS connection") # _ssh_handshake() will throw an exception upon failure self._ssl_handshake() if not self._auth(): raise SecurityError("Could not authorize connection")
[ "def", "_init_security", "(", "self", ")", ":", "if", "not", "self", ".", "_starttls", "(", ")", ":", "raise", "SecurityError", "(", "\"Could not start TLS connection\"", ")", "# _ssh_handshake() will throw an exception upon failure", "self", ".", "_ssl_handshake", "(",...
Initialize a secure connection to the server.
[ "Initialize", "a", "secure", "connection", "to", "the", "server", "." ]
python
train
twisted/axiom
axiom/scheduler.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/scheduler.py#L217-L224
def scheduledTimes(self, runnable): """ Return an iterable of the times at which the given item is scheduled to run. """ events = self.store.query( TimedEvent, TimedEvent.runnable == runnable) return (event.time for event in events if not event.running)
[ "def", "scheduledTimes", "(", "self", ",", "runnable", ")", ":", "events", "=", "self", ".", "store", ".", "query", "(", "TimedEvent", ",", "TimedEvent", ".", "runnable", "==", "runnable", ")", "return", "(", "event", ".", "time", "for", "event", "in", ...
Return an iterable of the times at which the given item is scheduled to run.
[ "Return", "an", "iterable", "of", "the", "times", "at", "which", "the", "given", "item", "is", "scheduled", "to", "run", "." ]
python
train
saltstack/salt
salt/states/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/junos.py#L157-L188
def rollback(name, **kwargs): ''' Rollbacks the committed changes. .. code-block:: yaml rollback the changes: junos: - rollback - id: 5 Parameters: Optional * id: The rollback id value [0-49]. (default = 0) * kwargs: Keyworded arguments which can be provided like- * timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * diffs_file: Path to the file where any diffs will be written. (default = None) ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} ret['changes'] = __salt__['junos.rollback'](**kwargs) return ret
[ "def", "rollback", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "ret", "[", "'changes'", "]", "=", "__salt...
Rollbacks the committed changes. .. code-block:: yaml rollback the changes: junos: - rollback - id: 5 Parameters: Optional * id: The rollback id value [0-49]. (default = 0) * kwargs: Keyworded arguments which can be provided like- * timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * diffs_file: Path to the file where any diffs will be written. (default = None)
[ "Rollbacks", "the", "committed", "changes", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/basicpar.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/basicpar.py#L489-L526
def save(self, dolist=0): """Return .par format string for this parameter If dolist is set, returns fields as a list of strings. Default is to return a single string appropriate for writing to a file. """ quoted = not dolist fields = 7*[""] fields[0] = self.name fields[1] = self.type fields[2] = self.mode fields[3] = self.toString(self.value,quoted=quoted) if self.choice is not None: schoice = list(map(self.toString, self.choice)) schoice.insert(0,'') schoice.append('') fields[4] = repr('|'.join(schoice)) elif self.min not in [None,INDEF]: fields[4] = self.toString(self.min,quoted=quoted) if self.max not in [None,INDEF]: fields[5] = self.toString(self.max,quoted=quoted) if self.prompt: if quoted: sprompt = repr(self.prompt) else: sprompt = self.prompt # prompt can have embedded newlines (which are printed) sprompt = sprompt.replace(r'\012', '\n') sprompt = sprompt.replace(r'\n', '\n') fields[6] = sprompt # delete trailing null parameters for i in [6,5,4]: if fields[i] != "": break del fields[i] if dolist: return fields else: return ','.join(fields)
[ "def", "save", "(", "self", ",", "dolist", "=", "0", ")", ":", "quoted", "=", "not", "dolist", "fields", "=", "7", "*", "[", "\"\"", "]", "fields", "[", "0", "]", "=", "self", ".", "name", "fields", "[", "1", "]", "=", "self", ".", "type", "f...
Return .par format string for this parameter If dolist is set, returns fields as a list of strings. Default is to return a single string appropriate for writing to a file.
[ "Return", ".", "par", "format", "string", "for", "this", "parameter" ]
python
train
saltstack/salt
salt/modules/infoblox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/infoblox.py#L351-L382
def get_host_hostname(name, domains=None, **api_opts): ''' Get hostname If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot. If domains are provided, the best match out of the list is truncated from the fqdn leaving the hostname. If no matching domains are found the fqdn is returned. dots at end of names are ignored. CLI Examples: .. code-block:: bash salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com \ domains="['domain.com', 't.domain.com']" #returns: localhost.xxx salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com #returns: localhost ''' name = name.lower().rstrip('.') if not domains: return name.split('.')[0] domain = get_host_domainname(name, domains, **api_opts) if domain and domain in name: return name.rsplit('.' + domain)[0] return name
[ "def", "get_host_hostname", "(", "name", ",", "domains", "=", "None", ",", "*", "*", "api_opts", ")", ":", "name", "=", "name", ".", "lower", "(", ")", ".", "rstrip", "(", "'.'", ")", "if", "not", "domains", ":", "return", "name", ".", "split", "("...
Get hostname If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot. If domains are provided, the best match out of the list is truncated from the fqdn leaving the hostname. If no matching domains are found the fqdn is returned. dots at end of names are ignored. CLI Examples: .. code-block:: bash salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com \ domains="['domain.com', 't.domain.com']" #returns: localhost.xxx salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com #returns: localhost
[ "Get", "hostname" ]
python
train
fstab50/metal
metal/script_utils.py
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/script_utils.py#L391-L412
def read_local_config(cfg): """ Parses local config file for override values Args: :local_file (str): filename of local config file Returns: dict object of values contained in local config file """ try: if os.path.exists(cfg): config = import_file_object(cfg) return config else: logger.warning( '%s: local config file (%s) not found, cannot be read' % (inspect.stack()[0][3], str(cfg))) except IOError as e: logger.warning( 'import_file_object: %s error opening %s' % (str(e), str(cfg)) ) return {}
[ "def", "read_local_config", "(", "cfg", ")", ":", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "cfg", ")", ":", "config", "=", "import_file_object", "(", "cfg", ")", "return", "config", "else", ":", "logger", ".", "warning", "(", "'%s: loca...
Parses local config file for override values Args: :local_file (str): filename of local config file Returns: dict object of values contained in local config file
[ "Parses", "local", "config", "file", "for", "override", "values" ]
python
train
kstaniek/condoor
condoor/fsm.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/fsm.py#L152-L218
def run(self): """Start the FSM. Returns: boolean: True if FSM reaches the last state or false if the exception or error message was raised """ ctx = FSM.Context(self.name, self.device) transition_counter = 0 timeout = self.timeout self.log("{} Start".format(self.name)) while transition_counter < self.max_transitions: transition_counter += 1 try: start_time = time() if self.init_pattern is None: ctx.event = self.ctrl.expect(self.events, searchwindowsize=self.searchwindowsize, timeout=timeout) else: self.log("INIT_PATTERN={}".format(pattern_to_str(self.init_pattern))) try: ctx.event = self.events.index(self.init_pattern) except ValueError: self.log("INIT_PATTERN unknown.") continue finally: self.init_pattern = None finish_time = time() - start_time key = (ctx.event, ctx.state) ctx.pattern = self.events[ctx.event] if key in self.transition_table: transition = self.transition_table[key] next_state, action_instance, next_timeout = transition self.log("E={},S={},T={},RT={:.2f}".format(ctx.event, ctx.state, timeout, finish_time)) if callable(action_instance) and not isclass(action_instance): if not action_instance(ctx): self.log("Error: {}".format(ctx.msg)) return False elif isinstance(action_instance, Exception): self.log("A=Exception {}".format(action_instance)) raise action_instance elif action_instance is None: self.log("A=None") else: self.log("FSM Action is not callable: {}".format(str(action_instance))) raise RuntimeWarning("FSM Action is not callable") if next_timeout != 0: # no change if set to 0 timeout = next_timeout ctx.state = next_state self.log("NS={},NT={}".format(next_state, timeout)) else: self.log("Unknown transition: EVENT={},STATE={}".format(ctx.event, ctx.state)) continue except EOF: raise ConnectionError("Session closed unexpectedly", self.ctrl.hostname) if ctx.finished or next_state == -1: self.log("{} Stop at E={},S={}".format(self.name, ctx.event, ctx.state)) return True # check while else if even exists self.log("FSM looped. 
Exiting") return False
[ "def", "run", "(", "self", ")", ":", "ctx", "=", "FSM", ".", "Context", "(", "self", ".", "name", ",", "self", ".", "device", ")", "transition_counter", "=", "0", "timeout", "=", "self", ".", "timeout", "self", ".", "log", "(", "\"{} Start\"", ".", ...
Start the FSM. Returns: boolean: True if FSM reaches the last state or false if the exception or error message was raised
[ "Start", "the", "FSM", "." ]
python
train
CityOfZion/neo-python
neo/Network/Payloads/InvPayload.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Network/Payloads/InvPayload.py#L26-L36
def Size(self): """ Get the total size in bytes of the object. Returns: int: size. """ if len(self.Hashes) > 0: if not isinstance(self.Hashes[0], UInt256): corrected_hashes = list(map(lambda i: UInt256(data=binascii.unhexlify(i)), self.Hashes)) return s.uint8 + GetVarSize(corrected_hashes)
[ "def", "Size", "(", "self", ")", ":", "if", "len", "(", "self", ".", "Hashes", ")", ">", "0", ":", "if", "not", "isinstance", "(", "self", ".", "Hashes", "[", "0", "]", ",", "UInt256", ")", ":", "corrected_hashes", "=", "list", "(", "map", "(", ...
Get the total size in bytes of the object. Returns: int: size.
[ "Get", "the", "total", "size", "in", "bytes", "of", "the", "object", "." ]
python
train
jantman/pypi-download-stats
pypi_download_stats/projectstats.py
https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/projectstats.py#L290-L307
def per_system_data(self): """ Return download data by system. :return: dict of cache data; keys are datetime objects, values are dict of system (str) to count (int) :rtype: dict """ ret = {} for cache_date in self.cache_dates: data = self._cache_get(cache_date) ret[cache_date] = { self._column_value(x): data['by_system'][x] for x in data['by_system'] } if len(ret[cache_date]) == 0: ret[cache_date]['unknown'] = 0 return ret
[ "def", "per_system_data", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "cache_date", "in", "self", ".", "cache_dates", ":", "data", "=", "self", ".", "_cache_get", "(", "cache_date", ")", "ret", "[", "cache_date", "]", "=", "{", "self", ".", "...
Return download data by system. :return: dict of cache data; keys are datetime objects, values are dict of system (str) to count (int) :rtype: dict
[ "Return", "download", "data", "by", "system", "." ]
python
train
eddiejessup/fealty
fealty/lattice.py
https://github.com/eddiejessup/fealty/blob/03745eb98d85bc2a5d08920773ab9c4515462d30/fealty/lattice.py#L78-L105
def pad_length(x, d): """Return a vector appropriate to a dimensional space, using an input vector as a prompt depending on its type: - If the input is a vector, return that vector. - If the input is a scalar, return a vector filled with that value. Useful when a function expects an array specifying values along each axis, but wants to also accept a scalar value in case the length is the same in all directions. Parameters ---------- x: float or array-like The input parameter that may need padding. d: int The dimensional space to make `x` appropriate for. Returns ------- x_pad: array-like, shape (d,) The padded parameter. """ try: x[0] except TypeError: x = d * [x] return np.array(x)
[ "def", "pad_length", "(", "x", ",", "d", ")", ":", "try", ":", "x", "[", "0", "]", "except", "TypeError", ":", "x", "=", "d", "*", "[", "x", "]", "return", "np", ".", "array", "(", "x", ")" ]
Return a vector appropriate to a dimensional space, using an input vector as a prompt depending on its type: - If the input is a vector, return that vector. - If the input is a scalar, return a vector filled with that value. Useful when a function expects an array specifying values along each axis, but wants to also accept a scalar value in case the length is the same in all directions. Parameters ---------- x: float or array-like The input parameter that may need padding. d: int The dimensional space to make `x` appropriate for. Returns ------- x_pad: array-like, shape (d,) The padded parameter.
[ "Return", "a", "vector", "appropriate", "to", "a", "dimensional", "space", "using", "an", "input", "vector", "as", "a", "prompt", "depending", "on", "its", "type", ":" ]
python
train
zero-os/zerotier_client
zerotier/client_support.py
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/client_support.py#L75-L90
def val_factory(val, datatypes): """ return an instance of `val` that is of type `datatype`. keep track of exceptions so we can produce meaningful error messages. """ exceptions = [] for dt in datatypes: try: if isinstance(val, dt): return val return type_handler_object(val, dt) except Exception as e: exceptions.append(str(e)) # if we get here, we never found a valid value. raise an error raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'. format(val=val, types=datatypes, excs=exceptions))
[ "def", "val_factory", "(", "val", ",", "datatypes", ")", ":", "exceptions", "=", "[", "]", "for", "dt", "in", "datatypes", ":", "try", ":", "if", "isinstance", "(", "val", ",", "dt", ")", ":", "return", "val", "return", "type_handler_object", "(", "val...
return an instance of `val` that is of type `datatype`. keep track of exceptions so we can produce meaningful error messages.
[ "return", "an", "instance", "of", "val", "that", "is", "of", "type", "datatype", ".", "keep", "track", "of", "exceptions", "so", "we", "can", "produce", "meaningful", "error", "messages", "." ]
python
train
PGower/PyCanvas
pycanvas/apis/users.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/users.py#L668-L694
def update_user_settings(self, id, collapse_global_nav=None, manual_mark_as_read=None): """ Update user settings. Update an existing user's settings. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - manual_mark_as_read """If true, require user to manually mark discussion posts as read (don't auto-mark as read).""" if manual_mark_as_read is not None: params["manual_mark_as_read"] = manual_mark_as_read # OPTIONAL - collapse_global_nav """If true, the user's page loads with the global navigation collapsed""" if collapse_global_nav is not None: params["collapse_global_nav"] = collapse_global_nav self.logger.debug("GET /api/v1/users/{id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{id}/settings".format(**path), data=data, params=params, no_data=True)
[ "def", "update_user_settings", "(", "self", ",", "id", ",", "collapse_global_nav", "=", "None", ",", "manual_mark_as_read", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\...
Update user settings. Update an existing user's settings.
[ "Update", "user", "settings", ".", "Update", "an", "existing", "user", "s", "settings", "." ]
python
train