repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
cocagne/txdbus
txdbus/objects.py
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/txdbus/objects.py#L898-L984
def getRemoteObject(self, busName, objectPath, interfaces=None,
                    replaceKnownInterfaces=False):
    """
    Creates a L{RemoteDBusObject} instance to represent the specified
    DBus object. If explicit interfaces are not supplied, DBus object
    introspection will be used to obtain them automatically.

    @type busName: C{string}
    @param busName: Name of the bus exporting the desired object

    @type objectPath: C{string}
    @param objectPath: DBus path of the desired object

    @type interfaces: None, C{string} or L{interface.DBusInterface} or a
                      list of C{string}/L{interface.DBusInterface}
    @param interfaces: May be None, a single value, or a list of string
        interface names and/or instances of L{interface.DBusInterface}.
        If None or any of the specified interface names are unknown, full
        introspection will be attempted. If interfaces consists solely of
        L{interface.DBusInterface} instances and/or known interface names,
        no introspection will be performed.

    @type replaceKnownInterfaces: C{bool}
    @param replaceKnownInterfaces: If True (defaults to False), any
        interfaces discovered during the introspection process will
        override any previous, cached values.

    @rtype: L{twisted.internet.defer.Deferred}
    @returns: A Deferred to the L{RemoteDBusObject} instance
    """
    # Normalize a single value to a list so both forms are handled alike.
    if interfaces is not None and not isinstance(interfaces, list):
        interfaces = [interfaces]

    # The cache key must be hashable.  The original code put the raw
    # ``interfaces`` argument into the key tuple; when callers pass a
    # list this raises TypeError at ``self._weakProxies[weak_id] = prox``.
    # Use a tuple of interface names (or None) instead.
    if interfaces is None:
        iface_key = None
    else:
        iface_key = tuple(
            i.name if isinstance(i, interface.DBusInterface) else i
            for i in interfaces
        )
    weak_id = (busName, objectPath, iface_key)

    need_introspection = False
    required_interfaces = set()
    if interfaces is not None:
        ifl = []
        for i in interfaces:
            if isinstance(i, interface.DBusInterface):
                ifl.append(i)
                required_interfaces.add(i.name)
            else:
                required_interfaces.add(i)
                if i in interface.DBusInterface.knownInterfaces:
                    ifl.append(interface.DBusInterface.knownInterfaces[i])
                else:
                    # An unknown name forces a full introspection pass.
                    need_introspection = True
        if not need_introspection:
            return defer.succeed(
                RemoteDBusObject(self, busName, objectPath, ifl)
            )

    d = self.conn.introspectRemoteObject(
        busName,
        objectPath,
        replaceKnownInterfaces,
    )

    def ok(ifaces):
        # Ensure every requested interface was actually discovered.
        missing = required_interfaces - {q.name for q in ifaces}
        if missing:
            raise error.IntrospectionFailed(
                'Introspection failed to find interfaces: ' +
                ','.join(missing)
            )
        prox = RemoteDBusObject(self, busName, objectPath, ifaces)
        self._weakProxies[weak_id] = prox
        return prox

    d.addCallback(ok)
    return d
[ "def", "getRemoteObject", "(", "self", ",", "busName", ",", "objectPath", ",", "interfaces", "=", "None", ",", "replaceKnownInterfaces", "=", "False", ")", ":", "weak_id", "=", "(", "busName", ",", "objectPath", ",", "interfaces", ")", "need_introspection", "=...
Creates a L{RemoteDBusObject} instance to represent the specified DBus object. If explicit interfaces are not supplied, DBus object introspection will be used to obtain them automatically. @type busName: C{string} @param busName: Name of the bus exporting the desired object @type objectPath: C{string} @param objectPath: DBus path of the desired object @type interfaces: None, C{string} or L{interface.DBusInterface} or a list of C{string}/L{interface.DBusInterface} @param interfaces: May be None, a single value, or a list of string interface names and/or instances of L{interface.DBusInterface}. If None or any of the specified interface names are unknown, full introspection will be attempted. If interfaces consists of solely of L{interface.DBusInterface} instances and/or known interfacep names, no introspection will be preformed. @type replaceKnownInterfaces: C{bool} @param replaceKnownInterfaces: If True (defaults to False), any interfaces discovered during the introspection process will override any previous, cached values. @rtype: L{twisted.internet.defer.Deferred} @returns: A Deferred to the L{RemoteDBusObject} instance
[ "Creates", "a", "L", "{", "RemoteDBusObject", "}", "instance", "to", "represent", "the", "specified", "DBus", "object", ".", "If", "explicit", "interfaces", "are", "not", "supplied", "DBus", "object", "introspection", "will", "be", "used", "to", "obtain", "the...
python
train
ckan/ckan-service-provider
ckanserviceprovider/db.py
https://github.com/ckan/ckan-service-provider/blob/83a42b027dba8a0b3ca7e5f689f990b7bc2cd7fa/ckanserviceprovider/db.py#L98-L179
def get_job(job_id):
    """Return the job with the given job_id as a dict.

    The dict also includes any metadata or logs associated with the job.
    Returns None instead of a dict if there's no job with the given job_id.

    The keys of a job dict are:

    "job_id": The unique identifier for the job (unicode)
    "job_type": The name of the job function that will be executed for
        this job (unicode)
    "status": The current status of the job, e.g. "pending", "complete",
        or "error" (unicode)
    "data": Any output data returned by the job if it completed
        successfully (any JSON-serializable type)
    "error": If the job failed, a dict with at least a "message" key
        holding a string error message; otherwise None
    "requested_timestamp": The time at which the job was requested (string)
    "finished_timestamp": The time at which the job finished (string)
    "sent_data": The input data for the job, provided by the client site
        (any JSON-serializable type)
    "result_url": The callback URL that CKAN Service Provider will post
        the result to when the job finishes (unicode)
    "api_key": The API key used when posting the job result to the
        result_url (unicode or None).  A None here doesn't mean there was
        no API key: it is deleted from the database after the result has
        been posted.
    "job_key": The key that users must provide (in the Authorization
        header of the HTTP request) to be authorized to modify the job
        (unicode).  Administrators can administer any job without it.
    "metadata": Any custom metadata associated with the job (dict)
    "logs": Any logs associated with the job (list)

    """
    # Coerce to unicode to avoid SQLAlchemy "Unicode type received
    # non-unicode bind param value" warnings.
    if job_id:
        job_id = unicode(job_id)

    query = JOBS_TABLE.select().where(JOBS_TABLE.c.job_id == job_id)
    row = ENGINE.execute(query).first()
    if not row:
        return None

    # Build a plain-dict representation of the database row.
    job_dict = {}
    for field in row.keys():
        value = getattr(row, field)
        if value is None:
            job_dict[field] = None
        elif field in ('sent_data', 'data', 'error'):
            # These columns store JSON-encoded text.
            job_dict[field] = json.loads(value)
        elif isinstance(value, datetime.datetime):
            job_dict[field] = value.isoformat()
        else:
            job_dict[field] = unicode(value)

    job_dict['metadata'] = _get_metadata(job_id)
    job_dict['logs'] = _get_logs(job_id)
    return job_dict
[ "def", "get_job", "(", "job_id", ")", ":", "# Avoid SQLAlchemy \"Unicode type received non-unicode bind param value\"", "# warnings.", "if", "job_id", ":", "job_id", "=", "unicode", "(", "job_id", ")", "result", "=", "ENGINE", ".", "execute", "(", "JOBS_TABLE", ".", ...
Return the job with the given job_id as a dict. The dict also includes any metadata or logs associated with the job. Returns None instead of a dict if there's no job with the given job_id. The keys of a job dict are: "job_id": The unique identifier for the job (unicode) "job_type": The name of the job function that will be executed for this job (unicode) "status": The current status of the job, e.g. "pending", "complete", or "error" (unicode) "data": Any output data returned by the job if it has completed successfully. This may be any JSON-serializable type, e.g. None, a string, a dict, etc. "error": If the job failed with an error this will be a dict with a "message" key whose value is a string error message. The dict may also have other keys specific to the particular type of error. If the job did not fail with an error then "error" will be None. "requested_timestamp": The time at which the job was requested (string) "finished_timestamp": The time at which the job finished (string) "sent_data": The input data for the job, provided by the client site. This may be any JSON-serializable type, e.g. None, a string, a dict, etc. "result_url": The callback URL that CKAN Service Provider will post the result to when the job finishes (unicode) "api_key": The API key that CKAN Service Provider will use when posting the job result to the result_url (unicode or None). A None here doesn't mean that there was no API key: CKAN Service Provider deletes the API key from the database after it has posted the result to the result_url. "job_key": The key that users must provide (in the Authorization header of the HTTP request) to be authorized to modify the job (unicode). For example requests to the CKAN Service Provider API need this to get the status or output data of a job or to delete a job. If you login to CKAN Service Provider as an administrator then you can administer any job without providing its job_key. 
"metadata": Any custom metadata associated with the job (dict) "logs": Any logs associated with the job (list)
[ "Return", "the", "job", "with", "the", "given", "job_id", "as", "a", "dict", "." ]
python
train
KE-works/pykechain
pykechain/models/customization.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/customization.py#L825-L896
def add_attachment_viewer_widget(self, attachment_property, custom_title=False, height=None):
    """
    Add a KE-chain Attachment Viewer (e.g. attachment viewer widget) to the customization.

    The widget will be saved to KE-chain.

    :param attachment_property: The Attachment Property to which the Viewer will be connected to.
    :type attachment_property: :class:`Property` or UUID
    :param custom_title: A custom title for the attachment viewer widget
            * False (default): the name of the attachment property
            * String value: Custom title
            * None: No title
    :type custom_title: bool or basestring or None
    :param height: The height of the widget in pixels (defaults to 500)
    :type height: int or None
    :raises IllegalArgumentError: When unknown or illegal arguments are passed.
    """
    # Accept either a `Property` instance or a UUID string for the property.
    if isinstance(attachment_property, Property):
        attachment_property_id = attachment_property.id
    elif isinstance(attachment_property, text_type) and is_uuid(attachment_property):
        attachment_property_id = attachment_property
        attachment_property = self._client.property(id=attachment_property_id)
    else:
        raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must be a "
                                   "Property or Property id. Type is: {}".format(type(attachment_property)))

    # The property must be of type `Attachment`.
    property_type = attachment_property.type
    if property_type != PropertyType.ATTACHMENT_VALUE:
        raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must have "
                                   "type {}. Type found: {}".format(PropertyType.ATTACHMENT_VALUE, property_type))

    # The property must also have category `Instance`.
    property_category = attachment_property._json_data['category']
    if property_category != Category.INSTANCE:
        raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must have "
                                   "category {}. Category found: {}".format(Category.INSTANCE, property_category))

    # Map the custom_title argument onto the widget title settings.
    if custom_title is False:
        show_title_value, title = "Default", attachment_property.name
    elif custom_title is None:
        show_title_value, title = "No title", ''
    else:
        show_title_value, title = "Custom title", str(custom_title)

    widget_height = height if height else 500
    activity_id = str(self.activity.id)

    # Attachment viewer widget configuration.
    config = {
        'propertyId': attachment_property_id,
        'showTitleValue': show_title_value,
        'xtype': ComponentXType.PROPERTYATTACHMENTPREVIEWER,
        'title': title,
        'filter': {
            'activity_id': activity_id
        },
        'height': widget_height
    }

    # Attachment viewer widget meta information.
    meta = {
        'propertyInstanceId': attachment_property_id,
        'activityId': activity_id,
        'customHeight': widget_height,
        'showTitleValue': show_title_value,
        'customTitle': title
    }

    self._add_widget(dict(config=config, meta=meta, name=WidgetNames.ATTACHMENTVIEWERWIDGET))
[ "def", "add_attachment_viewer_widget", "(", "self", ",", "attachment_property", ",", "custom_title", "=", "False", ",", "height", "=", "None", ")", ":", "# Check whether the attachment property is uuid type or class `Property`", "if", "isinstance", "(", "attachment_property",...
Add a KE-chain Attachment Viewer (e.g. attachment viewer widget) to the customization. The widget will be saved to KE-chain. :param attachment_property: The Attachment Property to which the Viewer will be connected to. :type attachment_property: :class:`Property` or UUID :param custom_title: A custom title for the attachment viewer widget * False (default): Notebook name * String value: Custom title * None: No title :type custom_title: bool or basestring or None :param height: The height of the Notebook in pixels :type height: int or None :raises IllegalArgumentError: When unknown or illegal arguments are passed.
[ "Add", "a", "KE", "-", "chain", "Attachment", "Viewer", "(", "e", ".", "g", ".", "attachment", "viewer", "widget", ")", "to", "the", "customization", "." ]
python
train
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Screen/SSPILScreen.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Screen/SSPILScreen.py#L139-L157
def clearView(self, fillColor = 0 ): """! \~english Clear up canvas with view size @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO ("1") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB ("RGB") color mode, RGB color values can be used \~chinese 清除画布中当前视图大小的区域同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO ("1") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB ("RGB") 彩色模式,可以使用 RGB 色彩值 """ self.Canvas.rectangle(self.View.rectToArray(), outline=0, fill=fillColor)
[ "def", "clearView", "(", "self", ",", "fillColor", "=", "0", ")", ":", "self", ".", "Canvas", ".", "rectangle", "(", "self", ".", "View", ".", "rectToArray", "(", ")", ",", "outline", "=", "0", ",", "fill", "=", "fillColor", ")" ]
! \~english Clear up canvas with view size @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO ("1") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB ("RGB") color mode, RGB color values can be used \~chinese 清除画布中当前视图大小的区域同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO ("1") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB ("RGB") 彩色模式,可以使用 RGB 色彩值
[ "!", "\\", "~english", "Clear", "up", "canvas", "with", "view", "size", "@param", "fillColor", ":", "a", "color", "value", "@note", "The", "fillColor", "value", "range", "depends", "on", "the", "setting", "of", "_buffer_color_mode", ".", "*", "If", "it", "...
python
train
scanny/python-pptx
pptx/chart/xmlwriter.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xmlwriter.py#L1524-L1548
def _lvl_xml(self, categories): """ The unicode XML snippet for the ``<c:lvl>`` elements containing multi-level category names. """ def lvl_pt_xml(level): xml = '' for idx, name in level: xml += ( ' <c:pt idx="%d">\n' ' <c:v>%s</c:v>\n' ' </c:pt>\n' ) % (idx, escape('%s' % name)) return xml xml = '' for level in categories.levels: xml += ( ' <c:lvl>\n' '{lvl_pt_xml}' ' </c:lvl>\n' ).format(**{ 'lvl_pt_xml': lvl_pt_xml(level), }) return xml
[ "def", "_lvl_xml", "(", "self", ",", "categories", ")", ":", "def", "lvl_pt_xml", "(", "level", ")", ":", "xml", "=", "''", "for", "idx", ",", "name", "in", "level", ":", "xml", "+=", "(", "' <c:pt idx=\"%d\">\\n'", "' <c:v>...
The unicode XML snippet for the ``<c:lvl>`` elements containing multi-level category names.
[ "The", "unicode", "XML", "snippet", "for", "the", "<c", ":", "lvl", ">", "elements", "containing", "multi", "-", "level", "category", "names", "." ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L2666-L2714
def pop_range(ol,start_index,end_index,**kwargs): ''' from elist.jprint import pobj from elist.elist import * ol = [1,2,3,4,5,6] id(ol) rslt = pop_range(ol,2,4) ol id(ol) id(rslt['list']) #### ol = [1,2,3,4,5,6] id(ol) rslt = pop_range(ol,2,4,mode="original") rslt ol id(ol) ''' length = ol.__len__() start_index = uniform_index(start_index,length) end_index = uniform_index(end_index,length) if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" if(mode == "new"): cpol = copy.deepcopy(ol) new = [] popped = [] for i in range(0,start_index): new.append(cpol[i]) for i in range(start_index,end_index): popped.append(cpol[i]) for i in range(end_index,length): new.append(cpol[i]) return({'popped':popped,'list':new}) else: tmp = [] popped = [] for i in range(0,start_index): tmp.append(ol[i]) for i in range(start_index,end_index): popped.append(ol[i]) for i in range(end_index,length): tmp.append(ol[i]) ol.clear() for i in range(0,tmp.__len__()): ol.append(tmp[i]) return(popped)
[ "def", "pop_range", "(", "ol", ",", "start_index", ",", "end_index", ",", "*", "*", "kwargs", ")", ":", "length", "=", "ol", ".", "__len__", "(", ")", "start_index", "=", "uniform_index", "(", "start_index", ",", "length", ")", "end_index", "=", "uniform...
from elist.jprint import pobj from elist.elist import * ol = [1,2,3,4,5,6] id(ol) rslt = pop_range(ol,2,4) ol id(ol) id(rslt['list']) #### ol = [1,2,3,4,5,6] id(ol) rslt = pop_range(ol,2,4,mode="original") rslt ol id(ol)
[ "from", "elist", ".", "jprint", "import", "pobj", "from", "elist", ".", "elist", "import", "*", "ol", "=", "[", "1", "2", "3", "4", "5", "6", "]", "id", "(", "ol", ")", "rslt", "=", "pop_range", "(", "ol", "2", "4", ")", "ol", "id", "(", "ol"...
python
valid
splunk/splunk-sdk-python
splunklib/modularinput/event_writer.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/modularinput/event_writer.py#L74-L81
def write_xml_document(self, document): """Writes a string representation of an ``ElementTree`` object to the output stream. :param document: An ``ElementTree`` object. """ self._out.write(ET.tostring(document)) self._out.flush()
[ "def", "write_xml_document", "(", "self", ",", "document", ")", ":", "self", ".", "_out", ".", "write", "(", "ET", ".", "tostring", "(", "document", ")", ")", "self", ".", "_out", ".", "flush", "(", ")" ]
Writes a string representation of an ``ElementTree`` object to the output stream. :param document: An ``ElementTree`` object.
[ "Writes", "a", "string", "representation", "of", "an", "ElementTree", "object", "to", "the", "output", "stream", "." ]
python
train
theosysbio/means
src/means/util/sympyhelpers.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/util/sympyhelpers.py#L63-L87
def to_sympy_matrix(value): """ Converts value to a `sympy.Matrix` object, if possible. Leaves the value as `sympy.Matrix` if it already was :param value: value to convert :return: :rtype: `sympy.Matrix` """ if isinstance(value, sympy.Matrix): return value try: return sympy.Matrix(value) except ValueError as original_exception: if isinstance(value, list) and len(value) and all([not isinstance(x, list) for x in value]): # Let's try to convert the contents into a list # (this is required for 0.7.4 if we pass in a list of strings) # See `test_creation_of_column_matrix_from_list_of_strings` test list_of_lists_value = [[x] for x in value] try: m = sympy.Matrix(list_of_lists_value) return m except Exception: raise original_exception else: raise original_exception
[ "def", "to_sympy_matrix", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "sympy", ".", "Matrix", ")", ":", "return", "value", "try", ":", "return", "sympy", ".", "Matrix", "(", "value", ")", "except", "ValueError", "as", "original_exception...
Converts value to a `sympy.Matrix` object, if possible. Leaves the value as `sympy.Matrix` if it already was :param value: value to convert :return: :rtype: `sympy.Matrix`
[ "Converts", "value", "to", "a", "sympy", ".", "Matrix", "object", "if", "possible", ".", "Leaves", "the", "value", "as", "sympy", ".", "Matrix", "if", "it", "already", "was", ":", "param", "value", ":", "value", "to", "convert", ":", "return", ":", ":"...
python
train
manns/pyspread
pyspread/src/lib/_grid_cairo_renderer.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L1080-L1094
def get_above_right_key_rect(self): """Returns tuple key rect of above right cell""" key_above = self.row - 1, self.col, self.tab key_above_right = self.row - 1, self.col + 1, self.tab border_width_right = \ float(self.cell_attributes[key_above]["borderwidth_right"]) / 2.0 border_width_bottom = \ float(self.cell_attributes[key_above_right]["borderwidth_bottom"])\ / 2.0 rect_above_right = (self.x+self.width, self.y-border_width_bottom, border_width_right, border_width_bottom) return key_above_right, rect_above_right
[ "def", "get_above_right_key_rect", "(", "self", ")", ":", "key_above", "=", "self", ".", "row", "-", "1", ",", "self", ".", "col", ",", "self", ".", "tab", "key_above_right", "=", "self", ".", "row", "-", "1", ",", "self", ".", "col", "+", "1", ","...
Returns tuple key rect of above right cell
[ "Returns", "tuple", "key", "rect", "of", "above", "right", "cell" ]
python
train
Microsoft/knack
knack/cli.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/cli.py#L122-L126
def show_version(self): """ Print version information to the out file. """ version_info = self.get_cli_version() version_info += self.get_runtime_version() print(version_info, file=self.out_file)
[ "def", "show_version", "(", "self", ")", ":", "version_info", "=", "self", ".", "get_cli_version", "(", ")", "version_info", "+=", "self", ".", "get_runtime_version", "(", ")", "print", "(", "version_info", ",", "file", "=", "self", ".", "out_file", ")" ]
Print version information to the out file.
[ "Print", "version", "information", "to", "the", "out", "file", "." ]
python
train
mardiros/pyshop
pyshop/models.py
https://github.com/mardiros/pyshop/blob/b42510b9c3fa16e0e5710457401ac38fea5bf7a0/pyshop/models.py#L113-L126
def by_name(cls, session, name): """ Get a package from a given name. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param name: name of the group :type name: `unicode :return: package instance :rtype: :class:`pyshop.models.Group` """ return cls.first(session, where=(cls.name == name,))
[ "def", "by_name", "(", "cls", ",", "session", ",", "name", ")", ":", "return", "cls", ".", "first", "(", "session", ",", "where", "=", "(", "cls", ".", "name", "==", "name", ",", ")", ")" ]
Get a package from a given name. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param name: name of the group :type name: `unicode :return: package instance :rtype: :class:`pyshop.models.Group`
[ "Get", "a", "package", "from", "a", "given", "name", "." ]
python
train
spacetelescope/pysynphot
pysynphot/observation.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/observation.py#L607-L643
def efflam(self,binned=True): """Calculate :ref:`effective wavelength <pysynphot-formula-efflam>` of the observation. Calculation is done in the flux unit of ``flam``. .. note:: Similar to IRAF STSDAS SYNPHOT ``efflphot`` task. Parameters ---------- binned : bool Use binned dataset for calculations. Otherwise, use native dataset. Returns ------- ans : float Effective wavelength. """ myfluxunits=self.fluxunits.name self.convert('flam') if binned: wave=self.binwave flux=self.binflux else: wave=self.wave flux=self.flux num = self.trapezoidIntegration(wave,flux*wave*wave) den = self.trapezoidIntegration(wave,flux*wave) self.convert(myfluxunits) if num == 0.0 or den == 0.0: return 0.0 return num/den
[ "def", "efflam", "(", "self", ",", "binned", "=", "True", ")", ":", "myfluxunits", "=", "self", ".", "fluxunits", ".", "name", "self", ".", "convert", "(", "'flam'", ")", "if", "binned", ":", "wave", "=", "self", ".", "binwave", "flux", "=", "self", ...
Calculate :ref:`effective wavelength <pysynphot-formula-efflam>` of the observation. Calculation is done in the flux unit of ``flam``. .. note:: Similar to IRAF STSDAS SYNPHOT ``efflphot`` task. Parameters ---------- binned : bool Use binned dataset for calculations. Otherwise, use native dataset. Returns ------- ans : float Effective wavelength.
[ "Calculate", ":", "ref", ":", "effective", "wavelength", "<pysynphot", "-", "formula", "-", "efflam", ">", "of", "the", "observation", ".", "Calculation", "is", "done", "in", "the", "flux", "unit", "of", "flam", "." ]
python
train
PMEAL/OpenPNM
openpnm/io/CSV.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/io/CSV.py#L83-L130
def load(cls, filename, project=None, delim=' | '): r""" Opens a 'csv' file, reads in the data, and adds it to the **Network** Parameters ---------- filename : string (optional) The name of the file containing the data to import. The formatting of this file is outlined below. project : OpenPNM Project object A GenericNetwork is created and added to the specified Project. If no Project object is supplied then one will be created and returned. """ if project is None: project = ws.new_project() fname = cls._parse_filename(filename, ext='csv') a = pd.read_table(filepath_or_buffer=fname, sep=',', skipinitialspace=True, index_col=False, true_values=['T', 't', 'True', 'true', 'TRUE'], false_values=['F', 'f', 'False', 'false', 'FALSE']) dct = {} # First parse through all the items and re-merge columns keys = sorted(list(a.keys())) for item in keys: m = re.search(r'\[.\]', item) # The dot '.' is a wildcard if m: # m is None if pattern not found, otherwise merge cols pname = re.split(r'\[.\]', item)[0] # Get base propname # Find all other keys with same base propname merge_keys = [k for k in a.keys() if k.startswith(pname)] # Rerieve and remove arrays with same base propname merge_cols = [a.pop(k) for k in merge_keys] # Merge arrays into multi-column array and store in DataFrame dct[pname] = sp.vstack(merge_cols).T # Remove key from list of keys [keys.pop(keys.index(k)) for k in keys if k.startswith(pname)] else: dct[item] = sp.array(a.pop(item)) project = Dict.from_dict(dct, project=project, delim=delim) return project
[ "def", "load", "(", "cls", ",", "filename", ",", "project", "=", "None", ",", "delim", "=", "' | '", ")", ":", "if", "project", "is", "None", ":", "project", "=", "ws", ".", "new_project", "(", ")", "fname", "=", "cls", ".", "_parse_filename", "(", ...
r""" Opens a 'csv' file, reads in the data, and adds it to the **Network** Parameters ---------- filename : string (optional) The name of the file containing the data to import. The formatting of this file is outlined below. project : OpenPNM Project object A GenericNetwork is created and added to the specified Project. If no Project object is supplied then one will be created and returned.
[ "r", "Opens", "a", "csv", "file", "reads", "in", "the", "data", "and", "adds", "it", "to", "the", "**", "Network", "**" ]
python
train
bukun/TorCMS
torcms/model/relation_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/relation_model.py#L63-L89
def get_app_relations(app_id, num=20, kind='1'): ''' The the related infors. ''' info_tag = MInfor2Catalog.get_first_category(app_id) if info_tag: return TabPost2Tag.select( TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid') ).join( TabPost, on=(TabPost2Tag.post_id == TabPost.uid) ).where( (TabPost2Tag.tag_id == info_tag.tag_id) & (TabPost.kind == kind) ).order_by( peewee.fn.Random() ).limit(num) return TabPost2Tag.select( TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid') ).join( TabPost, on=(TabPost2Tag.post_id == TabPost.uid) ).where( TabPost.kind == kind ).order_by(peewee.fn.Random()).limit(num)
[ "def", "get_app_relations", "(", "app_id", ",", "num", "=", "20", ",", "kind", "=", "'1'", ")", ":", "info_tag", "=", "MInfor2Catalog", ".", "get_first_category", "(", "app_id", ")", "if", "info_tag", ":", "return", "TabPost2Tag", ".", "select", "(", "TabP...
Get the related infos.
[ "The", "the", "related", "infors", "." ]
python
train
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1842-L1869
def as_html(self, max_rows=0): """Format table as HTML.""" if not max_rows or max_rows > self.num_rows: max_rows = self.num_rows omitted = max(0, self.num_rows - max_rows) labels = self.labels lines = [ (0, '<table border="1" class="dataframe">'), (1, '<thead>'), (2, '<tr>'), (3, ' '.join('<th>' + label + '</th>' for label in labels)), (2, '</tr>'), (1, '</thead>'), (1, '<tbody>'), ] fmts = self._get_column_formatters(max_rows, True) for row in itertools.islice(self.rows, max_rows): lines += [ (2, '<tr>'), (3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for v, fmt in zip(row, fmts))), (2, '</tr>'), ] lines.append((1, '</tbody>')) lines.append((0, '</table>')) if omitted: lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted))) return '\n'.join(4 * indent * ' ' + text for indent, text in lines)
[ "def", "as_html", "(", "self", ",", "max_rows", "=", "0", ")", ":", "if", "not", "max_rows", "or", "max_rows", ">", "self", ".", "num_rows", ":", "max_rows", "=", "self", ".", "num_rows", "omitted", "=", "max", "(", "0", ",", "self", ".", "num_rows",...
Format table as HTML.
[ "Format", "table", "as", "HTML", "." ]
python
train
DataBiosphere/toil
src/toil/utils/toilStatus.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStatus.py#L256-L291
def traverseJobGraph(self, rootJob, jobsToReport=None, foundJobStoreIDs=None): """ Find all current jobs in the jobStore and return them as an Array. :param jobNode rootJob: The root job of the workflow. :param list jobsToReport: A list of jobNodes to be added to and returned. :param set foundJobStoreIDs: A set of jobStoreIDs used to keep track of jobStoreIDs encountered in traversal. :returns jobsToReport: The list of jobs currently in the job graph. """ if jobsToReport is None: jobsToReport = [] if foundJobStoreIDs is None: foundJobStoreIDs = set() if rootJob.jobStoreID in foundJobStoreIDs: return jobsToReport foundJobStoreIDs.add(rootJob.jobStoreID) jobsToReport.append(rootJob) # Traverse jobs in stack for jobs in rootJob.stack: for successorJobStoreID in [x.jobStoreID for x in jobs]: if successorJobStoreID not in foundJobStoreIDs and self.jobStore.exists(successorJobStoreID): self.traverseJobGraph(self.jobStore.load(successorJobStoreID), jobsToReport, foundJobStoreIDs) # Traverse service jobs for jobs in rootJob.services: for serviceJobStoreID in [x.jobStoreID for x in jobs]: if self.jobStore.exists(serviceJobStoreID): if serviceJobStoreID in foundJobStoreIDs: raise RuntimeError('Service job was unexpectedly found while traversing ') foundJobStoreIDs.add(serviceJobStoreID) jobsToReport.append(self.jobStore.load(serviceJobStoreID)) return jobsToReport
[ "def", "traverseJobGraph", "(", "self", ",", "rootJob", ",", "jobsToReport", "=", "None", ",", "foundJobStoreIDs", "=", "None", ")", ":", "if", "jobsToReport", "is", "None", ":", "jobsToReport", "=", "[", "]", "if", "foundJobStoreIDs", "is", "None", ":", "...
Find all current jobs in the jobStore and return them as an Array. :param jobNode rootJob: The root job of the workflow. :param list jobsToReport: A list of jobNodes to be added to and returned. :param set foundJobStoreIDs: A set of jobStoreIDs used to keep track of jobStoreIDs encountered in traversal. :returns jobsToReport: The list of jobs currently in the job graph.
[ "Find", "all", "current", "jobs", "in", "the", "jobStore", "and", "return", "them", "as", "an", "Array", "." ]
python
train
libtcod/python-tcod
tcod/event.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/event.py#L48-L52
def _pixel_to_tile(x: float, y: float) -> Tuple[float, float]: """Convert pixel coordinates to tile coordinates.""" xy = tcod.ffi.new("double[2]", (x, y)) tcod.lib.TCOD_sys_pixel_to_tile(xy, xy + 1) return xy[0], xy[1]
[ "def", "_pixel_to_tile", "(", "x", ":", "float", ",", "y", ":", "float", ")", "->", "Tuple", "[", "float", ",", "float", "]", ":", "xy", "=", "tcod", ".", "ffi", ".", "new", "(", "\"double[2]\"", ",", "(", "x", ",", "y", ")", ")", "tcod", ".", ...
Convert pixel coordinates to tile coordinates.
[ "Convert", "pixel", "coordinates", "to", "tile", "coordinates", "." ]
python
train
scopus-api/scopus
scopus/author_search.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/author_search.py#L9-L40
def authors(self): """A list of namedtuples storing author information, where each namedtuple corresponds to one author. The information in each namedtuple is (eid surname initials givenname documents affiliation affiliation_id city country areas). All entries are strings or None. Areas combines abbreviated subject areas followed by the number of documents in this subject. """ out = [] order = 'eid surname initials givenname affiliation documents '\ 'affiliation_id city country areas' auth = namedtuple('Author', order) for item in self._json: name = item.get('preferred-name', {}) aff = item.get('affiliation-current', {}) fields = item.get('subject-area', [{'@abbrev': '', '@frequency': ''}]) if isinstance(fields, dict): fields = [fields] areas = ["{} ({})".format(d.get('@abbrev', ''), d.get('@frequency', '')) for d in fields] new = auth(eid=item['eid'], initials=name.get('initials'), surname=name.get('surname'), areas="; ".join(areas), givenname=name.get('given-name'), documents=item.get('document-count', '0'), affiliation=aff.get('affiliation-name'), affiliation_id=aff.get('affiliation-id'), city=aff.get('affiliation-city'), country=aff.get('affiliation-country')) out.append(new) return out or None
[ "def", "authors", "(", "self", ")", ":", "out", "=", "[", "]", "order", "=", "'eid surname initials givenname affiliation documents '", "'affiliation_id city country areas'", "auth", "=", "namedtuple", "(", "'Author'", ",", "order", ")", "for", "item", "in", "self",...
A list of namedtuples storing author information, where each namedtuple corresponds to one author. The information in each namedtuple is (eid surname initials givenname documents affiliation affiliation_id city country areas). All entries are strings or None. Areas combines abbreviated subject areas followed by the number of documents in this subject.
[ "A", "list", "of", "namedtuples", "storing", "author", "information", "where", "each", "namedtuple", "corresponds", "to", "one", "author", ".", "The", "information", "in", "each", "namedtuple", "is", "(", "eid", "surname", "initials", "givenname", "documents", "...
python
train
IdentityPython/pysaml2
src/saml2/assertion.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/assertion.py#L810-L828
def apply_policy(self, sp_entity_id, policy, metadata=None): """ Apply policy to the assertion I'm representing :param sp_entity_id: The SP entity ID :param policy: The policy :param metadata: Metadata to use :return: The resulting AVA after the policy is applied """ policy.acs = self.acs ava = policy.restrict(self, sp_entity_id, metadata) for key, val in list(self.items()): if key in ava: self[key] = ava[key] else: del self[key] return ava
[ "def", "apply_policy", "(", "self", ",", "sp_entity_id", ",", "policy", ",", "metadata", "=", "None", ")", ":", "policy", ".", "acs", "=", "self", ".", "acs", "ava", "=", "policy", ".", "restrict", "(", "self", ",", "sp_entity_id", ",", "metadata", ")"...
Apply policy to the assertion I'm representing :param sp_entity_id: The SP entity ID :param policy: The policy :param metadata: Metadata to use :return: The resulting AVA after the policy is applied
[ "Apply", "policy", "to", "the", "assertion", "I", "m", "representing" ]
python
train
CalebBell/thermo
thermo/safety.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/safety.py#L1179-L1229
def Crowl_Louvar_UFL(atoms): r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_ correlation. Uses molecular formula only. The upper flammability limit of a gas is air is: .. math:: C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O \text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1} Parameters ---------- atoms : dict Dictionary of atoms and atom counts Returns ------- UFL : float Upper flammability limit, mole fraction Notes ----- Coefficient of 3.5 taken from [2]_ Examples -------- Hexane, example from [1]_, lit. 7.5 % >>> Crowl_Louvar_UFL({'H': 14, 'C': 6}) 0.07572479446127219 References ---------- .. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety: Fundamentals with Applications. 2E. Upper Saddle River, N.J: Prentice Hall, 2001. .. [2] Jones, G. W. "Inflammation Limits and Their Practical Application in Hazardous Industrial Operations." Chemical Reviews 22, no. 1 (February 1, 1938): 1-26. doi:10.1021/cr60071a001 ''' nC, nH, nO = 0, 0, 0 if 'C' in atoms and atoms['C']: nC = atoms['C'] else: return None if 'H' in atoms: nH = atoms['H'] if 'O' in atoms: nO = atoms['O'] return 3.5/(4.76*nC + 1.19*nH - 2.38*nO + 1.)
[ "def", "Crowl_Louvar_UFL", "(", "atoms", ")", ":", "nC", ",", "nH", ",", "nO", "=", "0", ",", "0", ",", "0", "if", "'C'", "in", "atoms", "and", "atoms", "[", "'C'", "]", ":", "nC", "=", "atoms", "[", "'C'", "]", "else", ":", "return", "None", ...
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_ correlation. Uses molecular formula only. The upper flammability limit of a gas is air is: .. math:: C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O \text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1} Parameters ---------- atoms : dict Dictionary of atoms and atom counts Returns ------- UFL : float Upper flammability limit, mole fraction Notes ----- Coefficient of 3.5 taken from [2]_ Examples -------- Hexane, example from [1]_, lit. 7.5 % >>> Crowl_Louvar_UFL({'H': 14, 'C': 6}) 0.07572479446127219 References ---------- .. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety: Fundamentals with Applications. 2E. Upper Saddle River, N.J: Prentice Hall, 2001. .. [2] Jones, G. W. "Inflammation Limits and Their Practical Application in Hazardous Industrial Operations." Chemical Reviews 22, no. 1 (February 1, 1938): 1-26. doi:10.1021/cr60071a001
[ "r", "Calculates", "upper", "flammability", "limit", "using", "the", "Crowl", "-", "Louvar", "[", "1", "]", "_", "correlation", ".", "Uses", "molecular", "formula", "only", "." ]
python
valid
The-Politico/politico-civic-election-night
electionnight/serializers/state.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/state.py#L52-L67
def get_elections(self, obj): """All elections in division.""" election_day = ElectionDay.objects.get( date=self.context['election_date']) elections = list(obj.elections.filter(election_day=election_day)) district = DivisionLevel.objects.get(name=DivisionLevel.DISTRICT) for district in obj.children.filter(level=district): elections.extend( list(district.elections.filter( election_day=election_day, meta__isnull=False )) ) return ElectionSerializer(elections, many=True).data
[ "def", "get_elections", "(", "self", ",", "obj", ")", ":", "election_day", "=", "ElectionDay", ".", "objects", ".", "get", "(", "date", "=", "self", ".", "context", "[", "'election_date'", "]", ")", "elections", "=", "list", "(", "obj", ".", "elections",...
All elections in division.
[ "All", "elections", "in", "division", "." ]
python
train
timothycrosley/simple_ci
simple_ci.py
https://github.com/timothycrosley/simple_ci/blob/3d2a7b0c527d34731f15b752ff200e76d5addd67/simple_ci.py#L43-L56
def repository(namespace, name, branch='master'): '''Returns a repository''' with TemporaryDirectory() as download_path: old_directory = str(pwd()).strip() try: git.clone('https://github.com/{0}/{1}.git'.format(namespace, name), download_path) cd(download_path) git.fetch('origin', branch) git.checkout(branch) yield (download_path, git('rev-parse', 'HEAD'), redis.Dict(key="{0}.{1}".format(namespace, name))) except ErrorReturnCode_128: mkdir(download_path) yield (None, None, None) cd(old_directory)
[ "def", "repository", "(", "namespace", ",", "name", ",", "branch", "=", "'master'", ")", ":", "with", "TemporaryDirectory", "(", ")", "as", "download_path", ":", "old_directory", "=", "str", "(", "pwd", "(", ")", ")", ".", "strip", "(", ")", "try", ":"...
Returns a repository
[ "Returns", "a", "repository" ]
python
train
django-crispy-forms/django-crispy-forms
crispy_forms/helper.py
https://github.com/django-crispy-forms/django-crispy-forms/blob/cd476927a756133c667c199bb12120f877bf6b7e/crispy_forms/helper.py#L353-L421
def get_attributes(self, template_pack=TEMPLATE_PACK): """ Used by crispy_forms_tags to get helper attributes """ items = { 'form_method': self.form_method.strip(), 'form_tag': self.form_tag, 'form_style': self.form_style.strip(), 'form_show_errors': self.form_show_errors, 'help_text_inline': self.help_text_inline, 'error_text_inline': self.error_text_inline, 'html5_required': self.html5_required, 'form_show_labels': self.form_show_labels, 'disable_csrf': self.disable_csrf, 'label_class': self.label_class, 'field_class': self.field_class, 'include_media': self.include_media } if template_pack == 'bootstrap4': bootstrap_size_match = re.findall('col-(xl|lg|md|sm)-(\d+)', self.label_class) if bootstrap_size_match: if template_pack == 'bootstrap4': offset_pattern = 'offset-%s-%s' else: offset_pattern = 'col-%s-offset-%s' items['bootstrap_checkbox_offsets'] = [offset_pattern % m for m in bootstrap_size_match] else: bootstrap_size_match = re.findall('col-(lg|md|sm|xs)-(\d+)', self.label_class) if bootstrap_size_match: if template_pack == 'bootstrap4': offset_pattern = 'offset-%s-%s' else: offset_pattern = 'col-%s-offset-%s' items['bootstrap_checkbox_offsets'] = [offset_pattern % m for m in bootstrap_size_match] items['attrs'] = {} if self.attrs: items['attrs'] = self.attrs.copy() if self.form_action: items['attrs']['action'] = self.form_action.strip() if self.form_id: items['attrs']['id'] = self.form_id.strip() if self.form_class: # uni_form TEMPLATE PACK has a uniForm class by default if template_pack == 'uni_form': items['attrs']['class'] = "uniForm %s" % self.form_class.strip() else: items['attrs']['class'] = self.form_class.strip() else: if template_pack == 'uni_form': items['attrs']['class'] = self.attrs.get('class', '') + " uniForm" if self.form_group_wrapper_class: items['attrs']['form_group_wrapper_class'] = self.form_group_wrapper_class items['flat_attrs'] = flatatt(items['attrs']) if self.inputs: items['inputs'] = self.inputs if self.form_error_title: 
items['form_error_title'] = self.form_error_title.strip() if self.formset_error_title: items['formset_error_title'] = self.formset_error_title.strip() for attribute_name, value in self.__dict__.items(): if attribute_name not in items and attribute_name not in ['layout', 'inputs'] and not attribute_name.startswith('_'): items[attribute_name] = value return items
[ "def", "get_attributes", "(", "self", ",", "template_pack", "=", "TEMPLATE_PACK", ")", ":", "items", "=", "{", "'form_method'", ":", "self", ".", "form_method", ".", "strip", "(", ")", ",", "'form_tag'", ":", "self", ".", "form_tag", ",", "'form_style'", "...
Used by crispy_forms_tags to get helper attributes
[ "Used", "by", "crispy_forms_tags", "to", "get", "helper", "attributes" ]
python
train
MSchnei/pyprf_feature
pyprf_feature/analysis/pyprf_sim_ep.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_sim_ep.py#L14-L54
def get_arg_parse(): """Parses the Command Line Arguments using argparse.""" # Create parser object: objParser = argparse.ArgumentParser() # Add argument to namespace -strCsvPrf results file path: objParser.add_argument('-strCsvPrf', required=True, metavar='/path/to/my_prior_res', help='Absolute file path of prior pRF results. \ Ignored if in testing mode.' ) # Add argument to namespace -strStmApr results file path: objParser.add_argument('-strStmApr', required=True, metavar='/path/to/my_prior_res', help='Absolute file path to npy file with \ stimulus apertures. Ignored if in testing \ mode.' ) # Add argument to namespace -lgcNoise flag: objParser.add_argument('-lgcNoise', dest='lgcNoise', action='store_true', default=False, help='Should noise be added to the simulated pRF\ time course?') # Add argument to namespace -lgcRtnNrl flag: objParser.add_argument('-lgcRtnNrl', dest='lgcRtnNrl', action='store_true', default=False, help='Should neural time course, unconvolved with \ hrf, be returned as well?') objParser.add_argument('-supsur', nargs='+', help='List of floats that represent the ratio of \ size neg surround to size pos center.', type=float, default=None) # Namespace object containign arguments and values: objNspc = objParser.parse_args() return objNspc
[ "def", "get_arg_parse", "(", ")", ":", "# Create parser object:", "objParser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# Add argument to namespace -strCsvPrf results file path:", "objParser", ".", "add_argument", "(", "'-strCsvPrf'", ",", "required", "=", "True...
Parses the Command Line Arguments using argparse.
[ "Parses", "the", "Command", "Line", "Arguments", "using", "argparse", "." ]
python
train
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L264-L272
def make_argument(expr, lineno, byref=None): """ Wrapper: Creates a node containing an ARGUMENT """ if expr is None: return # There were a syntax / semantic error if byref is None: byref = OPTIONS.byref.value return symbols.ARGUMENT(expr, lineno=lineno, byref=byref)
[ "def", "make_argument", "(", "expr", ",", "lineno", ",", "byref", "=", "None", ")", ":", "if", "expr", "is", "None", ":", "return", "# There were a syntax / semantic error", "if", "byref", "is", "None", ":", "byref", "=", "OPTIONS", ".", "byref", ".", "val...
Wrapper: Creates a node containing an ARGUMENT
[ "Wrapper", ":", "Creates", "a", "node", "containing", "an", "ARGUMENT" ]
python
train
pivotal-energy-solutions/django-datatable-view
datatableview/datatables.py
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/datatables.py#L962-L998
def preload_record_data(self, obj): """ Modifies the ``obj`` values dict to alias the selected values to the column name that asked for its selection. For example, a datatable that declares a column ``'blog'`` which has a related lookup source ``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how it was selected, the latter because that was the column name used to select it). :Example: ``{'pk': 1, 'blog__name': "My Blog"}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog': "My Blog"}`` When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column name's entry in ``obj`` will be a list of each of those values. :Example: ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}`` In every situation, the original selected values will always be retained in ``obj``. """ data = {} for orm_path, column_name in self.value_queries.items(): value = obj[orm_path] if column_name not in data: data[column_name] = value else: if not isinstance(data[column_name], (tuple, list)): data[column_name] = [data[column_name]] data[column_name].append(value) obj.update(data) return super(ValuesDatatable, self).preload_record_data(obj)
[ "def", "preload_record_data", "(", "self", ",", "obj", ")", ":", "data", "=", "{", "}", "for", "orm_path", ",", "column_name", "in", "self", ".", "value_queries", ".", "items", "(", ")", ":", "value", "=", "obj", "[", "orm_path", "]", "if", "column_nam...
Modifies the ``obj`` values dict to alias the selected values to the column name that asked for its selection. For example, a datatable that declares a column ``'blog'`` which has a related lookup source ``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how it was selected, the latter because that was the column name used to select it). :Example: ``{'pk': 1, 'blog__name': "My Blog"}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog': "My Blog"}`` When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column name's entry in ``obj`` will be a list of each of those values. :Example: ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}`` In every situation, the original selected values will always be retained in ``obj``.
[ "Modifies", "the", "obj", "values", "dict", "to", "alias", "the", "selected", "values", "to", "the", "column", "name", "that", "asked", "for", "its", "selection", "." ]
python
train
noobermin/pys
pys/__init__.py
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L54-L59
def check_vprint(s, vprinter): '''checked verbose printing''' if vprinter is True: print(s); elif callable(vprinter): vprinter(s);
[ "def", "check_vprint", "(", "s", ",", "vprinter", ")", ":", "if", "vprinter", "is", "True", ":", "print", "(", "s", ")", "elif", "callable", "(", "vprinter", ")", ":", "vprinter", "(", "s", ")" ]
checked verbose printing
[ "checked", "verbose", "printing" ]
python
train
CivicSpleen/ambry
ambry/library/warehouse.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L289-L318
def geoframe(self, sql, simplify=None, crs=None, epsg=4326): """ Return geopandas dataframe :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param crs: Coordinate reference system information :param epsg: Specifiy the CRS as an EPGS number. :return: A Geopandas GeoDataFrame """ import geopandas from shapely.wkt import loads from fiona.crs import from_epsg if crs is None: try: crs = from_epsg(epsg) except TypeError: raise TypeError('Must set either crs or epsg for output.') df = self.dataframe(sql) geometry = df['geometry'] if simplify: s = geometry.apply(lambda x: loads(x).simplify(simplify)) else: s = geometry.apply(lambda x: loads(x)) df['geometry'] = geopandas.GeoSeries(s) return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
[ "def", "geoframe", "(", "self", ",", "sql", ",", "simplify", "=", "None", ",", "crs", "=", "None", ",", "epsg", "=", "4326", ")", ":", "import", "geopandas", "from", "shapely", ".", "wkt", "import", "loads", "from", "fiona", ".", "crs", "import", "fr...
Return geopandas dataframe :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param crs: Coordinate reference system information :param epsg: Specifiy the CRS as an EPGS number. :return: A Geopandas GeoDataFrame
[ "Return", "geopandas", "dataframe" ]
python
train
fbngrm/babelpy
babelpy/babelfy.py
https://github.com/fbngrm/babelpy/blob/ff305abecddd66aed40c32f0010485cf192e5f17/babelpy/babelfy.py#L211-L231
def _wraps(self, tokens): """determine if a token is wrapped by another token """ def _differ(tokens): inner, outer = tokens not_same_start = inner.get('start') != outer.get('start') not_same_end = inner.get('end') != outer.get('end') return not_same_start or not_same_end def _in_range(tokens): inner, outer = tokens starts_in = outer.get('start') <= inner.get('start') \ <= outer.get('end') ends_in = outer.get('start') <= inner.get('end') \ <= outer.get('end') return starts_in and ends_in if not _differ(tokens): return False return _in_range(tokens)
[ "def", "_wraps", "(", "self", ",", "tokens", ")", ":", "def", "_differ", "(", "tokens", ")", ":", "inner", ",", "outer", "=", "tokens", "not_same_start", "=", "inner", ".", "get", "(", "'start'", ")", "!=", "outer", ".", "get", "(", "'start'", ")", ...
determine if a token is wrapped by another token
[ "determine", "if", "a", "token", "is", "wrapped", "by", "another", "token" ]
python
train
kragniz/python-etcd3
etcd3/client.py
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L1091-L1103
def client(host='localhost', port=2379, ca_cert=None, cert_key=None, cert_cert=None, timeout=None, user=None, password=None, grpc_options=None): """Return an instance of an Etcd3Client.""" return Etcd3Client(host=host, port=port, ca_cert=ca_cert, cert_key=cert_key, cert_cert=cert_cert, timeout=timeout, user=user, password=password, grpc_options=grpc_options)
[ "def", "client", "(", "host", "=", "'localhost'", ",", "port", "=", "2379", ",", "ca_cert", "=", "None", ",", "cert_key", "=", "None", ",", "cert_cert", "=", "None", ",", "timeout", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None",...
Return an instance of an Etcd3Client.
[ "Return", "an", "instance", "of", "an", "Etcd3Client", "." ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3472-L3487
def schedule_forced_host_svc_checks(self, host, check_time): """Schedule a forced check on all services of a host Format of the line that triggers function call:: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None """ for service_id in host.services: service = self.daemon.services[service_id] self.schedule_forced_svc_check(service, check_time) self.send_an_element(service.get_update_status_brok())
[ "def", "schedule_forced_host_svc_checks", "(", "self", ",", "host", ",", "check_time", ")", ":", "for", "service_id", "in", "host", ".", "services", ":", "service", "=", "self", ".", "daemon", ".", "services", "[", "service_id", "]", "self", ".", "schedule_f...
Schedule a forced check on all services of a host Format of the line that triggers function call:: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None
[ "Schedule", "a", "forced", "check", "on", "all", "services", "of", "a", "host", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
odlgroup/odl
odl/discr/grid.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/grid.py#L564-L640
def is_subgrid(self, other, atol=0.0): """Return ``True`` if this grid is a subgrid of ``other``. Parameters ---------- other : `RectGrid` The other grid which is supposed to contain this grid atol : float, optional Allow deviations up to this number in absolute value per coordinate vector entry. Returns ------- is_subgrid : bool ``True`` if all coordinate vectors of ``self`` are within absolute distance ``atol`` of the other grid, else ``False``. Examples -------- >>> rg = uniform_grid([-2, -2], [0, 4], (3, 4)) >>> rg.coord_vectors (array([-2., -1., 0.]), array([-2., 0., 2., 4.])) >>> rg_sub = uniform_grid([-1, 2], [0, 4], (2, 2)) >>> rg_sub.coord_vectors (array([-1., 0.]), array([ 2., 4.])) >>> rg_sub.is_subgrid(rg) True Fuzzy check is also possible. Note that the tolerance still applies to the coordinate vectors. >>> rg_sub = uniform_grid([-1.015, 2], [0, 3.99], (2, 2)) >>> rg_sub.is_subgrid(rg, atol=0.01) False >>> rg_sub.is_subgrid(rg, atol=0.02) True """ # Optimization for some common cases if other is self: return True if not isinstance(other, RectGrid): return False if not all(self.shape[i] <= other.shape[i] and self.min_pt[i] >= other.min_pt[i] - atol and self.max_pt[i] <= other.max_pt[i] + atol for i in range(self.ndim)): return False if self.size == 0: return True if self.is_uniform and other.is_uniform: # For uniform grids, it suffices to show that min_pt, max_pt # and g[1,...,1] are contained in the other grid. For axes # with less than 2 points, this reduces to min_pt and max_pt, # and the corresponding indices in the other check point are # set to 0. 
minmax_contained = ( other.approx_contains(self.min_pt, atol=atol) and other.approx_contains(self.max_pt, atol=atol)) check_idx = np.zeros(self.ndim, dtype=int) check_idx[np.array(self.shape) >= 3] = 1 checkpt_contained = other.approx_contains(self[tuple(check_idx)], atol=atol) return minmax_contained and checkpt_contained else: # Array version of the fuzzy subgrid test, about 3 times faster # than the loop version. for vec_o, vec_s in zip(other.coord_vectors, self.coord_vectors): # Create array of differences of all entries in vec_o and # vec_s. If there is no almost zero entry in each row, # return False. vec_o_mg, vec_s_mg = sparse_meshgrid(vec_o, vec_s) if not np.all(np.any(np.isclose(vec_s_mg, vec_o_mg, atol=atol), axis=0)): return False return True
[ "def", "is_subgrid", "(", "self", ",", "other", ",", "atol", "=", "0.0", ")", ":", "# Optimization for some common cases", "if", "other", "is", "self", ":", "return", "True", "if", "not", "isinstance", "(", "other", ",", "RectGrid", ")", ":", "return", "Fa...
Return ``True`` if this grid is a subgrid of ``other``. Parameters ---------- other : `RectGrid` The other grid which is supposed to contain this grid atol : float, optional Allow deviations up to this number in absolute value per coordinate vector entry. Returns ------- is_subgrid : bool ``True`` if all coordinate vectors of ``self`` are within absolute distance ``atol`` of the other grid, else ``False``. Examples -------- >>> rg = uniform_grid([-2, -2], [0, 4], (3, 4)) >>> rg.coord_vectors (array([-2., -1., 0.]), array([-2., 0., 2., 4.])) >>> rg_sub = uniform_grid([-1, 2], [0, 4], (2, 2)) >>> rg_sub.coord_vectors (array([-1., 0.]), array([ 2., 4.])) >>> rg_sub.is_subgrid(rg) True Fuzzy check is also possible. Note that the tolerance still applies to the coordinate vectors. >>> rg_sub = uniform_grid([-1.015, 2], [0, 3.99], (2, 2)) >>> rg_sub.is_subgrid(rg, atol=0.01) False >>> rg_sub.is_subgrid(rg, atol=0.02) True
[ "Return", "True", "if", "this", "grid", "is", "a", "subgrid", "of", "other", "." ]
python
train
ejeschke/ginga
ginga/rv/plugins/TVMask.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/TVMask.py#L207-L228
def redo(self): """Image or masks have changed. Clear and redraw.""" if not self.gui_up: return self.clear_mask() image = self.fitsimage.get_image() if image is None: return n_obj = len(self._maskobjs) self.logger.debug('Displaying {0} masks'.format(n_obj)) if n_obj == 0: return # Display info table self.recreate_toc() # Draw on canvas self.masktag = self.canvas.add(self.dc.CompoundObject(*self._maskobjs)) self.fitsimage.redraw()
[ "def", "redo", "(", "self", ")", ":", "if", "not", "self", ".", "gui_up", ":", "return", "self", ".", "clear_mask", "(", ")", "image", "=", "self", ".", "fitsimage", ".", "get_image", "(", ")", "if", "image", "is", "None", ":", "return", "n_obj", "...
Image or masks have changed. Clear and redraw.
[ "Image", "or", "masks", "have", "changed", ".", "Clear", "and", "redraw", "." ]
python
train
paypal/baler
baler/baler.py
https://github.com/paypal/baler/blob/db4f09dd2c7729b2df5268c87ad3b4cb43396abf/baler/baler.py#L22-L41
def paths_in_directory(input_directory): """ Generate a list of all files in input_directory, each as a list containing path components. """ paths = [] for base_path, directories, filenames in os.walk(input_directory): relative_path = os.path.relpath(base_path, input_directory) path_components = relative_path.split(os.sep) if path_components[0] == ".": path_components = path_components[1:] if path_components and path_components[0].startswith("."): # hidden dir continue path_components = filter(bool, path_components) # remove empty components for filename in filenames: if filename.startswith("."): # hidden file continue paths.append(path_components + [filename]) return paths
[ "def", "paths_in_directory", "(", "input_directory", ")", ":", "paths", "=", "[", "]", "for", "base_path", ",", "directories", ",", "filenames", "in", "os", ".", "walk", "(", "input_directory", ")", ":", "relative_path", "=", "os", ".", "path", ".", "relpa...
Generate a list of all files in input_directory, each as a list containing path components.
[ "Generate", "a", "list", "of", "all", "files", "in", "input_directory", "each", "as", "a", "list", "containing", "path", "components", "." ]
python
train
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L206-L209
def left_to_right(self): """This is for text that flows Left to Right""" self._entry_mode |= Command.MODE_INCREMENT self.command(self._entry_mode)
[ "def", "left_to_right", "(", "self", ")", ":", "self", ".", "_entry_mode", "|=", "Command", ".", "MODE_INCREMENT", "self", ".", "command", "(", "self", ".", "_entry_mode", ")" ]
This is for text that flows Left to Right
[ "This", "is", "for", "text", "that", "flows", "Left", "to", "Right" ]
python
train
ionelmc/python-cogen
cogen/core/schedulers.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/schedulers.py#L89-L98
def add(self, coro, args=(), kwargs={}, first=True): """Add a coroutine in the scheduler. You can add arguments (_args_, _kwargs_) to init the coroutine with.""" assert callable(coro), "'%s' not a callable object" % coro coro = coro(*args, **kwargs) if first: self.active.append( (None, coro) ) else: self.active.appendleft( (None, coro) ) return coro
[ "def", "add", "(", "self", ",", "coro", ",", "args", "=", "(", ")", ",", "kwargs", "=", "{", "}", ",", "first", "=", "True", ")", ":", "assert", "callable", "(", "coro", ")", ",", "\"'%s' not a callable object\"", "%", "coro", "coro", "=", "coro", ...
Add a coroutine in the scheduler. You can add arguments (_args_, _kwargs_) to init the coroutine with.
[ "Add", "a", "coroutine", "in", "the", "scheduler", ".", "You", "can", "add", "arguments", "(", "_args_", "_kwargs_", ")", "to", "init", "the", "coroutine", "with", "." ]
python
train
dhylands/rshell
rshell/main.py
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L567-L576
def extra_funcs(*funcs): """Decorator which adds extra functions to be downloaded to the pyboard.""" def extra_funcs_decorator(real_func): def wrapper(*args, **kwargs): return real_func(*args, **kwargs) wrapper.extra_funcs = list(funcs) wrapper.source = inspect.getsource(real_func) wrapper.name = real_func.__name__ return wrapper return extra_funcs_decorator
[ "def", "extra_funcs", "(", "*", "funcs", ")", ":", "def", "extra_funcs_decorator", "(", "real_func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "real_func", "(", "*", "args", ",", "*", "*", "kwargs", ")...
Decorator which adds extra functions to be downloaded to the pyboard.
[ "Decorator", "which", "adds", "extra", "functions", "to", "be", "downloaded", "to", "the", "pyboard", "." ]
python
train
collectiveacuity/labPack
labpack/databases/couchbase.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L605-L639
def create_session(self, uid, duration=0): ''' a method to create a session token for the user :param uid: string with id of user in bucket :param duration: integer with number of seconds to last (default: 24hrs) :return: dictionary with account fields for user ''' title = '%s.create_session' % self.__class__.__name__ # validate inputs input_fields = { 'uid': uid, 'duration': duration } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = self.bucket_url + '/_session' json_data = { 'name': uid } if duration: json_data['ttl'] = duration # send request and unwrap response response = requests.post(url, json=json_data) response = response.json() return response
[ "def", "create_session", "(", "self", ",", "uid", ",", "duration", "=", "0", ")", ":", "title", "=", "'%s.create_session'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'uid'", ":", "uid", ",", "'duration'",...
a method to create a session token for the user :param uid: string with id of user in bucket :param duration: integer with number of seconds to last (default: 24hrs) :return: dictionary with account fields for user
[ "a", "method", "to", "create", "a", "session", "token", "for", "the", "user", ":", "param", "uid", ":", "string", "with", "id", "of", "user", "in", "bucket", ":", "param", "duration", ":", "integer", "with", "number", "of", "seconds", "to", "last", "("...
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/account_admin_api.py#L242-L263
def add_subjects_to_group(self, group_id, body, **kwargs): # noqa: E501 """Add members to a group. # noqa: E501 An endpoint for adding users and API keys to a group. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -d '{\"users\": [0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_subjects_to_group(group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str group_id: The ID of the group to be updated. (required) :param SubjectList body: A list of users and API keys to be added to the group. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.add_subjects_to_group_with_http_info(group_id, body, **kwargs) # noqa: E501 else: (data) = self.add_subjects_to_group_with_http_info(group_id, body, **kwargs) # noqa: E501 return data
[ "def", "add_subjects_to_group", "(", "self", ",", "group_id", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", ...
Add members to a group. # noqa: E501 An endpoint for adding users and API keys to a group. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -d '{\"users\": [0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_subjects_to_group(group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str group_id: The ID of the group to be updated. (required) :param SubjectList body: A list of users and API keys to be added to the group. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Add", "members", "to", "a", "group", ".", "#", "noqa", ":", "E501" ]
python
train
molmod/molmod
molmod/io/fchk.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/fchk.py#L214-L226
def get_hessian(self): """Return the hessian""" force_const = self.fields.get("Cartesian Force Constants") if force_const is None: return None N = len(self.molecule.numbers) result = np.zeros((3*N, 3*N), float) counter = 0 for row in range(3*N): result[row, :row+1] = force_const[counter:counter+row+1] result[:row+1, row] = force_const[counter:counter+row+1] counter += row + 1 return result
[ "def", "get_hessian", "(", "self", ")", ":", "force_const", "=", "self", ".", "fields", ".", "get", "(", "\"Cartesian Force Constants\"", ")", "if", "force_const", "is", "None", ":", "return", "None", "N", "=", "len", "(", "self", ".", "molecule", ".", "...
Return the hessian
[ "Return", "the", "hessian" ]
python
train
pydata/xarray
xarray/core/variable.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L1667-L1681
def coarsen(self, windows, func, boundary='exact', side='left'): """ Apply """ windows = {k: v for k, v in windows.items() if k in self.dims} if not windows: return self.copy() reshaped, axes = self._coarsen_reshape(windows, boundary, side) if isinstance(func, str): name = func func = getattr(duck_array_ops, name, None) if func is None: raise NameError('{} is not a valid method.'.format(name)) return type(self)(self.dims, func(reshaped, axis=axes), self._attrs)
[ "def", "coarsen", "(", "self", ",", "windows", ",", "func", ",", "boundary", "=", "'exact'", ",", "side", "=", "'left'", ")", ":", "windows", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "windows", ".", "items", "(", ")", "if", "k", "in...
Apply
[ "Apply" ]
python
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L472-L485
def dmail_create(self, to_name, title, body): """Create a dmail (Requires login) Parameters: to_name (str): The recipient's name. title (str): The title of the message. body (str): The body of the message. """ params = { 'dmail[to_name]': to_name, 'dmail[title]': title, 'dmail[body]': body } return self._get('dmails.json', params, 'POST', auth=True)
[ "def", "dmail_create", "(", "self", ",", "to_name", ",", "title", ",", "body", ")", ":", "params", "=", "{", "'dmail[to_name]'", ":", "to_name", ",", "'dmail[title]'", ":", "title", ",", "'dmail[body]'", ":", "body", "}", "return", "self", ".", "_get", "...
Create a dmail (Requires login) Parameters: to_name (str): The recipient's name. title (str): The title of the message. body (str): The body of the message.
[ "Create", "a", "dmail", "(", "Requires", "login", ")" ]
python
train
saltstack/salt
salt/modules/win_system.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L70-L76
def _convert_date_time_string(dt_string): ''' convert string to date time object ''' dt_string = dt_string.split('.')[0] dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S') return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
[ "def", "_convert_date_time_string", "(", "dt_string", ")", ":", "dt_string", "=", "dt_string", ".", "split", "(", "'.'", ")", "[", "0", "]", "dt_obj", "=", "datetime", ".", "strptime", "(", "dt_string", ",", "'%Y%m%d%H%M%S'", ")", "return", "dt_obj", ".", ...
convert string to date time object
[ "convert", "string", "to", "date", "time", "object" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/diffusion_analyzer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/diffusion_analyzer.py#L868-L908
def get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None, **kwargs): """ Returns an Arrhenius plot. Args: temps ([float]): A sequence of temperatures. diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). diffusivity_errors ([float]): A sequence of errors for the diffusivities. If None, no error bar is plotted. \\*\\*kwargs: Any keyword args supported by matplotlib.pyplot.plot. Returns: A matplotlib.pyplot object. Do plt.show() to show the plot. """ Ea, c, _ = fit_arrhenius(temps, diffusivities) from pymatgen.util.plotting import pretty_plot plt = pretty_plot(12, 8) # log10 of the arrhenius fit arr = c * np.exp(-Ea / (const.k / const.e * np.array(temps))) t_1 = 1000 / np.array(temps) plt.plot(t_1, diffusivities, 'ko', t_1, arr, 'k--', markersize=10, **kwargs) if diffusivity_errors is not None: n = len(diffusivity_errors) plt.errorbar(t_1[0:n], diffusivities[0:n], yerr=diffusivity_errors, fmt='ko', ecolor='k', capthick=2, linewidth=2) ax = plt.axes() ax.set_yscale('log') plt.text(0.6, 0.85, "E$_a$ = {:.0f} meV".format(Ea * 1000), fontsize=30, transform=plt.axes().transAxes) plt.ylabel("D (cm$^2$/s)") plt.xlabel("1000/T (K$^{-1}$)") plt.tight_layout() return plt
[ "def", "get_arrhenius_plot", "(", "temps", ",", "diffusivities", ",", "diffusivity_errors", "=", "None", ",", "*", "*", "kwargs", ")", ":", "Ea", ",", "c", ",", "_", "=", "fit_arrhenius", "(", "temps", ",", "diffusivities", ")", "from", "pymatgen", ".", ...
Returns an Arrhenius plot. Args: temps ([float]): A sequence of temperatures. diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). diffusivity_errors ([float]): A sequence of errors for the diffusivities. If None, no error bar is plotted. \\*\\*kwargs: Any keyword args supported by matplotlib.pyplot.plot. Returns: A matplotlib.pyplot object. Do plt.show() to show the plot.
[ "Returns", "an", "Arrhenius", "plot", "." ]
python
train
estnltk/estnltk
estnltk/syntax/utils.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/utils.py#L746-L838
def as_dependencygraph( self, keep_dummy_root=False, add_morph=True ): ''' Returns this tree as NLTK's DependencyGraph object. Note that this method constructs 'zero_based' graph, where counting of the words starts from 0 and the root index is -1 (not 0, as in Malt-TAB format); Parameters ----------- add_morph : bool Specifies whether the morphological information (information about word lemmas, part-of-speech, and features) should be added to graph nodes. Note that even if **add_morph==True**, morphological information is only added if it is available via estnltk's layer token['analysis']; Default: True keep_dummy_root : bool Specifies whether the graph should include a dummy TOP / ROOT node, which does not refer to any word, and yet is the topmost node of the tree. If the dummy root node is not used, then the root node is the word node headed by -1; Default: False For more information about NLTK's DependencyGraph, see: http://www.nltk.org/_modules/nltk/parse/dependencygraph.html ''' from nltk.parse.dependencygraph import DependencyGraph graph = DependencyGraph( zero_based = True ) all_tree_nodes = [self] + self.get_children() # # 0) Fix the root # if keep_dummy_root: # Note: we have to re-construct the root node manually, # as DependencyGraph's current interface seems to provide # no easy/convenient means for fixing the root node; graph.nodes[-1] = graph.nodes[0] graph.nodes[-1].update( { 'address': -1 } ) graph.root = graph.nodes[-1] del graph.nodes[0] # # 1) Update / Add nodes of the graph # for child in all_tree_nodes: rel = 'xxx' if not child.labels else '|'.join(child.labels) address = child.word_id word = child.text graph.nodes[address].update( { 'address': address, 'word': child.text, 'rel': rel, } ) if not keep_dummy_root and child == self: # If we do not keep the dummy root node, set this tree # as the root node graph.root = graph.nodes[address] if add_morph and child.morph: # Add morphological information, if possible lemmas = set([analysis[LEMMA] for 
analysis in child.morph]) postags = set([analysis[POSTAG] for analysis in child.morph]) feats = set([analysis[FORM] for analysis in child.morph]) lemma = ('|'.join( list(lemmas) )).replace(' ','_') postag = ('|'.join( list(postags) )).replace(' ','_') feats = ('|'.join( list(feats) )).replace(' ','_') graph.nodes[address].update( { 'tag ': postag, 'ctag' : postag, 'feats': feats, 'lemma': lemma } ) # # 2) Update / Add arcs of the graph # for child in all_tree_nodes: # Connect children of given word deps = [] if not child.children else [c.word_id for c in child.children] head_address = child.word_id for dep in deps: graph.add_arc( head_address, dep ) if child.parent == None and keep_dummy_root: graph.add_arc( -1, head_address ) # Connect the parent of given node head = -1 if not child.parent else child.parent.word_id graph.nodes[head_address].update( { 'head': head, } ) return graph
[ "def", "as_dependencygraph", "(", "self", ",", "keep_dummy_root", "=", "False", ",", "add_morph", "=", "True", ")", ":", "from", "nltk", ".", "parse", ".", "dependencygraph", "import", "DependencyGraph", "graph", "=", "DependencyGraph", "(", "zero_based", "=", ...
Returns this tree as NLTK's DependencyGraph object. Note that this method constructs 'zero_based' graph, where counting of the words starts from 0 and the root index is -1 (not 0, as in Malt-TAB format); Parameters ----------- add_morph : bool Specifies whether the morphological information (information about word lemmas, part-of-speech, and features) should be added to graph nodes. Note that even if **add_morph==True**, morphological information is only added if it is available via estnltk's layer token['analysis']; Default: True keep_dummy_root : bool Specifies whether the graph should include a dummy TOP / ROOT node, which does not refer to any word, and yet is the topmost node of the tree. If the dummy root node is not used, then the root node is the word node headed by -1; Default: False For more information about NLTK's DependencyGraph, see: http://www.nltk.org/_modules/nltk/parse/dependencygraph.html
[ "Returns", "this", "tree", "as", "NLTK", "s", "DependencyGraph", "object", ".", "Note", "that", "this", "method", "constructs", "zero_based", "graph", "where", "counting", "of", "the", "words", "starts", "from", "0", "and", "the", "root", "index", "is", "-",...
python
train
raiden-network/raiden
raiden/transfer/balance_proof.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/balance_proof.py#L7-L35
def pack_balance_proof( nonce: Nonce, balance_hash: BalanceHash, additional_hash: AdditionalHash, canonical_identifier: CanonicalIdentifier, msg_type: MessageTypeId = MessageTypeId.BALANCE_PROOF, ) -> bytes: """Packs balance proof data to be signed Packs the given arguments in a byte array in the same configuration the contracts expect the signed data to have. """ return pack_data([ 'address', 'uint256', 'uint256', 'uint256', 'bytes32', 'uint256', 'bytes32', ], [ canonical_identifier.token_network_address, canonical_identifier.chain_identifier, msg_type, canonical_identifier.channel_identifier, balance_hash, nonce, additional_hash, ])
[ "def", "pack_balance_proof", "(", "nonce", ":", "Nonce", ",", "balance_hash", ":", "BalanceHash", ",", "additional_hash", ":", "AdditionalHash", ",", "canonical_identifier", ":", "CanonicalIdentifier", ",", "msg_type", ":", "MessageTypeId", "=", "MessageTypeId", ".", ...
Packs balance proof data to be signed Packs the given arguments in a byte array in the same configuration the contracts expect the signed data to have.
[ "Packs", "balance", "proof", "data", "to", "be", "signed" ]
python
train
openid/python-openid
examples/djopenid/server/views.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/examples/djopenid/server/views.py#L63-L72
def server(request): """ Respond to requests for the server's primary web page. """ return direct_to_template( request, 'server/index.html', {'user_url': getViewURL(request, idPage), 'server_xrds_url': getViewURL(request, idpXrds), })
[ "def", "server", "(", "request", ")", ":", "return", "direct_to_template", "(", "request", ",", "'server/index.html'", ",", "{", "'user_url'", ":", "getViewURL", "(", "request", ",", "idPage", ")", ",", "'server_xrds_url'", ":", "getViewURL", "(", "request", "...
Respond to requests for the server's primary web page.
[ "Respond", "to", "requests", "for", "the", "server", "s", "primary", "web", "page", "." ]
python
train
log2timeline/dfdatetime
dfdatetime/posix_time.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/posix_time.py#L236-L251
def _GetNormalizedTimestamp(self): """Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. """ if self._normalized_timestamp is None: if self._timestamp is not None: self._normalized_timestamp = ( decimal.Decimal(self._timestamp) / definitions.MICROSECONDS_PER_SECOND) return self._normalized_timestamp
[ "def", "_GetNormalizedTimestamp", "(", "self", ")", ":", "if", "self", ".", "_normalized_timestamp", "is", "None", ":", "if", "self", ".", "_timestamp", "is", "not", "None", ":", "self", ".", "_normalized_timestamp", "=", "(", "decimal", ".", "Decimal", "(",...
Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined.
[ "Retrieves", "the", "normalized", "timestamp", "." ]
python
train
happyleavesaoc/python-snapcast
snapcast/control/group.py
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/control/group.py#L128-L132
def update_stream(self, data): """Update stream.""" self._group['stream_id'] = data['stream_id'] self.callback() _LOGGER.info('updated stream to %s on %s', self.stream, self.friendly_name)
[ "def", "update_stream", "(", "self", ",", "data", ")", ":", "self", ".", "_group", "[", "'stream_id'", "]", "=", "data", "[", "'stream_id'", "]", "self", ".", "callback", "(", ")", "_LOGGER", ".", "info", "(", "'updated stream to %s on %s'", ",", "self", ...
Update stream.
[ "Update", "stream", "." ]
python
train
robotools/fontMath
Lib/fontMath/mathTransform.py
https://github.com/robotools/fontMath/blob/6abcb9d5a1ca19788fbde4418d7b5630c60990d8/Lib/fontMath/mathTransform.py#L338-L340
def _linearInterpolationTransformMatrix(matrix1, matrix2, value): """ Linear, 'oldstyle' interpolation of the transform matrix.""" return tuple(_interpolateValue(matrix1[i], matrix2[i], value) for i in range(len(matrix1)))
[ "def", "_linearInterpolationTransformMatrix", "(", "matrix1", ",", "matrix2", ",", "value", ")", ":", "return", "tuple", "(", "_interpolateValue", "(", "matrix1", "[", "i", "]", ",", "matrix2", "[", "i", "]", ",", "value", ")", "for", "i", "in", "range", ...
Linear, 'oldstyle' interpolation of the transform matrix.
[ "Linear", "oldstyle", "interpolation", "of", "the", "transform", "matrix", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/library_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/library_state.py#L567-L580
def library_hierarchy_depth(self): """ Calculates the library hierarchy depth Counting starts at the current library state. So if the there is no upper library state the depth is one. :return: library hierarchy depth :rtype: int """ current_library_hierarchy_depth = 1 library_root_state = self.get_next_upper_library_root_state() while library_root_state is not None: current_library_hierarchy_depth += 1 library_root_state = library_root_state.parent.get_next_upper_library_root_state() return current_library_hierarchy_depth
[ "def", "library_hierarchy_depth", "(", "self", ")", ":", "current_library_hierarchy_depth", "=", "1", "library_root_state", "=", "self", ".", "get_next_upper_library_root_state", "(", ")", "while", "library_root_state", "is", "not", "None", ":", "current_library_hierarchy...
Calculates the library hierarchy depth Counting starts at the current library state. So if the there is no upper library state the depth is one. :return: library hierarchy depth :rtype: int
[ "Calculates", "the", "library", "hierarchy", "depth" ]
python
train
google/grumpy
third_party/stdlib/rfc822.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/rfc822.py#L492-L498
def parseaddr(address): """Parse an address into a (realname, mailaddr) tuple.""" a = AddressList(address) lst = a.addresslist if not lst: return (None, None) return lst[0]
[ "def", "parseaddr", "(", "address", ")", ":", "a", "=", "AddressList", "(", "address", ")", "lst", "=", "a", ".", "addresslist", "if", "not", "lst", ":", "return", "(", "None", ",", "None", ")", "return", "lst", "[", "0", "]" ]
Parse an address into a (realname, mailaddr) tuple.
[ "Parse", "an", "address", "into", "a", "(", "realname", "mailaddr", ")", "tuple", "." ]
python
valid
mattrobenolt/ec2
ec2/helpers.py
https://github.com/mattrobenolt/ec2/blob/fc1f8bce6cf76899165d9ac006371181d52439f8/ec2/helpers.py#L12-L22
def make_compare(key, value, obj): "Map a key name to a specific comparison function" if '__' not in key: # If no __ exists, default to doing an "exact" comparison key, comp = key, 'exact' else: key, comp = key.rsplit('__', 1) # Check if comp is valid if hasattr(Compare, comp): return getattr(Compare, comp)(key, value, obj) raise AttributeError("No comparison '%s'" % comp)
[ "def", "make_compare", "(", "key", ",", "value", ",", "obj", ")", ":", "if", "'__'", "not", "in", "key", ":", "# If no __ exists, default to doing an \"exact\" comparison", "key", ",", "comp", "=", "key", ",", "'exact'", "else", ":", "key", ",", "comp", "=",...
Map a key name to a specific comparison function
[ "Map", "a", "key", "name", "to", "a", "specific", "comparison", "function" ]
python
train
LordDarkula/chess_py
chess_py/core/board.py
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/board.py#L73-L108
def init_default(cls): """ Creates a ``Board`` with the standard chess starting position. :rtype: Board """ return cls([ # First rank [Rook(white, Location(0, 0)), Knight(white, Location(0, 1)), Bishop(white, Location(0, 2)), Queen(white, Location(0, 3)), King(white, Location(0, 4)), Bishop(white, Location(0, 5)), Knight(white, Location(0, 6)), Rook(white, Location(0, 7))], # Second rank [Pawn(white, Location(1, file)) for file in range(8)], # Third rank [None for _ in range(8)], # Fourth rank [None for _ in range(8)], # Fifth rank [None for _ in range(8)], # Sixth rank [None for _ in range(8)], # Seventh rank [Pawn(black, Location(6, file)) for file in range(8)], # Eighth rank [Rook(black, Location(7, 0)), Knight(black, Location(7, 1)), Bishop(black, Location(7, 2)), Queen(black, Location(7, 3)), King(black, Location(7, 4)), Bishop(black, Location(7, 5)), Knight(black, Location(7, 6)), Rook(black, Location(7, 7))] ])
[ "def", "init_default", "(", "cls", ")", ":", "return", "cls", "(", "[", "# First rank", "[", "Rook", "(", "white", ",", "Location", "(", "0", ",", "0", ")", ")", ",", "Knight", "(", "white", ",", "Location", "(", "0", ",", "1", ")", ")", ",", "...
Creates a ``Board`` with the standard chess starting position. :rtype: Board
[ "Creates", "a", "Board", "with", "the", "standard", "chess", "starting", "position", "." ]
python
train
saltstack/salt
salt/modules/ssh.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ssh.py#L123-L134
def _get_config_file(user, config): ''' Get absolute path to a user's ssh_config. ''' uinfo = __salt__['user.info'](user) if not uinfo: raise CommandExecutionError('User \'{0}\' does not exist'.format(user)) home = uinfo['home'] config = _expand_authorized_keys_path(config, user, home) if not os.path.isabs(config): config = os.path.join(home, config) return config
[ "def", "_get_config_file", "(", "user", ",", "config", ")", ":", "uinfo", "=", "__salt__", "[", "'user.info'", "]", "(", "user", ")", "if", "not", "uinfo", ":", "raise", "CommandExecutionError", "(", "'User \\'{0}\\' does not exist'", ".", "format", "(", "user...
Get absolute path to a user's ssh_config.
[ "Get", "absolute", "path", "to", "a", "user", "s", "ssh_config", "." ]
python
train
ntoll/uflash
uflash.py
https://github.com/ntoll/uflash/blob/867468d386da0aa20212b69a152ce8bfc0972366/uflash.py#L358-L376
def watch_file(path, func, *args, **kwargs): """ Watch a file for changes by polling its last modification time. Call the provided function with *args and **kwargs upon modification. """ if not path: raise ValueError('Please specify a file to watch') print('Watching "{}" for changes'.format(path)) last_modification_time = os.path.getmtime(path) try: while True: time.sleep(1) new_modification_time = os.path.getmtime(path) if new_modification_time == last_modification_time: continue func(*args, **kwargs) last_modification_time = new_modification_time except KeyboardInterrupt: pass
[ "def", "watch_file", "(", "path", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "path", ":", "raise", "ValueError", "(", "'Please specify a file to watch'", ")", "print", "(", "'Watching \"{}\" for changes'", ".", "format", "...
Watch a file for changes by polling its last modification time. Call the provided function with *args and **kwargs upon modification.
[ "Watch", "a", "file", "for", "changes", "by", "polling", "its", "last", "modification", "time", ".", "Call", "the", "provided", "function", "with", "*", "args", "and", "**", "kwargs", "upon", "modification", "." ]
python
train
wdbm/abstraction
abstraction.py
https://github.com/wdbm/abstraction/blob/58c81e73954cc6b4cd2f79b2216467528a96376b/abstraction.py#L1930-L1956
def sentiment( text = None, confidence = False ): """ This function accepts a string text input. It calculates the sentiment of the text, "pos" or "neg". By default, it returns this calculated sentiment. If selected, it returns a tuple of the calculated sentiment and the classificaton confidence. """ try: words = text.split(" ") # Remove empty strings. words = [word for word in words if word] features = word_features(words) classification = classifier.classify(features) confidence_classification = classifier.prob_classify(features).prob(classification) except: classification = None confidence_classification = None if confidence: return ( classification, confidence_classification ) else: return classification
[ "def", "sentiment", "(", "text", "=", "None", ",", "confidence", "=", "False", ")", ":", "try", ":", "words", "=", "text", ".", "split", "(", "\" \"", ")", "# Remove empty strings.", "words", "=", "[", "word", "for", "word", "in", "words", "if", "word"...
This function accepts a string text input. It calculates the sentiment of the text, "pos" or "neg". By default, it returns this calculated sentiment. If selected, it returns a tuple of the calculated sentiment and the classificaton confidence.
[ "This", "function", "accepts", "a", "string", "text", "input", ".", "It", "calculates", "the", "sentiment", "of", "the", "text", "pos", "or", "neg", ".", "By", "default", "it", "returns", "this", "calculated", "sentiment", ".", "If", "selected", "it", "ret...
python
train
iotile/coretools
iotilecore/iotile/core/utilities/schema_verify/literal_verify.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/schema_verify/literal_verify.py#L35-L49
def format(self, indent_level, indent_size=4): """Format this verifier Returns: string: A formatted string """ name = self.format_name('Literal', indent_size) if self.long_desc is not None: name += '\n' name += self.wrap_lines('value: %s\n' % str(self._literal), 1, indent_size) return self.wrap_lines(name, indent_level, indent_size)
[ "def", "format", "(", "self", ",", "indent_level", ",", "indent_size", "=", "4", ")", ":", "name", "=", "self", ".", "format_name", "(", "'Literal'", ",", "indent_size", ")", "if", "self", ".", "long_desc", "is", "not", "None", ":", "name", "+=", "'\\n...
Format this verifier Returns: string: A formatted string
[ "Format", "this", "verifier" ]
python
train
klen/muffin-session
example.py
https://github.com/klen/muffin-session/blob/f1d14d12b7d09d8cc40be14b0dfa0b1e2f4ae8e9/example.py#L23-L27
def update(request): """Update a current user's session.""" session = yield from app.ps.session.load(request) session['random'] = random.random() return session
[ "def", "update", "(", "request", ")", ":", "session", "=", "yield", "from", "app", ".", "ps", ".", "session", ".", "load", "(", "request", ")", "session", "[", "'random'", "]", "=", "random", ".", "random", "(", ")", "return", "session" ]
Update a current user's session.
[ "Update", "a", "current", "user", "s", "session", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L21823-L21845
def wait_for_completion(self, timeout):
    """Block until this task (including all sub-operations) is done.

    in timeout of type int
        Maximum time in milliseconds to wait, or -1 to wait indefinitely.

    raises :class:`VBoxErrorIprtError`
        Failed to wait for task completion.

    Note that the VirtualBox/XPCOM/COM/native event queues of the calling
    thread are not processed while waiting.  Callers are advised to wait
    in short periods and service their event queues between calls, or to
    create a worker thread to do the waiting.
    """
    if not isinstance(timeout, baseinteger):
        raise TypeError("timeout can only be an instance of type baseinteger")
    self._call("waitForCompletion", in_p=[timeout])
[ "def", "wait_for_completion", "(", "self", ",", "timeout", ")", ":", "if", "not", "isinstance", "(", "timeout", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"timeout can only be an instance of type baseinteger\"", ")", "self", ".", "_call", "(", "\"w...
Waits until the task is done (including all sub-operations) with a given timeout in milliseconds; specify -1 for an indefinite wait. Note that the VirtualBox/XPCOM/COM/native event queues of the calling thread are not processed while waiting. Neglecting event queues may have dire consequences (degrade performance, resource hogs, deadlocks, etc.), this is specially so for the main thread on platforms using XPCOM. Callers are advised wait for short periods and service their event queues between calls, or to create a worker thread to do the waiting. in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for task completion.
[ "Waits", "until", "the", "task", "is", "done", "(", "including", "all", "sub", "-", "operations", ")", "with", "a", "given", "timeout", "in", "milliseconds", ";", "specify", "-", "1", "for", "an", "indefinite", "wait", ".", "Note", "that", "the", "Virtua...
python
train
persephone-tools/persephone
persephone/utils.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L117-L130
def get_prefixes(dirname: str, extension: str) -> List[str]:
    """Return sorted path prefixes of files under *dirname* ending in *extension*.

    Each prefix keeps the full directory path but truncates the filename at
    the first '.' — i.e. the part of the name before any extension.  The
    directory may be a whole corpus or a train/valid/test subset.
    """
    found = []
    for root, _, names in os.walk(dirname):
        # Files matching the extension are input feature files; their
        # prefix identifies one training example.
        found.extend(
            os.path.join(root, name.split(".")[0])
            for name in names
            if name.endswith(extension)
        )
    return sorted(found)
[ "def", "get_prefixes", "(", "dirname", ":", "str", ",", "extension", ":", "str", ")", "->", "List", "[", "str", "]", ":", "prefixes", "=", "[", "]", "for", "root", ",", "_", ",", "filenames", "in", "os", ".", "walk", "(", "dirname", ")", ":", "fo...
Returns a list of prefixes to files in the directory (which might be a whole corpus, or a train/valid/test subset. The prefixes include the path leading up to it, but only the filename up until the first observed period '.'
[ "Returns", "a", "list", "of", "prefixes", "to", "files", "in", "the", "directory", "(", "which", "might", "be", "a", "whole", "corpus", "or", "a", "train", "/", "valid", "/", "test", "subset", ".", "The", "prefixes", "include", "the", "path", "leading", ...
python
train
quantmind/pulsar
pulsar/utils/config.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/config.py#L613-L641
def add_argument(self, parser, set_default=False):
    """Register this :class:`Setting` with an argparse ``parser``.

    Nothing is added unless either :attr:`flags` is set, or both
    :attr:`nargs` and :attr:`name` are defined.

    Args:
        parser: an ``argparse.ArgumentParser``-like object.
        set_default: when True, pass :attr:`default` as the argparse
            default; otherwise the default is ``None``.
    """
    default = self.default if set_default else None
    options = {
        'nargs': self.nargs,
        'default': default,
        'help': "%s [%s]" % (self.short, default),
    }
    options.update(self.extra)

    if self.flags:
        names = tuple(self.flags)
        options['dest'] = self.name
        options['action'] = self.action or "store"
        # Non-"store" actions (store_true, count, ...) reject type/nargs.
        if options["action"] != "store":
            options.pop("type", None)
            options.pop("nargs", None)
    elif self.nargs and self.name:
        names = (self.name,)
        options['metavar'] = self.meta or None
    else:
        # Neither flags nor a positional spec: not exposed on the CLI.
        return

    if self.meta:
        options['metavar'] = self.meta
    parser.add_argument(*names, **options)
[ "def", "add_argument", "(", "self", ",", "parser", ",", "set_default", "=", "False", ")", ":", "default", "=", "self", ".", "default", "if", "set_default", "else", "None", "kwargs", "=", "dict", "(", "nargs", "=", "self", ".", "nargs", ",", "default", ...
Add this :class:`Setting` to the ``parser``. The operation is carried out only if :attr:`flags` or :attr:`nargs` and :attr:`name` are defined.
[ "Add", "this", ":", "class", ":", "Setting", "to", "the", "parser", "." ]
python
train
google/python-gflags
gflags2man.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L274-L321
def ParsePythonFlags(self, start_line=0):
    """Parse python/swig style flags.

    Walks ``self.output`` from ``start_line`` onward, recognizing module
    headers and flag definitions via the precompiled regexes on ``self``.
    Populates ``self.module_list`` (ordered module names) and
    ``self.modules`` (module name -> list of Flag objects) in place.

    Args:
        start_line: index into ``self.output`` at which parsing begins.
    """
    modname = None  # name of current module
    modlist = []
    flag = None
    for line_num in range(start_line, len(self.output)):  # collect flags
        line = self.output[line_num].rstrip()
        if not line:  # blank
            continue

        mobj = self.module_py_re.match(line)
        if mobj:  # start of a new module
            modname = mobj.group(1)
            logging.debug('Module: %s' % line)
            # A pending flag belongs to the previous module; flush it first.
            if flag:
                modlist.append(flag)
            self.module_list.append(modname)
            self.modules.setdefault(modname, [])
            modlist = self.modules[modname]
            flag = None
            continue

        mobj = self.flag_py_re.match(line)
        if mobj:  # start of a new flag
            if flag:
                modlist.append(flag)
            logging.debug('Flag: %s' % line)
            flag = Flag(mobj.group(1), mobj.group(2))
            continue

        # Everything below is a continuation of the current flag.
        if not flag:  # continuation of a flag
            logging.error('Flag info, but no current flag "%s"' % line)
        mobj = self.flag_default_py_re.match(line)
        if mobj:  # (default: '...')
            flag.default = mobj.group(1)
            logging.debug('Fdef: %s' % line)
            continue
        mobj = self.flag_tips_py_re.match(line)
        if mobj:  # (tips)
            flag.tips = mobj.group(1)
            logging.debug('Ftip: %s' % line)
            continue
        if flag and flag.help:
            flag.help += line  # multiflags tack on an extra line
        else:
            logging.info('Extra: %s' % line)

    # Flush the final flag once the scan is complete.
    if flag:
        modlist.append(flag)
[ "def", "ParsePythonFlags", "(", "self", ",", "start_line", "=", "0", ")", ":", "modname", "=", "None", "# name of current module", "modlist", "=", "[", "]", "flag", "=", "None", "for", "line_num", "in", "range", "(", "start_line", ",", "len", "(", "self", ...
Parse python/swig style flags.
[ "Parse", "python", "/", "swig", "style", "flags", "." ]
python
train
quodlibet/mutagen
mutagen/easymp4.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/easymp4.py#L152-L180
def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
    """Register a text key.

    For a simple one-to-one mapping between an MP4 freeform atom (----)
    plus name and an EasyMP4Tags key, use this helper::

        EasyMP4Tags.RegisterFreeformKey(
            "musicbrainz_artistid", "MusicBrainz Artist Id")
    """
    atomid = "----:" + mean + ":" + name

    def getter(tags, key):
        # Freeform atoms store raw bytes; decode leniently to text.
        return [raw.decode("utf-8", "replace") for raw in tags[atomid]]

    def setter(tags, key, value):
        out = []
        for item in value:
            if not isinstance(item, text_type):
                # Python 3 refuses silent bytes->str coercion.
                if PY3:
                    raise TypeError("%r not str" % item)
                item = item.decode("utf-8")
            out.append(item.encode("utf-8"))
        tags[atomid] = out

    def deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, getter, setter, deleter)
[ "def", "RegisterFreeformKey", "(", "cls", ",", "key", ",", "name", ",", "mean", "=", "\"com.apple.iTunes\"", ")", ":", "atomid", "=", "\"----:\"", "+", "mean", "+", "\":\"", "+", "name", "def", "getter", "(", "tags", ",", "key", ")", ":", "return", "["...
Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 freeform atom (----) and name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterFreeformKey( "musicbrainz_artistid", "MusicBrainz Artist Id")
[ "Register", "a", "text", "key", "." ]
python
train
SmokinCaterpillar/pypet
pypet/utils/explore.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/explore.py#L66-L108
def find_unique_points(explored_parameters):
    """Find unique value combinations among a list of explored parameters.

    Runs in O(N) when the parameter ranges are hashable and falls back to
    an O(N**2) comparison-based scan otherwise.

    :param explored_parameters: List of **explored** parameters
    :return:
        List of ``(value_tuple, positions)`` pairs where ``positions``
        lists every run index exhibiting that combination.
    """
    ranges = [param.f_get_range(copy=False) for param in explored_parameters]
    combos = list(zip(*ranges))

    try:
        # Fast path: group runs by hashing each combination tuple.
        grouped = OrderedDict()
        for run_idx, combo in enumerate(combos):
            grouped.setdefault(combo, []).append(run_idx)
        return list(grouped.items())
    except TypeError:
        logger = logging.getLogger('pypet.find_unique')
        logger.error('Your parameter entries could not be hashed, '
                     'now I am sorting slowly in O(N**2).')
        unique = []
        for run_idx, combo in enumerate(combos):
            for stored_combo, positions in unique:
                # Compare dimension-wise with each parameter's own
                # equality notion.
                if all(explored_parameters[dim]._equal_values(combo[dim], value)
                       for dim, value in enumerate(stored_combo)):
                    positions.append(run_idx)
                    break
            else:
                unique.append((combo, [run_idx]))
        return unique
[ "def", "find_unique_points", "(", "explored_parameters", ")", ":", "ranges", "=", "[", "param", ".", "f_get_range", "(", "copy", "=", "False", ")", "for", "param", "in", "explored_parameters", "]", "zipped_tuples", "=", "list", "(", "zip", "(", "*", "ranges"...
Takes a list of explored parameters and finds unique parameter combinations. If parameter ranges are hashable operates in O(N), otherwise O(N**2). :param explored_parameters: List of **explored** parameters :return: List of tuples, first entry being the parameter values, second entry a list containing the run position of the unique combination.
[ "Takes", "a", "list", "of", "explored", "parameters", "and", "finds", "unique", "parameter", "combinations", "." ]
python
test
nerdvegas/rez
src/support/package_utils/set_authors.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/support/package_utils/set_authors.py#L5-L21
def set_authors(data):
    """Add 'authors' attribute based on repo contributions.

    Runs the bundled ``get_committers.sh`` script and stores its output
    (one committer per line) under ``data["authors"]``.  Does nothing when
    the attribute is already present or the script exits non-zero.

    Args:
        data (dict): package data dict, updated in place.
    """
    if "authors" in data:
        return

    shfile = os.path.join(os.path.dirname(__file__), "get_committers.sh")
    # universal_newlines makes communicate() return text, not bytes —
    # without it the str split below raises TypeError on Python 3.
    p = subprocess.Popen(["bash", shfile], stdout=subprocess.PIPE,
                         universal_newlines=True)
    out, _ = p.communicate()
    if p.returncode:
        return

    authors = [x.strip() for x in out.strip().split('\n')]
    data["authors"] = authors
[ "def", "set_authors", "(", "data", ")", ":", "if", "\"authors\"", "in", "data", ":", "return", "shfile", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"get_committers.sh\"", ")", "p", "=", "...
Add 'authors' attribute based on repo contributions
[ "Add", "authors", "attribute", "based", "on", "repo", "contributions" ]
python
train
aiortc/aiortc
aiortc/rtcrtpreceiver.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcrtpreceiver.py#L493-L499
async def _send_rtcp_pli(self, media_ssrc):
    """
    Send an RTCP Picture Loss Indication (PLI) for the given media SSRC.

    Nothing is sent until our own RTCP SSRC has been established.
    """
    if self.__rtcp_ssrc is None:
        return
    pli = RtcpPsfbPacket(fmt=RTCP_PSFB_PLI, ssrc=self.__rtcp_ssrc,
                         media_ssrc=media_ssrc)
    await self._send_rtcp(pli)
[ "async", "def", "_send_rtcp_pli", "(", "self", ",", "media_ssrc", ")", ":", "if", "self", ".", "__rtcp_ssrc", "is", "not", "None", ":", "packet", "=", "RtcpPsfbPacket", "(", "fmt", "=", "RTCP_PSFB_PLI", ",", "ssrc", "=", "self", ".", "__rtcp_ssrc", ",", ...
Send an RTCP packet to report picture loss.
[ "Send", "an", "RTCP", "packet", "to", "report", "picture", "loss", "." ]
python
train
AtteqCom/zsl
src/zsl/utils/cache_helper.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/cache_helper.py#L181-L194
def create_key_for_data(prefix, data, key_params):
    """
    Build a cache key for ``data`` from the ``key_params`` declared in the
    caching decorator.

    Each parameter contributes a ``name:value`` segment; list values are
    joined with ``" -"`` and missing parameters contribute an empty value.
    """
    payload = data.get_data()
    segments = []
    for name in key_params:
        if name in payload and type(payload[name]) is list:
            segments.append("{0}:{1}".format(name, " -".join(payload[name])))
        else:
            segments.append("{0}:{1}".format(name, payload.get(name, '')))
    return "{0}-{1}".format(prefix, "-".join(segments))
[ "def", "create_key_for_data", "(", "prefix", ",", "data", ",", "key_params", ")", ":", "d", "=", "data", ".", "get_data", "(", ")", "values", "=", "[", "]", "for", "k", "in", "key_params", ":", "if", "k", "in", "d", "and", "type", "(", "d", "[", ...
From ``data`` params in task create corresponding key with help of ``key_params`` (defined in decorator)
[ "From", "data", "params", "in", "task", "create", "corresponding", "key", "with", "help", "of", "key_params", "(", "defined", "in", "decorator", ")" ]
python
train
CityOfZion/neo-python
neo/SmartContract/ContractParameter.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/SmartContract/ContractParameter.py#L27-L53
def ToParameter(item: StackItem):
    """
    Convert a StackItem to a ContractParameter object.

    Args:
        item (neo.VM.InteropService.StackItem): the item to convert.

    Returns:
        ContractParameter: the converted parameter, or None when ``item``
        is of an unhandled stack item type.
    """
    # Arrays and structs convert recursively, element by element.
    if isinstance(item, (Array, Struct)):
        converted = [ContractParameter.ToParameter(element)
                     for element in item.GetArray()]
        return ContractParameter(type=ContractParameterType.Array,
                                 value=converted)

    if isinstance(item, Boolean):
        return ContractParameter(type=ContractParameterType.Boolean,
                                 value=item.GetBoolean())

    if isinstance(item, ByteArray):
        return ContractParameter(type=ContractParameterType.ByteArray,
                                 value=item.GetByteArray())

    if isinstance(item, Integer):
        return ContractParameter(type=ContractParameterType.Integer,
                                 value=str(item.GetBigInteger()))

    if isinstance(item, InteropInterface):
        return ContractParameter(type=ContractParameterType.InteropInterface,
                                 value=item.GetInterface())
[ "def", "ToParameter", "(", "item", ":", "StackItem", ")", ":", "if", "isinstance", "(", "item", ",", "Array", ")", "or", "isinstance", "(", "item", ",", "Struct", ")", ":", "items", "=", "item", ".", "GetArray", "(", ")", "output", "=", "[", "Contrac...
Convert a StackItem to a ContractParameter object Args: item (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object Returns: ContractParameter
[ "Convert", "a", "StackItem", "to", "a", "ContractParameter", "object" ]
python
train
mitodl/PyLmod
pylmod/gradebook.py
https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/gradebook.py#L723-L838
def get_students(
        self,
        gradebook_id='',
        simple=False,
        section_name='',
        include_photo=False,
        include_grade_info=False,
        include_grade_history=False,
        include_makeup_grades=False
):
    """Get students for a gradebook.

    Get a list of students for a given gradebook,
    specified by a gradebook id.  Does not include grade data.

    Args:
        gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
        simple (bool):
            if ``True``, just return dictionary with keys ``email``,
            ``name``, ``section``, default = ``False``
        section_name (str): section name
        include_photo (bool): include student photo, default= ``False``
        include_grade_info (bool):
            include student's grade info, default= ``False``
        include_grade_history (bool):
            include student's grade history, default= ``False``
        include_makeup_grades (bool):
            include student's makeup grades, default= ``False``

    Raises:
        requests.RequestException: Exception connection error
        ValueError: Unable to decode response content

    Returns:
        list: list of student dictionaries

        .. code-block:: python

            [{
                u'accountEmail': u'stellar.test2@gmail.com',
                u'displayName': u'Molly Parker',
                u'photoUrl': None,
                u'middleName': None,
                u'section': u'Unassigned',
                u'sectionId': 1293925,
                u'editable': False,
                u'overallGradeInformation': None,
                u'studentId': 1145,
                u'studentAssignmentInfo': None,
                u'sortableName': u'Parker, Molly',
                u'surname': u'Parker',
                u'givenName': u'Molly',
                u'nickName': u'Molly',
                u'email': u'stellar.test2@gmail.com'
            },]
    """
    # These are parameters required for the remote API call, so
    # there aren't too many arguments, or too many variables
    # pylint: disable=too-many-arguments,too-many-locals

    # Set params by arguments; the remote API expects JSON-encoded
    # booleans ("true"/"false"), hence json.dumps.
    params = dict(
        includePhoto=json.dumps(include_photo),
        includeGradeInfo=json.dumps(include_grade_info),
        includeGradeHistory=json.dumps(include_grade_history),
        includeMakeupGrades=json.dumps(include_makeup_grades),
    )

    url = 'students/{gradebookId}'
    if section_name:
        # Narrow the query to a single section; fail loudly when the
        # section name does not resolve to a group id.
        group_id, _ = self.get_section_by_name(section_name)
        if group_id is None:
            failure_message = (
                'in get_students -- Error: '
                'No such section %s' % section_name
            )
            log.critical(failure_message)
            raise PyLmodNoSuchSection(failure_message)
        url += '/section/{0}'.format(group_id)

    student_data = self.get(
        url.format(
            gradebookId=gradebook_id or self.gradebook_id
        ),
        params=params,
    )

    if simple:
        # just return dict with keys email, name, section
        student_map = dict(
            accountEmail='email',
            displayName='name',
            section='section'
        )

        def remap(students):
            """Convert mit.edu domain to upper-case for student emails.

            The mit.edu domain for user email must be upper-case,
            i.e. MIT.EDU.

            Args:
                students (list): list of students

            Returns:
                dict: dictionary of updated student email domains
            """
            newx = dict((student_map[k], students[k]) for k in student_map)
            # match certs
            newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')
            return newx

        return [remap(x) for x in student_data['data']]

    return student_data['data']
[ "def", "get_students", "(", "self", ",", "gradebook_id", "=", "''", ",", "simple", "=", "False", ",", "section_name", "=", "''", ",", "include_photo", "=", "False", ",", "include_grade_info", "=", "False", ",", "include_grade_history", "=", "False", ",", "in...
Get students for a gradebook. Get a list of students for a given gradebook, specified by a gradebook id. Does not include grade data. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): if ``True``, just return dictionary with keys ``email``, ``name``, ``section``, default = ``False`` section_name (str): section name include_photo (bool): include student photo, default= ``False`` include_grade_info (bool): include student's grade info, default= ``False`` include_grade_history (bool): include student's grade history, default= ``False`` include_makeup_grades (bool): include student's makeup grades, default= ``False`` Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: list: list of student dictionaries .. code-block:: python [{ u'accountEmail': u'stellar.test2@gmail.com', u'displayName': u'Molly Parker', u'photoUrl': None, u'middleName': None, u'section': u'Unassigned', u'sectionId': 1293925, u'editable': False, u'overallGradeInformation': None, u'studentId': 1145, u'studentAssignmentInfo': None, u'sortableName': u'Parker, Molly', u'surname': u'Parker', u'givenName': u'Molly', u'nickName': u'Molly', u'email': u'stellar.test2@gmail.com' },]
[ "Get", "students", "for", "a", "gradebook", "." ]
python
train
bububa/pyTOP
pyTOP/category.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/category.py#L63-L75
def get(self, parent_cid=None, cids=None, fields=None):
    '''taobao.itemcats.get

    Fetch the standard product categories that sellers can use when
    publishing items (Taobao item-categories API).

    Args:
        parent_cid: optional parent category id to restrict the query.
        cids: optional iterable of category ids to fetch.
        fields: optional list of response fields; defaults to every
            field declared on :class:`ItemCat`.

    Returns:
        The parsed category list (``self.item_cats``).
    '''
    # Mutable containers must not be used as defaults; None stands in
    # for "not given" and preserves the original falsy-check behavior.
    request = TOPRequest('taobao.itemcats.get')
    if not fields:
        itemCat = ItemCat()
        fields = itemCat.fields
    request['fields'] = fields
    if parent_cid is not None:
        request['parent_cid'] = parent_cid
    if cids:
        request['cids'] = ','.join([str(cid) for cid in cids])
    self.create(self.execute(request))
    return self.item_cats
[ "def", "get", "(", "self", ",", "parent_cid", "=", "None", ",", "cids", "=", "[", "]", ",", "fields", "=", "[", "]", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.itemcats.get'", ")", "if", "not", "fields", ":", "itemCat", "=", "ItemCat", "("...
taobao.itemcats.get 获取后台供卖家发布商品的标准商品类目
[ "taobao", ".", "itemcats", ".", "get", "获取后台供卖家发布商品的标准商品类目" ]
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L2526-L2531
def is_any_down(self):
    """Return True when at least one button is currently depressed."""
    key_count = len(self.current_state.key_states)
    return any(self.is_down(key) for key in range(key_count))
[ "def", "is_any_down", "(", "self", ")", ":", "for", "key", "in", "range", "(", "len", "(", "self", ".", "current_state", ".", "key_states", ")", ")", ":", "if", "self", ".", "is_down", "(", "key", ")", ":", "return", "True", "return", "False" ]
Is any button depressed?
[ "Is", "any", "button", "depressed?" ]
python
train
saltstack/salt
salt/modules/swift.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swift.py#L179-L202
def put(cont, path=None, local_file=None, profile=None):
    '''
    Create a new container, or upload an object to a container.

    CLI Example to create a container:

    .. code-block:: bash

        salt myminion swift.put mycontainer

    CLI Example to upload an object to a container:

    .. code-block:: bash

        salt myminion swift.put mycontainer remotepath local_file=/path/to/file
    '''
    swift_conn = _auth(profile)
    # No remote path given: create the container itself.
    if path is None:
        return swift_conn.put_container(cont)
    # A remote path plus a local file: upload the object.
    if local_file is not None:
        return swift_conn.put_object(cont, path, local_file)
    # A path without a local file is not a valid request.
    return False
[ "def", "put", "(", "cont", ",", "path", "=", "None", ",", "local_file", "=", "None", ",", "profile", "=", "None", ")", ":", "swift_conn", "=", "_auth", "(", "profile", ")", "if", "path", "is", "None", ":", "return", "swift_conn", ".", "put_container", ...
Create a new container, or upload an object to a container. CLI Example to create a container: .. code-block:: bash salt myminion swift.put mycontainer CLI Example to upload an object to a container: .. code-block:: bash salt myminion swift.put mycontainer remotepath local_file=/path/to/file
[ "Create", "a", "new", "container", "or", "upload", "an", "object", "to", "a", "container", "." ]
python
train
google/transitfeed
kmlwriter.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/kmlwriter.py#L213-L242
def _CreateLineString(self, parent, coordinate_list):
    """Create a KML LineString element under ``parent``.

    Every element of coordinate_list must be a (longitude, latitude) or
    (longitude, latitude, altitude) tuple; the first tuple's length
    decides whether altitudes are emitted.

    Args:
        parent: The parent ElementTree.Element instance.
        coordinate_list: The list of coordinate tuples.

    Returns:
        The LineString ElementTree.Element instance, or None when
        coordinate_list is empty.
    """
    if not coordinate_list:
        return None

    has_altitude = len(coordinate_list[0]) == 3

    linestring = ET.SubElement(parent, 'LineString')
    tessellate = ET.SubElement(linestring, 'tessellate')
    tessellate.text = '1'
    if has_altitude:
        # Altitudes only render when the mode is absolute.
        altitude_mode = ET.SubElement(linestring, 'altitudeMode')
        altitude_mode.text = 'absolute'

    coordinates = ET.SubElement(linestring, 'coordinates')
    fmt = '%f,%f,%f' if has_altitude else '%f,%f'
    coordinates.text = ' '.join(fmt % t for t in coordinate_list)
    return linestring
[ "def", "_CreateLineString", "(", "self", ",", "parent", ",", "coordinate_list", ")", ":", "if", "not", "coordinate_list", ":", "return", "None", "linestring", "=", "ET", ".", "SubElement", "(", "parent", ",", "'LineString'", ")", "tessellate", "=", "ET", "."...
Create a KML LineString element. The points of the string are given in coordinate_list. Every element of coordinate_list should be one of a tuple (longitude, latitude) or a tuple (longitude, latitude, altitude). Args: parent: The parent ElementTree.Element instance. coordinate_list: The list of coordinates. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
[ "Create", "a", "KML", "LineString", "element", "." ]
python
train
jacebrowning/comparable
comparable/base.py
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L127-L132
def equal(obj1, obj2):
    """Return whether two (Comparable) objects are equal, logging the comparison."""
    Comparable.log(obj1, obj2, '==')
    outcome = obj1.equality(obj2)
    Comparable.log(obj1, obj2, '==', result=outcome)
    return outcome
[ "def", "equal", "(", "obj1", ",", "obj2", ")", ":", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'=='", ")", "equality", "=", "obj1", ".", "equality", "(", "obj2", ")", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'=='", ...
Calculate equality between two (Comparable) objects.
[ "Calculate", "equality", "between", "two", "(", "Comparable", ")", "objects", "." ]
python
train
nerdvegas/rez
src/rez/utils/backcompat.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/backcompat.py#L40-L48
def convert_old_command_expansions(command):
    """Convert expansions from the legacy !OLD! style to the {new} style."""
    translations = (
        ("!VERSION!", "{version}"),
        ("!MAJOR_VERSION!", "{version.major}"),
        ("!MINOR_VERSION!", "{version.minor}"),
        ("!BASE!", "{base}"),
        ("!ROOT!", "{root}"),
        ("!USER!", "{system.user}"),
    )
    for old_style, new_style in translations:
        command = command.replace(old_style, new_style)
    return command
[ "def", "convert_old_command_expansions", "(", "command", ")", ":", "command", "=", "command", ".", "replace", "(", "\"!VERSION!\"", ",", "\"{version}\"", ")", "command", "=", "command", ".", "replace", "(", "\"!MAJOR_VERSION!\"", ",", "\"{version.major}\"", ")", "...
Convert expansions from !OLD! style to {new}.
[ "Convert", "expansions", "from", "!OLD!", "style", "to", "{", "new", "}", "." ]
python
train
pkgw/pwkit
pwkit/kwargv.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/kwargv.py#L381-L461
def parse(self, args=None):
    """Parse textual keywords as described by this class’s attributes,
    and update this instance’s attributes with the parsed values. *args*
    is a list of strings; if ``None``, it defaults to ``sys.argv[1:]``.
    Returns *self* for convenience. Raises :exc:`KwargvError` if invalid
    keywords are encountered.

    See also :meth:`ParseKeywords.parse_or_die`.
    """
    if args is None:
        import sys
        args = sys.argv[1:]

    seen = set()

    for arg in args:
        # Each argument must look like "keyword=value".
        t = arg.split('=', 1)
        if len(t) < 2:
            raise KwargvError('don\'t know what to do with argument "%s"', arg)

        kw, val = t
        ki = self._kwinfos.get(kw)

        if ki is None:
            raise KwargvError('unrecognized keyword argument "%s"', kw)

        if not len(val):
            raise KwargvError('empty value for keyword argument "%s"', kw)

        try:
            pval = ki.parser(val)
        except ParseError as e:
            raise KwargvError('cannot parse value "%s" for keyword '
                              'argument "%s": %s', val, kw, e)
        except Exception as e:
            # Arbitrary parser failures only expose the underlying error
            # text when the keyword opted in via "printexc".
            if ki.printexc:
                raise KwargvError('cannot parse value "%s" for keyword '
                                  'argument "%s": %s', val, kw, e)
            raise KwargvError('cannot parse value "%s" for keyword '
                              'argument "%s"', val, kw)

        if ki.maxvals is not None and len(pval) > ki.maxvals:
            raise KwargvError('keyword argument "%s" may have at most %d'
                              ' values, but got %s ("%s")', kw, ki.maxvals,
                              len(pval), val)

        if ki.scale is not None:
            pval = pval * ki.scale

        if ki.fixupfunc is not None:
            pval = ki.fixupfunc(pval)

        if ki.repeatable:
            # We can't just unilaterally append to the preexisting
            # list, since if we did that starting with the default value
            # we'd mutate the default list.
            cur = self.get(ki._attrname)
            if not len(cur):
                pval = [pval]
            else:
                cur.append(pval)
                pval = cur

        seen.add(kw)
        self.set_one(ki._attrname, pval)

    for kw, ki in six.iteritems(self._kwinfos):
        if kw not in seen:
            if ki.required:
                raise KwargvError('required keyword argument "%s" was not provided', kw)

            # If there's a fixup, process it even if the keyword wasn't
            # provided. This lets code use "interesting" defaults with
            # types that you might prefer to use when launching a task
            # programmatically; e.g. a default output stream that is
            # `sys.stdout`, not "-".
            if ki.fixupfunc is not None:
                self.set_one(ki._attrname, ki.fixupfunc(None))

    return self
[ "def", "parse", "(", "self", ",", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "import", "sys", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "seen", "=", "set", "(", ")", "for", "arg", "in", "args", ":", "t", "=", ...
Parse textual keywords as described by this class’s attributes, and update this instance’s attributes with the parsed values. *args* is a list of strings; if ``None``, it defaults to ``sys.argv[1:]``. Returns *self* for convenience. Raises :exc:`KwargvError` if invalid keywords are encountered. See also :meth:`ParseKeywords.parse_or_die`.
[ "Parse", "textual", "keywords", "as", "described", "by", "this", "class’s", "attributes", "and", "update", "this", "instance’s", "attributes", "with", "the", "parsed", "values", ".", "*", "args", "*", "is", "a", "list", "of", "strings", ";", "if", "None", ...
python
train
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/tools.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/tools.py#L178-L248
def on_button_release(self, event): """Write back changes If one or more items have been moved, the new position are stored in the corresponding meta data and a signal notifying the change is emitted. :param event: The button event """ affected_models = {} for inmotion in self._movable_items: inmotion.move((event.x, event.y)) rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item, inmotion.item.handles()[NW]) if isinstance(inmotion.item, StateView): state_v = inmotion.item state_m = state_v.model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()['rel_pos'] != rel_pos: state_m.set_meta_data_editor('rel_pos', rel_pos) affected_models[state_m] = ("position", True, state_v) elif isinstance(inmotion.item, NameView): state_v = inmotion.item state_m = self.view.canvas.get_parent(state_v).model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos: state_m.set_meta_data_editor('name.rel_pos', rel_pos) affected_models[state_m] = ("name_position", False, state_v) elif isinstance(inmotion.item, TransitionView): transition_v = inmotion.item transition_m = transition_v.model self.view.canvas.request_update(transition_v) current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v) old_waypoints = transition_m.get_meta_data_editor()['waypoints'] if current_waypoints != old_waypoints: transition_m.set_meta_data_editor('waypoints', current_waypoints) affected_models[transition_m] = ("waypoints", False, transition_v) if len(affected_models) == 1: model = next(iter(affected_models)) change, affects_children, view = affected_models[model] self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children) elif len(affected_models) > 1: # if more than one item has been moved, we need to call the meta_data_changed signal on a common parent common_parents = None for change, affects_children, view in affected_models.values(): parents_of_view = 
set(self.view.canvas.get_ancestors(view)) if common_parents is None: common_parents = parents_of_view else: common_parents = common_parents.intersection(parents_of_view) assert len(common_parents) > 0, "The selected elements do not have common parent element" for state_v in common_parents: # Find most nested state_v children_of_state_v = self.view.canvas.get_all_children(state_v) if any(common_parent in children_of_state_v for common_parent in common_parents): continue self.view.graphical_editor.emit('meta_data_changed', state_v.model, "positions", True) break if not affected_models and self._old_selection is not None: # The selection is handled differently depending on whether states were moved or not # If no move operation was performed, we reset the selection to that is was before the button-press event # and let the state machine selection handle the selection self.view.unselect_all() self.view.select_item(self._old_selection) self.view.handle_new_selection(self._item) self._move_name_v = False self._old_selection = None return super(MoveItemTool, self).on_button_release(event)
[ "def", "on_button_release", "(", "self", ",", "event", ")", ":", "affected_models", "=", "{", "}", "for", "inmotion", "in", "self", ".", "_movable_items", ":", "inmotion", ".", "move", "(", "(", "event", ".", "x", ",", "event", ".", "y", ")", ")", "r...
Write back changes If one or more items have been moved, the new position are stored in the corresponding meta data and a signal notifying the change is emitted. :param event: The button event
[ "Write", "back", "changes" ]
python
train
awslabs/serverless-application-model
samtranslator/plugins/globals/globals_plugin.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/globals/globals_plugin.py#L20-L40
def on_before_transform_template(self, template_dict): """ Hook method that runs before a template gets transformed. In this method, we parse and process Globals section from the template (if present). :param dict template_dict: SAM template as a dictionary """ try: global_section = Globals(template_dict) except InvalidGlobalsSectionException as ex: raise InvalidDocumentException([ex]) # For each resource in template, try and merge with Globals if necessary template = SamTemplate(template_dict) for logicalId, resource in template.iterate(): resource.properties = global_section.merge(resource.type, resource.properties) template.set(logicalId, resource) # Remove the Globals section from template if necessary Globals.del_section(template_dict)
[ "def", "on_before_transform_template", "(", "self", ",", "template_dict", ")", ":", "try", ":", "global_section", "=", "Globals", "(", "template_dict", ")", "except", "InvalidGlobalsSectionException", "as", "ex", ":", "raise", "InvalidDocumentException", "(", "[", "...
Hook method that runs before a template gets transformed. In this method, we parse and process Globals section from the template (if present). :param dict template_dict: SAM template as a dictionary
[ "Hook", "method", "that", "runs", "before", "a", "template", "gets", "transformed", ".", "In", "this", "method", "we", "parse", "and", "process", "Globals", "section", "from", "the", "template", "(", "if", "present", ")", "." ]
python
train
inasafe/inasafe
safe/gui/tools/minimum_needs/needs_manager_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/minimum_needs/needs_manager_dialog.py#L234-L240
def populate_resource_list(self): """Populate the list resource list. """ minimum_needs = self.minimum_needs.get_full_needs() for full_resource in minimum_needs["resources"]: self.add_resource(full_resource) self.provenance.setText(minimum_needs["provenance"])
[ "def", "populate_resource_list", "(", "self", ")", ":", "minimum_needs", "=", "self", ".", "minimum_needs", ".", "get_full_needs", "(", ")", "for", "full_resource", "in", "minimum_needs", "[", "\"resources\"", "]", ":", "self", ".", "add_resource", "(", "full_re...
Populate the list resource list.
[ "Populate", "the", "list", "resource", "list", "." ]
python
train
knipknap/exscript
Exscript/account.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/account.py#L123-L138
def release(self, signal=True): """ Unlocks the account. Method has no effect if the constructor argument `needs_lock` wsa set to False. :type signal: bool :param signal: Whether to emit the released_event signal. """ if not self.needs_lock: return with self.synclock: self.lock.release() if signal: self.released_event(self) self.synclock.notify_all()
[ "def", "release", "(", "self", ",", "signal", "=", "True", ")", ":", "if", "not", "self", ".", "needs_lock", ":", "return", "with", "self", ".", "synclock", ":", "self", ".", "lock", ".", "release", "(", ")", "if", "signal", ":", "self", ".", "rele...
Unlocks the account. Method has no effect if the constructor argument `needs_lock` wsa set to False. :type signal: bool :param signal: Whether to emit the released_event signal.
[ "Unlocks", "the", "account", ".", "Method", "has", "no", "effect", "if", "the", "constructor", "argument", "needs_lock", "wsa", "set", "to", "False", "." ]
python
train
ludeeus/pytautulli
pytautulli/__init__.py
https://github.com/ludeeus/pytautulli/blob/0cf602f6720a105abb2311c8fbc8c6b2f9581276/pytautulli/__init__.py#L72-L87
async def get_session_data(self): """Get Tautulli sessions.""" cmd = 'get_activity' url = self.base_url + cmd try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger("Status from Tautulli: " + str(response.status)) self.tautulli_session_data = await response.json() logger(self.tautulli_session_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = "Can not load data from Tautulli: {} - {}".format(url, error) logger(msg, 40)
[ "async", "def", "get_session_data", "(", "self", ")", ":", "cmd", "=", "'get_activity'", "url", "=", "self", ".", "base_url", "+", "cmd", "try", ":", "async", "with", "async_timeout", ".", "timeout", "(", "8", ",", "loop", "=", "self", ".", "_loop", ")...
Get Tautulli sessions.
[ "Get", "Tautulli", "sessions", "." ]
python
train
wbond/oscrypto
oscrypto/_win/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/asymmetric.py#L2945-L2994
def _encrypt(certificate_or_public_key, data, rsa_oaep_padding=False): """ Encrypts a value using an RSA public key :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext """ if not isinstance(certificate_or_public_key, (Certificate, PublicKey)): raise TypeError(pretty_message( ''' certificate_or_public_key must be an instance of the Certificate or PublicKey class, not %s ''', type_name(certificate_or_public_key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if not isinstance(rsa_oaep_padding, bool): raise TypeError(pretty_message( ''' rsa_oaep_padding must be a bool, not %s ''', type_name(rsa_oaep_padding) )) if _backend == 'winlegacy': return _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding) return _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding)
[ "def", "_encrypt", "(", "certificate_or_public_key", ",", "data", ",", "rsa_oaep_padding", "=", "False", ")", ":", "if", "not", "isinstance", "(", "certificate_or_public_key", ",", "(", "Certificate", ",", "PublicKey", ")", ")", ":", "raise", "TypeError", "(", ...
Encrypts a value using an RSA public key :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
[ "Encrypts", "a", "value", "using", "an", "RSA", "public", "key" ]
python
valid
google/grr
api_client/python/grr_api_client/client.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/client.py#L179-L203
def CreateApproval(self, reason=None, notified_users=None, email_cc_addresses=None, keep_client_alive=False): """Create a new approval for the current user to access this client.""" if not reason: raise ValueError("reason can't be empty") if not notified_users: raise ValueError("notified_users list can't be empty.") approval = user_pb2.ApiClientApproval( reason=reason, notified_users=notified_users, email_cc_addresses=email_cc_addresses or []) args = user_pb2.ApiCreateClientApprovalArgs( client_id=self.client_id, approval=approval, keep_client_alive=keep_client_alive) data = self._context.SendRequest("CreateClientApproval", args) return ClientApproval( data=data, username=self._context.username, context=self._context)
[ "def", "CreateApproval", "(", "self", ",", "reason", "=", "None", ",", "notified_users", "=", "None", ",", "email_cc_addresses", "=", "None", ",", "keep_client_alive", "=", "False", ")", ":", "if", "not", "reason", ":", "raise", "ValueError", "(", "\"reason ...
Create a new approval for the current user to access this client.
[ "Create", "a", "new", "approval", "for", "the", "current", "user", "to", "access", "this", "client", "." ]
python
train
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L64-L70
def is_valid_folder(parser, arg): """Check if arg is a valid file that already exists on the file system.""" arg = os.path.abspath(arg) if not os.path.isdir(arg): parser.error("The folder %s does not exist!" % arg) else: return arg
[ "def", "is_valid_folder", "(", "parser", ",", "arg", ")", ":", "arg", "=", "os", ".", "path", ".", "abspath", "(", "arg", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"The folder %s does not ...
Check if arg is a valid file that already exists on the file system.
[ "Check", "if", "arg", "is", "a", "valid", "file", "that", "already", "exists", "on", "the", "file", "system", "." ]
python
train
radjkarl/imgProcessor
imgProcessor/transformations.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/transformations.py#L138-L146
def rgChromaticity(img): ''' returns the normalized RGB space (RGB/intensity) see https://en.wikipedia.org/wiki/Rg_chromaticity ''' out = _calc(img) if img.dtype == np.uint8: out = (255 * out).astype(np.uint8) return out
[ "def", "rgChromaticity", "(", "img", ")", ":", "out", "=", "_calc", "(", "img", ")", "if", "img", ".", "dtype", "==", "np", ".", "uint8", ":", "out", "=", "(", "255", "*", "out", ")", ".", "astype", "(", "np", ".", "uint8", ")", "return", "out"...
returns the normalized RGB space (RGB/intensity) see https://en.wikipedia.org/wiki/Rg_chromaticity
[ "returns", "the", "normalized", "RGB", "space", "(", "RGB", "/", "intensity", ")", "see", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Rg_chromaticity" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Dir.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Dir.py#L39-L44
def DirEntryScanner(**kw): """Return a prototype Scanner instance for "scanning" directory Nodes for their in-memory entries""" kw['node_factory'] = SCons.Node.FS.Entry kw['recursive'] = None return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
[ "def", "DirEntryScanner", "(", "*", "*", "kw", ")", ":", "kw", "[", "'node_factory'", "]", "=", "SCons", ".", "Node", ".", "FS", ".", "Entry", "kw", "[", "'recursive'", "]", "=", "None", "return", "SCons", ".", "Scanner", ".", "Base", "(", "scan_in_m...
Return a prototype Scanner instance for "scanning" directory Nodes for their in-memory entries
[ "Return", "a", "prototype", "Scanner", "instance", "for", "scanning", "directory", "Nodes", "for", "their", "in", "-", "memory", "entries" ]
python
train
materialsproject/pymatgen
pymatgen/io/vasp/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1141-L1175
def automatic_linemode(divisions, ibz): """ Convenient static constructor for a KPOINTS in mode line_mode. gamma centered Monkhorst-Pack grids and the number of subdivisions along each reciprocal lattice vector determined by the scheme in the VASP manual. Args: divisions: Parameter determining the number of k-points along each hight symetry lines. ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure) Returns: Kpoints object """ kpoints = list() labels = list() for path in ibz.kpath["path"]: kpoints.append(ibz.kpath["kpoints"][path[0]]) labels.append(path[0]) for i in range(1, len(path) - 1): kpoints.append(ibz.kpath["kpoints"][path[i]]) labels.append(path[i]) kpoints.append(ibz.kpath["kpoints"][path[i]]) labels.append(path[i]) kpoints.append(ibz.kpath["kpoints"][path[-1]]) labels.append(path[-1]) return Kpoints("Line_mode KPOINTS file", style=Kpoints.supported_modes.Line_mode, coord_type="Reciprocal", kpts=kpoints, labels=labels, num_kpts=int(divisions))
[ "def", "automatic_linemode", "(", "divisions", ",", "ibz", ")", ":", "kpoints", "=", "list", "(", ")", "labels", "=", "list", "(", ")", "for", "path", "in", "ibz", ".", "kpath", "[", "\"path\"", "]", ":", "kpoints", ".", "append", "(", "ibz", ".", ...
Convenient static constructor for a KPOINTS in mode line_mode. gamma centered Monkhorst-Pack grids and the number of subdivisions along each reciprocal lattice vector determined by the scheme in the VASP manual. Args: divisions: Parameter determining the number of k-points along each hight symetry lines. ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure) Returns: Kpoints object
[ "Convenient", "static", "constructor", "for", "a", "KPOINTS", "in", "mode", "line_mode", ".", "gamma", "centered", "Monkhorst", "-", "Pack", "grids", "and", "the", "number", "of", "subdivisions", "along", "each", "reciprocal", "lattice", "vector", "determined", ...
python
train
Yelp/py_zipkin
py_zipkin/logging_helper.py
https://github.com/Yelp/py_zipkin/blob/0944d9a3fb1f1798dbb276694aeed99f2b4283ba/py_zipkin/logging_helper.py#L71-L96
def emit_spans(self): """Main function to log all the annotations stored during the entire request. This is done if the request is sampled and the response was a success. It also logs the service (`ss` and `sr`) or the client ('cs' and 'cr') annotations. """ # FIXME: Should have a single aggregate handler if self.firehose_handler: # FIXME: We need to allow different batching settings per handler self._emit_spans_with_span_sender( ZipkinBatchSender(self.firehose_handler, self.max_span_batch_size, self.encoder) ) if not self.zipkin_attrs.is_sampled: self._get_tracer().clear() return span_sender = ZipkinBatchSender(self.transport_handler, self.max_span_batch_size, self.encoder) self._emit_spans_with_span_sender(span_sender) self._get_tracer().clear()
[ "def", "emit_spans", "(", "self", ")", ":", "# FIXME: Should have a single aggregate handler", "if", "self", ".", "firehose_handler", ":", "# FIXME: We need to allow different batching settings per handler", "self", ".", "_emit_spans_with_span_sender", "(", "ZipkinBatchSender", "...
Main function to log all the annotations stored during the entire request. This is done if the request is sampled and the response was a success. It also logs the service (`ss` and `sr`) or the client ('cs' and 'cr') annotations.
[ "Main", "function", "to", "log", "all", "the", "annotations", "stored", "during", "the", "entire", "request", ".", "This", "is", "done", "if", "the", "request", "is", "sampled", "and", "the", "response", "was", "a", "success", ".", "It", "also", "logs", ...
python
test
klahnakoski/pyLibrary
jx_python/meta.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/meta.py#L537-L543
def get_schema_from_list(table_name, frum): """ SCAN THE LIST FOR COLUMN TYPES """ columns = UniqueIndex(keys=("name",)) _get_schema_from_list(frum, ".", parent=".", nested_path=ROOT_PATH, columns=columns) return Schema(table_name=table_name, columns=list(columns))
[ "def", "get_schema_from_list", "(", "table_name", ",", "frum", ")", ":", "columns", "=", "UniqueIndex", "(", "keys", "=", "(", "\"name\"", ",", ")", ")", "_get_schema_from_list", "(", "frum", ",", "\".\"", ",", "parent", "=", "\".\"", ",", "nested_path", "...
SCAN THE LIST FOR COLUMN TYPES
[ "SCAN", "THE", "LIST", "FOR", "COLUMN", "TYPES" ]
python
train
makinacorpus/landez
landez/tiles.py
https://github.com/makinacorpus/landez/blob/6e5c71ded6071158e7943df204cd7bd1ed623a30/landez/tiles.py#L188-L193
def grid(self, z_x_y): """ Return the UTFGrid content """ # sources.py -> MapnikRenderer -> grid (z, x, y) = z_x_y content = self.reader.grid(z, x, y, self.grid_fields, self.grid_layer) return content
[ "def", "grid", "(", "self", ",", "z_x_y", ")", ":", "# sources.py -> MapnikRenderer -> grid", "(", "z", ",", "x", ",", "y", ")", "=", "z_x_y", "content", "=", "self", ".", "reader", ".", "grid", "(", "z", ",", "x", ",", "y", ",", "self", ".", "grid...
Return the UTFGrid content
[ "Return", "the", "UTFGrid", "content" ]
python
train
pgjones/quart
quart/logging.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/logging.py#L15-L27
def create_logger(app: 'Quart') -> Logger: """Create a logger for the app based on the app settings. This creates a logger named quart.app that has a log level based on the app configuration. """ logger = getLogger('quart.app') if app.debug and logger.level == NOTSET: logger.setLevel(DEBUG) logger.addHandler(default_handler) return logger
[ "def", "create_logger", "(", "app", ":", "'Quart'", ")", "->", "Logger", ":", "logger", "=", "getLogger", "(", "'quart.app'", ")", "if", "app", ".", "debug", "and", "logger", ".", "level", "==", "NOTSET", ":", "logger", ".", "setLevel", "(", "DEBUG", "...
Create a logger for the app based on the app settings. This creates a logger named quart.app that has a log level based on the app configuration.
[ "Create", "a", "logger", "for", "the", "app", "based", "on", "the", "app", "settings", "." ]
python
train
HDI-Project/RDT
rdt/hyper_transformer.py
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/hyper_transformer.py#L306-L336
def reverse_transform_table(self, table, table_meta, missing=None): """Transform a `table` back to its original format. Args: table(pandas.DataFrame): Contents of the table to be transformed. table_meta(dict): Metadata for the given table. missing(bool): Wheter or not use NullTransformer to handle missing values. Returns: pandas.DataFrame: Table in original format. """ if missing is None: missing = self.missing else: self.missing = missing warnings.warn( DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning) result = pd.DataFrame(index=table.index) table_name = table_meta['name'] for field in table_meta['fields']: new_column = self._reverse_transform_column(table, field, table_name) if new_column is not None: result[field['name']] = new_column return result
[ "def", "reverse_transform_table", "(", "self", ",", "table", ",", "table_meta", ",", "missing", "=", "None", ")", ":", "if", "missing", "is", "None", ":", "missing", "=", "self", ".", "missing", "else", ":", "self", ".", "missing", "=", "missing", "warni...
Transform a `table` back to its original format. Args: table(pandas.DataFrame): Contents of the table to be transformed. table_meta(dict): Metadata for the given table. missing(bool): Wheter or not use NullTransformer to handle missing values. Returns: pandas.DataFrame: Table in original format.
[ "Transform", "a", "table", "back", "to", "its", "original", "format", "." ]
python
train
materialsproject/pymatgen
pymatgen/alchemy/materials.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/alchemy/materials.py#L300-L318
def from_poscar_string(poscar_string, transformations=None): """ Generates TransformedStructure from a poscar string. Args: poscar_string (str): Input POSCAR string. transformations ([Transformations]): Sequence of transformations to be applied to the input structure. """ p = Poscar.from_string(poscar_string) if not p.true_names: raise ValueError("Transformation can be craeted only from POSCAR " "strings with proper VASP5 element symbols.") raw_string = re.sub(r"'", "\"", poscar_string) s = p.structure source_info = {"source": "POSCAR", "datetime": str(datetime.datetime.now()), "original_file": raw_string} return TransformedStructure(s, transformations, history=[source_info])
[ "def", "from_poscar_string", "(", "poscar_string", ",", "transformations", "=", "None", ")", ":", "p", "=", "Poscar", ".", "from_string", "(", "poscar_string", ")", "if", "not", "p", ".", "true_names", ":", "raise", "ValueError", "(", "\"Transformation can be cr...
Generates TransformedStructure from a poscar string. Args: poscar_string (str): Input POSCAR string. transformations ([Transformations]): Sequence of transformations to be applied to the input structure.
[ "Generates", "TransformedStructure", "from", "a", "poscar", "string", "." ]
python
train
ralphje/imagemounter
imagemounter/parser.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L130-L139
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True): """Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`. :rtype: generator""" for disk in self.disks: logger.info("Mounting volumes in {0}".format(disk)) for volume in disk.init_volumes(single, only_mount, skip_mount, swallow_exceptions=swallow_exceptions): yield volume
[ "def", "init_volumes", "(", "self", ",", "single", "=", "None", ",", "only_mount", "=", "None", ",", "skip_mount", "=", "None", ",", "swallow_exceptions", "=", "True", ")", ":", "for", "disk", "in", "self", ".", "disks", ":", "logger", ".", "info", "("...
Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`. :rtype: generator
[ "Detects", "volumes", "(", "as", "volume", "system", "or", "as", "single", "volume", ")", "in", "all", "disks", "and", "yields", "the", "volumes", ".", "This", "calls", ":", "func", ":", "Disk", ".", "init_volumes", "on", "all", "disks", "and", "should",...
python
train
mozilla-releng/scriptworker
scriptworker/artifacts.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/artifacts.py#L80-L108
def compress_artifact_if_supported(artifact_path): """Compress artifacts with GZip if they're known to be supported. This replaces the artifact given by a gzip binary. Args: artifact_path (str): the path to compress Returns: content_type, content_encoding (tuple): Type and encoding of the file. Encoding equals 'gzip' if compressed. """ content_type, encoding = guess_content_type_and_encoding(artifact_path) log.debug('"{}" is encoded with "{}" and has mime/type "{}"'.format(artifact_path, encoding, content_type)) if encoding is None and content_type in _GZIP_SUPPORTED_CONTENT_TYPE: log.info('"{}" can be gzip\'d. Compressing...'.format(artifact_path)) with open(artifact_path, 'rb') as f_in: text_content = f_in.read() with gzip.open(artifact_path, 'wb') as f_out: f_out.write(text_content) encoding = 'gzip' log.info('"{}" compressed'.format(artifact_path)) else: log.debug('"{}" is not supported for compression.'.format(artifact_path)) return content_type, encoding
[ "def", "compress_artifact_if_supported", "(", "artifact_path", ")", ":", "content_type", ",", "encoding", "=", "guess_content_type_and_encoding", "(", "artifact_path", ")", "log", ".", "debug", "(", "'\"{}\" is encoded with \"{}\" and has mime/type \"{}\"'", ".", "format", ...
Compress artifacts with GZip if they're known to be supported. This replaces the artifact given by a gzip binary. Args: artifact_path (str): the path to compress Returns: content_type, content_encoding (tuple): Type and encoding of the file. Encoding equals 'gzip' if compressed.
[ "Compress", "artifacts", "with", "GZip", "if", "they", "re", "known", "to", "be", "supported", "." ]
python
train
haikuginger/beekeeper
beekeeper/exceptions.py
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/exceptions.py#L71-L93
def top_level(self): """ Print just the top level of an object, being sure to show where it goes deeper """ output = {} if isinstance(self.obj, dict): for name, item in self.obj.items(): if isinstance(item, dict): if item: output[name] = StrReprWrapper('{...}') else: output[name] = StrReprWrapper('{}') elif isinstance(item, list): if item: output[name] = StrReprWrapper('[...]') else: output[name] = StrReprWrapper('[]') else: output[name] = item return output else: return self.obj
[ "def", "top_level", "(", "self", ")", ":", "output", "=", "{", "}", "if", "isinstance", "(", "self", ".", "obj", ",", "dict", ")", ":", "for", "name", ",", "item", "in", "self", ".", "obj", ".", "items", "(", ")", ":", "if", "isinstance", "(", ...
Print just the top level of an object, being sure to show where it goes deeper
[ "Print", "just", "the", "top", "level", "of", "an", "object", "being", "sure", "to", "show", "where", "it", "goes", "deeper" ]
python
train
bitesofcode/projexui
projexui/widgets/xtablewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtablewidget.py#L188-L198
def mimeData( self, items ): """ Returns the mime data for dragging for this instance. :param items | [<QTableWidgetItem>, ..] """ func = self.dataCollector() if ( func ): return func(self, items) return super(XTableWidget, self).mimeData(items)
[ "def", "mimeData", "(", "self", ",", "items", ")", ":", "func", "=", "self", ".", "dataCollector", "(", ")", "if", "(", "func", ")", ":", "return", "func", "(", "self", ",", "items", ")", "return", "super", "(", "XTableWidget", ",", "self", ")", "....
Returns the mime data for dragging for this instance. :param items | [<QTableWidgetItem>, ..]
[ "Returns", "the", "mime", "data", "for", "dragging", "for", "this", "instance", ".", ":", "param", "items", "|", "[", "<QTableWidgetItem", ">", "..", "]" ]
python
train
google/grr
grr/server/grr_response_server/console_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/console_utils.py#L177-L194
def ApprovalGrant(token=None): """Iterate through requested access approving or not.""" user = getpass.getuser() notifications = GetNotifications(user=user, token=token) requests = [n for n in notifications if n.type == "GrantAccess"] for request in requests: _, client_id, user, reason = rdfvalue.RDFURN(request.subject).Split() reason = utils.DecodeReasonString(reason) print(request) print("Reason: %s" % reason) if input("Do you approve this request? [y/N] ").lower() == "y": security.ClientApprovalGrantor( subject_urn=client_id, reason=reason, delegate=user, token=token).Grant() # TODO(user): Remove the notification. else: print("skipping request") print("Approval sent")
[ "def", "ApprovalGrant", "(", "token", "=", "None", ")", ":", "user", "=", "getpass", ".", "getuser", "(", ")", "notifications", "=", "GetNotifications", "(", "user", "=", "user", ",", "token", "=", "token", ")", "requests", "=", "[", "n", "for", "n", ...
Iterate through requested access approving or not.
[ "Iterate", "through", "requested", "access", "approving", "or", "not", "." ]
python
train
robinandeer/puzzle
puzzle/cli/base.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/base.py#L25-L35
def base(ctx, verbose, config): """Puzzle: manage DNA variant resources.""" # configure root logger to print to STDERR loglevel = LEVELS.get(min(verbose, 3)) configure_stream(level=loglevel) ctx.obj = {} if config and os.path.exists(config): ctx.obj = yaml.load(open(config, 'r')) or {} ctx.obj['config_path'] = config # launch the command line interface logger.debug('Booting up command line interface')
[ "def", "base", "(", "ctx", ",", "verbose", ",", "config", ")", ":", "# configure root logger to print to STDERR", "loglevel", "=", "LEVELS", ".", "get", "(", "min", "(", "verbose", ",", "3", ")", ")", "configure_stream", "(", "level", "=", "loglevel", ")", ...
Puzzle: manage DNA variant resources.
[ "Puzzle", ":", "manage", "DNA", "variant", "resources", "." ]
python
train