repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L284-L295
def insert_tracking_record(self):
    """SQL statement that inserts tracking records.

    The statement is prepared on first access and cached on the
    instance, so subsequent calls return the same prepared statement.
    """
    statement = self._insert_tracking_record
    if statement is None:
        statement = self._prepare_insert(
            tmpl=self._insert_values_tmpl,
            placeholder_for_id=True,
            record_class=self.tracking_record_class,
            field_names=self.tracking_record_field_names,
        )
        self._insert_tracking_record = statement
    return statement
[ "def", "insert_tracking_record", "(", "self", ")", ":", "if", "self", ".", "_insert_tracking_record", "is", "None", ":", "self", ".", "_insert_tracking_record", "=", "self", ".", "_prepare_insert", "(", "tmpl", "=", "self", ".", "_insert_values_tmpl", ",", "plac...
SQL statement that inserts tracking records.
[ "SQL", "statement", "that", "inserts", "tracking", "records", "." ]
python
train
pyviz/holoviews
holoviews/plotting/plotly/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plotly/util.py#L487-L508
def merge_figure(fig, subfig):
    """
    Merge a sub-figure into a parent figure

    Note: This function mutates the input fig dict, but it does not
    mutate the subfig dict

    Parameters
    ----------
    fig: dict
        The plotly figure dict into which the sub figure will be merged
    subfig: dict
        The plotly figure dict that will be copied and then merged into `fig`
    """
    # traces: append deep copies so `subfig`'s trace dicts are never shared
    fig.setdefault('data', []).extend(copy.deepcopy(subfig.get('data', [])))

    # layout: merged in place into the parent's layout dict
    _merge_layout_objs(fig.setdefault('layout', {}), subfig.get('layout', {}))
[ "def", "merge_figure", "(", "fig", ",", "subfig", ")", ":", "# traces", "data", "=", "fig", ".", "setdefault", "(", "'data'", ",", "[", "]", ")", "data", ".", "extend", "(", "copy", ".", "deepcopy", "(", "subfig", ".", "get", "(", "'data'", ",", "[...
Merge a sub-figure into a parent figure Note: This function mutates the input fig dict, but it does not mutate the subfig dict Parameters ---------- fig: dict The plotly figure dict into which the sub figure will be merged subfig: dict The plotly figure dict that will be copied and then merged into `fig`
[ "Merge", "a", "sub", "-", "figure", "into", "a", "parent", "figure" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L667-L682
def metric(self, name, description, data_type, interval, keyed=False):
    """Get instance of the Metrics module.

    Args:
        name (string): The name for the metric.
        description (string): The description of the metric.
        data_type (string): The type of metric: Sum, Count, Min, Max, First,
            Last, and Average.
        interval (string): The metric interval: Hourly, Daily, Weekly,
            Monthly, and Yearly.
        keyed (boolean): Indicates whether the data will have a keyed value.

    Returns:
        (object): An instance of the Metrics Class.
    """
    # Import kept function-scoped as in the original module (presumably to
    # avoid an import cycle or to defer the module load — confirm upstream).
    from .tcex_metrics_v2 import TcExMetricsV2

    return TcExMetricsV2(self, name, description, data_type, interval, keyed)
[ "def", "metric", "(", "self", ",", "name", ",", "description", ",", "data_type", ",", "interval", ",", "keyed", "=", "False", ")", ":", "from", ".", "tcex_metrics_v2", "import", "TcExMetricsV2", "return", "TcExMetricsV2", "(", "self", ",", "name", ",", "de...
Get instance of the Metrics module. Args: name (string): The name for the metric. description (string): The description of the metric. data_type (string): The type of metric: Sum, Count, Min, Max, First, Last, and Average. interval (string): The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly. keyed (boolean): Indicates whether the data will have a keyed value. Returns: (object): An instance of the Metrics Class.
[ "Get", "instance", "of", "the", "Metrics", "module", "." ]
python
train
aio-libs/yarl
yarl/__init__.py
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L347-L361
def origin(self):
    """Return an URL with scheme, host and port parts only.

    user, password, path, query and fragment are removed.
    """
    # TODO: add a keyword-only option for keeping user/pass maybe?
    if not self.is_absolute():
        raise ValueError("URL should be absolute")
    current = self._val
    if not current.scheme:
        raise ValueError("URL should have scheme")
    # Rebuild the netloc without user/password, then drop everything after
    # the authority component.
    netloc = self._make_netloc(None, None, current.hostname, current.port,
                               encode=False)
    stripped = current._replace(netloc=netloc, path="", query="", fragment="")
    return URL(stripped, encoded=True)
[ "def", "origin", "(", "self", ")", ":", "# TODO: add a keyword-only option for keeping user/pass maybe?", "if", "not", "self", ".", "is_absolute", "(", ")", ":", "raise", "ValueError", "(", "\"URL should be absolute\"", ")", "if", "not", "self", ".", "_val", ".", ...
Return an URL with scheme, host and port parts only. user, password, path, query and fragment are removed.
[ "Return", "an", "URL", "with", "scheme", "host", "and", "port", "parts", "only", "." ]
python
train
O365/python-o365
O365/message.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/message.py#L575-L611
def send(self, save_to_sent_folder=True):
    """ Sends this message

    :param bool save_to_sent_folder: whether or not to save it to
     sent folder
    :return: Success / Failure
    :rtype: bool
    :raises RuntimeError: if the message is neither new nor a draft
    """
    if self.object_id and not self.__is_draft:
        # BUG FIX: this used to `return` the exception instance instead of
        # raising it; a RuntimeError object is truthy, so callers checking
        # `if msg.send():` treated the failure as a successful send.
        raise RuntimeError('Not possible to send a message that is not '
                           'new or a draft. Use Reply or Forward instead.')

    if self.__is_draft and self.object_id:
        # Message already saved as a draft: use the "send draft" endpoint.
        url = self.build_url(
            self._endpoints.get('send_draft').format(id=self.object_id))
        if self._track_changes:
            # there are pending changes to be committed
            self.save_draft()
        data = None
    else:
        # Brand-new message: send the full payload in one request.
        url = self.build_url(self._endpoints.get('send_mail'))
        data = {self._cc('message'): self.to_api_data()}
        if save_to_sent_folder is False:
            data[self._cc('saveToSentItems')] = False

    response = self.con.post(url, data=data)
    # response evaluates to false if 4XX or 5XX status codes are returned
    if not response:
        return False

    self.object_id = 'sent_message' if not self.object_id \
        else self.object_id
    self.__is_draft = False

    return True
[ "def", "send", "(", "self", ",", "save_to_sent_folder", "=", "True", ")", ":", "if", "self", ".", "object_id", "and", "not", "self", ".", "__is_draft", ":", "return", "RuntimeError", "(", "'Not possible to send a message that is not '", "'new or a draft. Use Reply or ...
Sends this message :param bool save_to_sent_folder: whether or not to save it to sent folder :return: Success / Failure :rtype: bool
[ "Sends", "this", "message" ]
python
train
CalebBell/ht
ht/hx.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L6001-L6064
def Ntubes_VDI(DBundle=None, Ntp=None, Do=None, pitch=None, angle=30.):
    r'''A rough equation presented in the VDI Heat Atlas for estimating
    the number of tubes in a tube bundle of differing geometries and tube
    sizes. No accuracy estimation given.

    Parameters
    ----------
    DBundle : float
        Outer diameter of tube bundle, [m]
    Ntp : float
        Number of tube passes, [-]
    Do : float
        Tube outer diameter, [m]
    pitch : float
        Pitch; distance between two orthogonal tube centers, [m]
    angle : float
        The angle the tubes are positioned; 30, 45, 60 or 90, [degrees]

    Returns
    -------
    N : float
        Number of tubes, [-]

    Notes
    -----
    No coefficients for this method with Ntp=6 are available in [1]_. For
    consistency, estimated values were added to support 6 tube passes,
    f2 = 90.. This equation is a rearranged form of that presented in [1]_.
    The calculated tube count is rounded down to an integer.

    Examples
    --------
    >>> Ntubes_VDI(DBundle=1.184, Ntp=2, Do=.028, pitch=.036, angle=30)
    966

    References
    ----------
    .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    # Tube-pass correction factor; the Ntp=6 value is an estimate (see Notes).
    _F2_BY_PASSES = {1: 0., 2: 22., 4: 70., 6: 90., 8: 105.}
    try:
        f2 = _F2_BY_PASSES[Ntp]
    except KeyError:
        # BUG FIX: the old message omitted Ntp=6, which IS supported above.
        raise Exception('Only 1, 2, 4, 6 and 8 passes are supported')
    # Layout factor: triangular-type layouts (30/60 deg) vs square-type (45/90).
    if angle == 30 or angle == 60:
        f1 = 1.1
    elif angle == 45 or angle == 90:
        f1 = 1.3
    else:
        raise Exception('Only 30, 60, 45 and 90 degree layouts are supported')
    # Convert to mm; the correlation is dimensional.
    DBundle, Do, pitch = DBundle*1000, Do*1000, pitch*1000
    t = pitch
    Ntubes = (-(-4*f1*t**4*f2**2*Do + 4*f1*t**4*f2**2*DBundle**2 + t**4*f2**4)**0.5
              - 2*f1*t**2*Do + 2*f1*t**2*DBundle**2 + t**2*f2**2)/(2*f1**2*t**4)
    # Round DOWN to a whole tube count.
    return int(Ntubes)
[ "def", "Ntubes_VDI", "(", "DBundle", "=", "None", ",", "Ntp", "=", "None", ",", "Do", "=", "None", ",", "pitch", "=", "None", ",", "angle", "=", "30.", ")", ":", "if", "Ntp", "==", "1", ":", "f2", "=", "0.", "elif", "Ntp", "==", "2", ":", "f2...
r'''A rough equation presented in the VDI Heat Atlas for estimating the number of tubes in a tube bundle of differing geometries and tube sizes. No accuracy estimation given. Parameters ---------- DBundle : float Outer diameter of tube bundle, [m] Ntp : float Number of tube passes, [-] Do : float Tube outer diameter, [m] pitch : float Pitch; distance between two orthogonal tube centers, [m] angle : float The angle the tubes are positioned; 30, 45, 60 or 90, [degrees] Returns ------- N : float Number of tubes, [-] Notes ----- No coefficients for this method with Ntp=6 are available in [1]_. For consistency, estimated values were added to support 6 tube passes, f2 = 90.. This equation is a rearranged form of that presented in [1]_. The calculated tube count is rounded down to an integer. Examples -------- >>> Ntubes_VDI(DBundle=1.184, Ntp=2, Do=.028, pitch=.036, angle=30) 966 References ---------- .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010.
[ "r", "A", "rough", "equation", "presented", "in", "the", "VDI", "Heat", "Atlas", "for", "estimating", "the", "number", "of", "tubes", "in", "a", "tube", "bundle", "of", "differing", "geometries", "and", "tube", "sizes", ".", "No", "accuracy", "estimation", ...
python
train
PyGithub/PyGithub
github/MainClass.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/MainClass.py#L317-L328
def get_project(self, id):
    """
    :calls: `GET /projects/:project_id <https://developer.github.com/v3/projects/#get-a-project>`_
    :rtype: :class:`github.Project.Project`
    :param id: integer
    """
    # The Projects API requires an explicit preview media type (constant name
    # suggests it is still a preview feature — confirm against GitHub docs).
    preview_headers = {"Accept": Consts.mediaTypeProjectsPreview}
    url = "/projects/%d" % (id)
    headers, data = self.__requester.requestJsonAndCheck(
        "GET", url, headers=preview_headers
    )
    return github.Project.Project(self.__requester, headers, data, completed=True)
[ "def", "get_project", "(", "self", ",", "id", ")", ":", "headers", ",", "data", "=", "self", ".", "__requester", ".", "requestJsonAndCheck", "(", "\"GET\"", ",", "\"/projects/%d\"", "%", "(", "id", ")", ",", "headers", "=", "{", "\"Accept\"", ":", "Const...
:calls: `GET /projects/:project_id <https://developer.github.com/v3/projects/#get-a-project>`_ :rtype: :class:`github.Project.Project` :param id: integer
[ ":", "calls", ":", "GET", "/", "projects", "/", ":", "project_id", "<https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "projects", "/", "#get", "-", "a", "-", "project", ">", "_", ":", "rtype", ":", ":", "class", ":", "gi...
python
train
saltstack/salt
salt/utils/dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L378-L401
def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False
[ "def", "_lookup_gai", "(", "name", ",", "rdtype", ",", "timeout", "=", "None", ")", ":", "try", ":", "sock_t", "=", "{", "'A'", ":", "socket", ".", "AF_INET", ",", "'AAAA'", ":", "socket", ".", "AF_INET6", "}", "[", "rdtype", "]", "except", "KeyError...
Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error
[ "Use", "Python", "s", "socket", "interface", "to", "lookup", "addresses", ":", "param", "name", ":", "Name", "of", "record", "to", "search", ":", "param", "rdtype", ":", "A", "or", "AAAA", ":", "param", "timeout", ":", "ignored", ":", "return", ":", "[...
python
train
QunarOPS/qg.core
qg/core/timeutils.py
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L135-L142
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    ``utcnow.override_time`` may hold either a single datetime or a list of
    datetimes; in both cases every stored time is advanced by ``timedelta``.
    """
    assert utcnow.override_time is not None
    try:
        iter(utcnow.override_time)
    except TypeError:
        # A single datetime value.
        utcnow.override_time += timedelta
    else:
        # BUG FIX: the old code looped `for dt in ...: dt += timedelta`,
        # which only rebinds the loop variable — the stored list was never
        # actually advanced. Rebuild the list with shifted values instead.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
[ "def", "advance_time_delta", "(", "timedelta", ")", ":", "assert", "(", "utcnow", ".", "override_time", "is", "not", "None", ")", "try", ":", "for", "dt", "in", "utcnow", ".", "override_time", ":", "dt", "+=", "timedelta", "except", "TypeError", ":", "utcn...
Advance overridden time using a datetime.timedelta.
[ "Advance", "overridden", "time", "using", "a", "datetime", ".", "timedelta", "." ]
python
train
wndhydrnt/python-oauth2
oauth2/grant.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/grant.py#L262-L279
def read_validate_params(self, request):
    """
    Reads and validates data in an incoming request as required by
    the Authorization Request of the Authorization Code Grant and the
    Implicit Grant.
    """
    self.client = self.client_authenticator.by_identifier(request)

    requested_type = request.get_param("response_type")
    # NOTE: the comparison is deliberately `is False` (not `not ...`) so a
    # non-boolean falsy return from response_type_supported is not rejected.
    if self.client.response_type_supported(requested_type) is False:
        raise OAuthInvalidError(error="unauthorized_client")

    self.state = request.get_param("state")
    self.scope_handler.parse(request, "query")

    return True
[ "def", "read_validate_params", "(", "self", ",", "request", ")", ":", "self", ".", "client", "=", "self", ".", "client_authenticator", ".", "by_identifier", "(", "request", ")", "response_type", "=", "request", ".", "get_param", "(", "\"response_type\"", ")", ...
Reads and validates data in an incoming request as required by the Authorization Request of the Authorization Code Grant and the Implicit Grant.
[ "Reads", "and", "validates", "data", "in", "an", "incoming", "request", "as", "required", "by", "the", "Authorization", "Request", "of", "the", "Authorization", "Code", "Grant", "and", "the", "Implicit", "Grant", "." ]
python
train
ArchiveTeam/wpull
wpull/path.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/path.py#L221-L281
def safe_filename(filename, os_type='unix', no_control=True, ascii_only=True,
                  case=None, encoding='utf8', max_length=None):
    '''Return a safe filename or path part.

    Args:
        filename (str): The filename or path component.
        os_type (str): If ``unix``, escape the slash. If ``windows``, escape
            extra Windows characters.
        no_control (bool): If True, escape control characters.
        ascii_only (bool): If True, escape non-ASCII characters.
        case (str): If ``lower``, lowercase the string. If ``upper``,
            uppercase the string.
        encoding (str): The character encoding.
        max_length (int): The maximum length of the filename.

    This function assumes that `filename` has not already been
    percent-encoded.

    Returns:
        str
    '''
    assert isinstance(filename, str), \
        'Expect str. Got {}.'.format(type(filename))

    if filename in ('.', os.curdir):
        new_filename = '%2E'
    elif filename in ('..', os.pardir):
        # BUG FIX: this tuple previously repeated '.' instead of '..';
        # os.pardir (== '..') masked the typo on current platforms.
        new_filename = '%2E%2E'
    else:
        unix = os_type == 'unix'
        windows = os_type == 'windows'
        encoder_args = (unix, no_control, windows, ascii_only)

        # Encoders are cached per configuration to avoid rebuilding them.
        if encoder_args not in _encoder_cache:
            _encoder_cache[encoder_args] = PercentEncoder(
                unix=unix, control=no_control, windows=windows,
                ascii_=ascii_only
            )

        encoder = _encoder_cache[encoder_args]
        encoded_filename = filename.encode(encoding)
        new_filename = encoder.quote(encoded_filename).decode(encoding)

    if os_type == 'windows':
        if new_filename[-1] in ' .':
            # Windows disallows trailing spaces/dots in filenames.
            # BUG FIX: '{1:02X}' applied to a *str* raises ValueError; encode
            # the character's code point instead. NOTE(review): the '%' prefix
            # matches the percent-encoding style used above — confirm intent.
            new_filename = '{0}%{1:02X}'.format(
                new_filename[:-1], ord(new_filename[-1])
            )

    if max_length and len(new_filename) > max_length:
        # Truncate and append a short hash so distinct names stay distinct.
        hash_obj = hashlib.sha1(new_filename.encode(encoding))
        new_length = max(0, max_length - 8)
        new_filename = '{0}{1}'.format(
            new_filename[:new_length], hash_obj.hexdigest()[:8]
        )

    if case == 'lower':
        new_filename = new_filename.lower()
    elif case == 'upper':
        new_filename = new_filename.upper()

    return new_filename
[ "def", "safe_filename", "(", "filename", ",", "os_type", "=", "'unix'", ",", "no_control", "=", "True", ",", "ascii_only", "=", "True", ",", "case", "=", "None", ",", "encoding", "=", "'utf8'", ",", "max_length", "=", "None", ")", ":", "assert", "isinsta...
Return a safe filename or path part. Args: filename (str): The filename or path component. os_type (str): If ``unix``, escape the slash. If ``windows``, escape extra Windows characters. no_control (bool): If True, escape control characters. ascii_only (bool): If True, escape non-ASCII characters. case (str): If ``lower``, lowercase the string. If ``upper``, uppercase the string. encoding (str): The character encoding. max_length (int): The maximum length of the filename. This function assumes that `filename` has not already been percent-encoded. Returns: str
[ "Return", "a", "safe", "filename", "or", "path", "part", "." ]
python
train
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L172-L186
def write_screen(self, font, color, screen_pos, text, align="left",
                 valign="top"):
    """Write to the screen in font.size relative coordinates."""
    # Scale the requested position into pixels (x is squeezed by 0.75).
    pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()
    text_surf = font.render(str(text), True, color)
    rect = text_surf.get_rect()
    # Negative coordinates anchor from the opposite (right/bottom) edge.
    horiz = pos.x if pos.x >= 0 else self.surf.get_width() + pos.x
    vert = pos.y if pos.y >= 0 else self.surf.get_height() + pos.y
    setattr(rect, align, horiz)
    setattr(rect, valign, vert)
    self.surf.blit(text_surf, rect)
[ "def", "write_screen", "(", "self", ",", "font", ",", "color", ",", "screen_pos", ",", "text", ",", "align", "=", "\"left\"", ",", "valign", "=", "\"top\"", ")", ":", "pos", "=", "point", ".", "Point", "(", "*", "screen_pos", ")", "*", "point", ".", ...
Write to the screen in font.size relative coordinates.
[ "Write", "to", "the", "screen", "in", "font", ".", "size", "relative", "coordinates", "." ]
python
train
O365/python-o365
O365/calendar.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/calendar.py#L1752-L1789
def list_calendars(self, limit=None, *, query=None, order_by=None):
    """ Gets a list of calendars

    To use query an order_by check the OData specification here:
    http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/
    part2-url-conventions/odata-v4.0-errata03-os-part2-url-conventions
    -complete.html

    :param int limit: max no. of calendars to get. Over 999 uses batch.
    :param query: applies a OData filter to the request
    :type query: Query or str
    :param order_by: orders the result set based on this condition
    :type order_by: Query or str
    :return: list of calendars
    :rtype: list[Calendar]
    """
    url = self.build_url(self._endpoints.get('root_calendars'))

    params = {}
    if limit:
        params['$top'] = limit
    if query:
        params['$filter'] = str(query)
    if order_by:
        params['$orderby'] = order_by

    response = self.con.get(url, params=params or None)
    if not response:
        return []

    data = response.json()

    # Everything received from cloud must be passed as self._cloud_data_key
    return [
        self.calendar_constructor(parent=self, **{self._cloud_data_key: raw})
        for raw in data.get('value', [])
    ]
[ "def", "list_calendars", "(", "self", ",", "limit", "=", "None", ",", "*", ",", "query", "=", "None", ",", "order_by", "=", "None", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'root_calendars'", "...
Gets a list of calendars To use query an order_by check the OData specification here: http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/ part2-url-conventions/odata-v4.0-errata03-os-part2-url-conventions -complete.html :param int limit: max no. of calendars to get. Over 999 uses batch. :param query: applies a OData filter to the request :type query: Query or str :param order_by: orders the result set based on this condition :type order_by: Query or str :return: list of calendars :rtype: list[Calendar]
[ "Gets", "a", "list", "of", "calendars" ]
python
train
peterbrittain/asciimatics
asciimatics/widgets.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L1292-L1306
def find_widget(self, name):
    """
    Look for a widget with a specified name.

    :param name: The name to search for.

    :returns: The widget that matches or None if one couldn't be found.
    """
    # Scan every column; a match in a later column replaces an earlier one,
    # which mirrors the original lookup order exactly.
    result = None
    for column in self._columns:
        match = next(
            (widget for widget in column
             if widget.name is not None and name == widget.name),
            None)
        if match is not None:
            result = match
    return result
[ "def", "find_widget", "(", "self", ",", "name", ")", ":", "result", "=", "None", "for", "column", "in", "self", ".", "_columns", ":", "for", "widget", "in", "column", ":", "if", "widget", ".", "name", "is", "not", "None", "and", "name", "==", "widget...
Look for a widget with a specified name. :param name: The name to search for. :returns: The widget that matches or None if one couldn't be found.
[ "Look", "for", "a", "widget", "with", "a", "specified", "name", "." ]
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/reflect/PropertyReflector.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/PropertyReflector.py#L73-L102
def get_property(obj, name):
    """
    Gets value of object property specified by its name.

    :param obj: an object to read property from.
    :param name: a name of the property to get.

    :return: the property value or null if property doesn't exist or introspection failed.
    """
    # `is None` instead of `== None`: identity test is the correct idiom and
    # is immune to objects with exotic __eq__ implementations.
    if obj is None:
        raise Exception("Object cannot be null")
    if name is None:
        raise Exception("Property name cannot be null")

    # Property lookup is case-insensitive.
    name = name.lower()
    try:
        for property_name in dir(obj):
            if property_name.lower() != name:
                continue

            # Renamed from `property` to avoid shadowing the builtin.
            prop = getattr(obj, property_name)
            if PropertyReflector._is_property(prop, property_name):
                return prop
    except Exception:
        # Introspection is best-effort: any failure means "not found".
        # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        pass

    return None
[ "def", "get_property", "(", "obj", ",", "name", ")", ":", "if", "obj", "==", "None", ":", "raise", "Exception", "(", "\"Object cannot be null\"", ")", "if", "name", "==", "None", ":", "raise", "Exception", "(", "\"Property name cannot be null\"", ")", "name", ...
Gets value of object property specified by its name. :param obj: an object to read property from. :param name: a name of the property to get. :return: the property value or null if property doesn't exist or introspection failed.
[ "Gets", "value", "of", "object", "property", "specified", "by", "its", "name", "." ]
python
train
20c/vaping
vaping/plugins/__init__.py
https://github.com/20c/vaping/blob/c51f00586c99edb3d51e4abdbdfe3174755533ee/vaping/plugins/__init__.py#L413-L421
def format_filename(self, data, row):
    """
    Returns a formatted filename using the template stored in self.filename

    - `data`: vaping message
    - `row`: vaping message data row
    """
    # Build the substitution mapping first, then apply it to the template.
    context = self.filename_formatters(data, row)
    return self.filename.format(**context)
[ "def", "format_filename", "(", "self", ",", "data", ",", "row", ")", ":", "return", "self", ".", "filename", ".", "format", "(", "*", "*", "self", ".", "filename_formatters", "(", "data", ",", "row", ")", ")" ]
Returns a formatted filename using the template stored in self.filename - `data`: vaping message - `row`: vaping message data row
[ "Returns", "a", "formatted", "filename", "using", "the", "template", "stored", "in", "self", ".", "filename" ]
python
train
GNS3/gns3-server
gns3server/compute/port_manager.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/port_manager.py#L209-L253
def reserve_tcp_port(self, port, project, port_range_start=None, port_range_end=None):
    """
    Reserve a specific TCP port number. If not available replace it by another.

    :param port: TCP port number
    :param project: Project instance
    :param port_range_start: Port range to use
    :param port_range_end: Port range to use
    :returns: The TCP port
    """
    # Fill in the default console range per bound. BUG FIX: previously the
    # defaults were applied only when BOTH bounds were None, so passing a
    # single bound left the other as None and the range comparison below
    # raised a TypeError.
    if port_range_start is None:
        port_range_start = self._console_port_range[0]
    if port_range_end is None:
        port_range_end = self._console_port_range[1]

    # Port already handed out by this manager: pick a replacement.
    if port in self._used_tcp_ports:
        old_port = port
        port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
        msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
        log.debug(msg)
        #project.emit("log.warning", {"message": msg})
        return port

    # Port outside the allowed range: pick a replacement.
    if port < port_range_start or port > port_range_end:
        old_port = port
        port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
        msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
        log.debug(msg)
        #project.emit("log.warning", {"message": msg})
        return port

    # Port taken by some other process on the host: pick a replacement.
    try:
        PortManager._check_port(self._console_host, port, "TCP")
    except OSError:
        old_port = port
        port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
        msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
        log.debug(msg)
        #project.emit("log.warning", {"message": msg})
        return port

    self._used_tcp_ports.add(port)
    project.record_tcp_port(port)
    log.debug("TCP port {} has been reserved".format(port))
    return port
[ "def", "reserve_tcp_port", "(", "self", ",", "port", ",", "project", ",", "port_range_start", "=", "None", ",", "port_range_end", "=", "None", ")", ":", "# use the default range is not specific one is given", "if", "port_range_start", "is", "None", "and", "port_range_...
Reserve a specific TCP port number. If not available replace it by another. :param port: TCP port number :param project: Project instance :param port_range_start: Port range to use :param port_range_end: Port range to use :returns: The TCP port
[ "Reserve", "a", "specific", "TCP", "port", "number", ".", "If", "not", "available", "replace", "it", "by", "another", "." ]
python
train
emirozer/fake2db
fake2db/redis_handler.py
https://github.com/emirozer/fake2db/blob/568cf42afb3ac10fc15c4faaa1cdb84fc1f4946c/fake2db/redis_handler.py#L143-L166
def data_filler_customer(self, number_of_rows, pipe):
    '''creates keys with customer data
    '''
    try:
        for index in range(number_of_rows):
            # Assemble one fake customer record, then queue it on the pipeline.
            record = {
                'id': rnd_id_generator(self),
                'name': self.faker.first_name(),
                'lastname': self.faker.last_name(),
                'address': self.faker.address(),
                'country': self.faker.country(),
                'city': self.faker.city(),
                'registry_date': self.faker.date(pattern="%d-%m-%Y"),
                'birthdate': self.faker.date(pattern="%d-%m-%Y"),
                'email': self.faker.safe_email(),
                'phone_number': self.faker.phone_number(),
                'locale': self.faker.locale(),
            }
            pipe.hmset('customer:%s' % index, record)
        pipe.execute()
        logger.warning('customer Commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
[ "def", "data_filler_customer", "(", "self", ",", "number_of_rows", ",", "pipe", ")", ":", "try", ":", "for", "i", "in", "range", "(", "number_of_rows", ")", ":", "pipe", ".", "hmset", "(", "'customer:%s'", "%", "i", ",", "{", "'id'", ":", "rnd_id_generat...
creates keys with customer data
[ "creates", "keys", "with", "customer", "data" ]
python
train
saltstack/salt
salt/modules/boto_s3_bucket.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3_bucket.py#L562-L597
def put_logging(Bucket,
           TargetBucket=None, TargetPrefix=None, TargetGrants=None,
           region=None, key=None, keyid=None, profile=None):
    '''
    Given a valid config, update the logging parameters for a bucket.

    Returns {updated: true} if parameters were updated and returns
    {updated: False} if parameters were not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.put_logging my_bucket log_bucket '[{...}]' prefix

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

        # BUG FIX: TargetGrants must be deserialized *before* the logging
        # state is assembled; previously it was parsed after `logstate` was
        # built, so the parsed value was never sent to S3.
        if TargetGrants is not None and isinstance(TargetGrants, six.string_types):
            TargetGrants = salt.utils.json.loads(TargetGrants)

        # Only the parameters actually supplied are forwarded.
        logstate = {}
        targets = {'TargetBucket': TargetBucket,
                   'TargetGrants': TargetGrants,
                   'TargetPrefix': TargetPrefix}
        # Loop variable renamed so it no longer shadows the `key` parameter.
        for target_key, val in six.iteritems(targets):
            if val is not None:
                logstate[target_key] = val
        if logstate:
            logstatus = {'LoggingEnabled': logstate}
        else:
            # An empty status disables bucket logging.
            logstatus = {}

        conn.put_bucket_logging(Bucket=Bucket, BucketLoggingStatus=logstatus)
        return {'updated': True, 'name': Bucket}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "put_logging", "(", "Bucket", ",", "TargetBucket", "=", "None", ",", "TargetPrefix", "=", "None", ",", "TargetGrants", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")",...
Given a valid config, update the logging parameters for a bucket. Returns {updated: true} if parameters were updated and returns {updated: False} if parameters were not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_logging my_bucket log_bucket '[{...}]' prefix
[ "Given", "a", "valid", "config", "update", "the", "logging", "parameters", "for", "a", "bucket", "." ]
python
train
AguaClara/aguaclara
aguaclara/design/sed_tank.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/sed_tank.py#L527-L547
def L_channel(Q_plant, sed_inputs=sed_dict):
    """Return the length of the inlet and exit channels for the sedimentation
    tank.

    Parameters
    ----------
    Q_plant : float
        Total plant flow rate
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations can be found in sed.yaml

    Returns
    -------
    float
        Length of the inlet and exit channels for the sedimentation tank.

    Examples
    --------
    >>> from aide_design.play import*
    >>>
    """
    # BUG FIX: the result was previously assigned to a local named `n_tanks`,
    # shadowing the module-level n_tanks() function and raising
    # UnboundLocalError on every call.
    num_tanks = n_tanks(Q_plant, sed_inputs)
    # Total channel length = tank widths + outer wall + inner dividing walls.
    return ((num_tanks * sed_inputs['tank']['W'])
            + sed_inputs['thickness_wall']
            + ((num_tanks - 1) * sed_inputs['thickness_wall']))
[ "def", "L_channel", "(", "Q_plant", ",", "sed_inputs", "=", "sed_dict", ")", ":", "n_tanks", "=", "n_tanks", "(", "Q_plant", ",", "sed_inputs", ")", "return", "(", "(", "n_tanks", "*", "sed_inputs", "[", "'tank'", "]", "[", "'W'", "]", ")", "+", "sed_i...
Return the length of the inlet and exit channels for the sedimentation tank. Parameters ---------- Q_plant : float Total plant flow rate sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- float Length of the inlet and exit channels for the sedimentation tank. Examples -------- >>> from aide_design.play import* >>>
[ "Return", "the", "length", "of", "the", "inlet", "and", "exit", "channels", "for", "the", "sedimentation", "tank", ".", "Parameters", "----------", "Q_plant", ":", "float", "Total", "plant", "flow", "rate", "sed_inputs", ":", "dict", "A", "dictionary", "of", ...
python
train
martinpitt/python-dbusmock
dbusmock/templates/ofono.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/ofono.py#L169-L190
def add_voice_call_api(mock): '''Add org.ofono.VoiceCallManager API to a mock''' # also add an emergency number which is not a real one, in case one runs a # test case against a production ofono :-) mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers', ['911', '13373']) mock.calls = [] # object paths mock.AddMethods('org.ofono.VoiceCallManager', [ ('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.ofono.VoiceCallManager")'), ('Transfer', '', '', ''), ('SwapCalls', '', '', ''), ('ReleaseAndAnswer', '', '', ''), ('ReleaseAndSwap', '', '', ''), ('HoldAndAnswer', '', '', ''), ('SendTones', 's', '', ''), ('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED), ('CreateMultiparty', '', 'o', NOT_IMPLEMENTED), ('HangupMultiparty', '', '', NOT_IMPLEMENTED), ('GetCalls', '', 'a(oa{sv})', 'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]') ])
[ "def", "add_voice_call_api", "(", "mock", ")", ":", "# also add an emergency number which is not a real one, in case one runs a", "# test case against a production ofono :-)", "mock", ".", "AddProperty", "(", "'org.ofono.VoiceCallManager'", ",", "'EmergencyNumbers'", ",", "[", "'91...
Add org.ofono.VoiceCallManager API to a mock
[ "Add", "org", ".", "ofono", ".", "VoiceCallManager", "API", "to", "a", "mock" ]
python
train
devries/bottle-session
bottle_session.py
https://github.com/devries/bottle-session/blob/aaa33eecbf977d6b2ad7d3835d2176f40b3231e5/bottle_session.py#L293-L300
def keys(self): """Return a list of all keys in the dictionary. Returns: list of str: [key1,key2,...,keyN] """ all_keys = [k.decode('utf-8') for k,v in self.rdb.hgetall(self.session_hash).items()] return all_keys
[ "def", "keys", "(", "self", ")", ":", "all_keys", "=", "[", "k", ".", "decode", "(", "'utf-8'", ")", "for", "k", ",", "v", "in", "self", ".", "rdb", ".", "hgetall", "(", "self", ".", "session_hash", ")", ".", "items", "(", ")", "]", "return", "...
Return a list of all keys in the dictionary. Returns: list of str: [key1,key2,...,keyN]
[ "Return", "a", "list", "of", "all", "keys", "in", "the", "dictionary", "." ]
python
train
thiagopbueno/rddl2tf
rddl2tf/fluent.py
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L601-L619
def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]: '''Maps the `vars_list` into a list of axis indices corresponding to the `fluent` scope. Args: x: The fluent. vars_list: The list of variables to be aggregated over. Returns: List[int]: a list of axis. ''' axis = [] for var in vars_list: if var in fluent.scope.as_list(): ax = fluent.scope.index(var) if fluent.batch: ax += 1 axis.append(ax) return axis
[ "def", "_varslist2axis", "(", "cls", ",", "fluent", ":", "'TensorFluent'", ",", "vars_list", ":", "List", "[", "str", "]", ")", "->", "List", "[", "int", "]", ":", "axis", "=", "[", "]", "for", "var", "in", "vars_list", ":", "if", "var", "in", "flu...
Maps the `vars_list` into a list of axis indices corresponding to the `fluent` scope. Args: x: The fluent. vars_list: The list of variables to be aggregated over. Returns: List[int]: a list of axis.
[ "Maps", "the", "vars_list", "into", "a", "list", "of", "axis", "indices", "corresponding", "to", "the", "fluent", "scope", "." ]
python
train
kapot65/python-df-parser
dfparser/rsh_parser.py
https://github.com/kapot65/python-df-parser/blob/bb3eec0fb7ca85d72cb1d9ed7415efe074594f26/dfparser/rsh_parser.py#L351-L379
def get_event(self, num): """Extract event from dataset.""" if num < 0 or num >= self.params["events_num"]: raise IndexError("Index out of range [0:%s]" % (self.params["events_num"])) ch_num = self.params['channel_number'] ev_size = self.params['b_size'] event = {} self.file.seek(7168 + num * (96 + 2 * ch_num * ev_size)) event["text_hdr"] = self.file.read(64) event["ev_num"] = struct.unpack('I', self.file.read(4))[0] self.file.read(4) start_time = struct.unpack('Q', self.file.read(8))[0] event["start_time"] = datetime.fromtimestamp(start_time) ns_since_epoch = struct.unpack('Q', self.file.read(8))[0] if ns_since_epoch: event['ns_since_epoch'] = ns_since_epoch self.file.read(8) event_data = self.file.read(2 * ev_size * ch_num) event["data"] = np.fromstring(event_data, np.short) return event
[ "def", "get_event", "(", "self", ",", "num", ")", ":", "if", "num", "<", "0", "or", "num", ">=", "self", ".", "params", "[", "\"events_num\"", "]", ":", "raise", "IndexError", "(", "\"Index out of range [0:%s]\"", "%", "(", "self", ".", "params", "[", ...
Extract event from dataset.
[ "Extract", "event", "from", "dataset", "." ]
python
train
CEA-COSMIC/ModOpt
modopt/opt/proximity.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/proximity.py#L295-L314
def _op_method(self, data, extra_factor=1.0): r"""Operator method This method returns the scaled version of the proximity operator as given by Lemma 2.8 of [CW2005]. Parameters ---------- data : np.ndarray Input data array extra_factor : float Additional multiplication factor Returns ------- np.ndarray result of the scaled proximity operator """ return self.linear_op.adj_op( self.prox_op.op(self.linear_op.op(data), extra_factor=extra_factor) )
[ "def", "_op_method", "(", "self", ",", "data", ",", "extra_factor", "=", "1.0", ")", ":", "return", "self", ".", "linear_op", ".", "adj_op", "(", "self", ".", "prox_op", ".", "op", "(", "self", ".", "linear_op", ".", "op", "(", "data", ")", ",", "e...
r"""Operator method This method returns the scaled version of the proximity operator as given by Lemma 2.8 of [CW2005]. Parameters ---------- data : np.ndarray Input data array extra_factor : float Additional multiplication factor Returns ------- np.ndarray result of the scaled proximity operator
[ "r", "Operator", "method" ]
python
train
materialsproject/pymatgen
pymatgen/io/vasp/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L1916-L1933
def read_cs_core_contribution(self): """ Parse the core contribution of NMR chemical shielding. Returns: G0 contribution matrix as list of list. """ header_pattern = r'^\s+Core NMR properties\s*$\n' \ r'\n' \ r'^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n' \ r'^\s+-{20,}$\n' row_pattern = r'\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)' footer_pattern = r'\s+-{20,}\s*$' self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=str, last_one_only=True, attribute_name="cs_core_contribution") core_contrib = {d['element']: float(d['shift']) for d in self.data["cs_core_contribution"]} self.data["cs_core_contribution"] = core_contrib
[ "def", "read_cs_core_contribution", "(", "self", ")", ":", "header_pattern", "=", "r'^\\s+Core NMR properties\\s*$\\n'", "r'\\n'", "r'^\\s+typ\\s+El\\s+Core shift \\(ppm\\)\\s*$\\n'", "r'^\\s+-{20,}$\\n'", "row_pattern", "=", "r'\\d+\\s+(?P<element>[A-Z][a-z]?\\w?)\\s+(?P<shift>[-]?\\d+\...
Parse the core contribution of NMR chemical shielding. Returns: G0 contribution matrix as list of list.
[ "Parse", "the", "core", "contribution", "of", "NMR", "chemical", "shielding", "." ]
python
train
ansible/tacacs_plus
tacacs_plus/packet.py
https://github.com/ansible/tacacs_plus/blob/de0d01372169c8849fa284d75097e57367c8930f/tacacs_plus/packet.py#L7-L70
def crypt(header, body_bytes, secret): """ TACACS+ uses a shared secret key (known to both the client and server) to obfuscate the body of sent packets. Only the packet body (not the header) is obfuscated. https://datatracker.ietf.org/doc/draft-ietf-opsawg-tacacs/?include_text=1#section-3.7 ENCRYPTED {data} == data ^ pseudo_pad The pad is generated by concatenating a series of MD5 hashes (each 16 bytes long) and truncating it to the length of the input data. pseudo_pad = {MD5_1 [,MD5_2 [ ... ,MD5_n]]} truncated to len(data) The first MD5 hash is generated by concatenating the session_id, the secret key, the version number and the sequence number and then running MD5 over that stream. All of those input values are available in the packet header, except for the secret key which is a shared secret between the TACACS+ client and server. Subsequent hashes are generated by using the same input stream, but concatenating the previous hash value at the end of the input stream. MD5_1 = MD5{session_id, key, version, seq_no} MD5_2 = MD5{session_id, key, version, seq_no, MD5_1} .... 
MD5_n = MD5{session_id, key, version, seq_no, MD5_n-1} :param header: a TACACSHeader object :param body_bytes: packed bytes, i.e., `struct.pack(...)` :param secret: a key used to encrypt/obfuscate packets according to the TACACS+ spec :return: packed bytes, i.e., `struct.pack(...)` representing the obfuscated packet body """ # noqa # B = unsigned char # !I = network-order (big-endian) unsigned int body_length = len(body_bytes) unhashed = ( struct.pack('!I', header.session_id) + six.b(secret) + struct.pack('B', header.version) + struct.pack('B', header.seq_no) ) pad = hashed = md5(unhashed).digest() if (len(pad) < body_length): # remake hash, appending it to pad until pad >= header.length while True: hashed = md5(unhashed + hashed).digest() pad += hashed if len(pad) >= body_length: break pad = pad[0:(body_length)] pad = list(struct.unpack('B' * len(pad), pad)) packet_body = [] for x in struct.unpack('B' * body_length, body_bytes): packet_body.append(x ^ pad.pop(0)) return struct.pack('B' * len(packet_body), *packet_body)
[ "def", "crypt", "(", "header", ",", "body_bytes", ",", "secret", ")", ":", "# noqa", "# B = unsigned char", "# !I = network-order (big-endian) unsigned int", "body_length", "=", "len", "(", "body_bytes", ")", "unhashed", "=", "(", "struct", ".", "pack", "(", "'!I'...
TACACS+ uses a shared secret key (known to both the client and server) to obfuscate the body of sent packets. Only the packet body (not the header) is obfuscated. https://datatracker.ietf.org/doc/draft-ietf-opsawg-tacacs/?include_text=1#section-3.7 ENCRYPTED {data} == data ^ pseudo_pad The pad is generated by concatenating a series of MD5 hashes (each 16 bytes long) and truncating it to the length of the input data. pseudo_pad = {MD5_1 [,MD5_2 [ ... ,MD5_n]]} truncated to len(data) The first MD5 hash is generated by concatenating the session_id, the secret key, the version number and the sequence number and then running MD5 over that stream. All of those input values are available in the packet header, except for the secret key which is a shared secret between the TACACS+ client and server. Subsequent hashes are generated by using the same input stream, but concatenating the previous hash value at the end of the input stream. MD5_1 = MD5{session_id, key, version, seq_no} MD5_2 = MD5{session_id, key, version, seq_no, MD5_1} .... MD5_n = MD5{session_id, key, version, seq_no, MD5_n-1} :param header: a TACACSHeader object :param body_bytes: packed bytes, i.e., `struct.pack(...)` :param secret: a key used to encrypt/obfuscate packets according to the TACACS+ spec :return: packed bytes, i.e., `struct.pack(...)` representing the obfuscated packet body
[ "TACACS", "+", "uses", "a", "shared", "secret", "key", "(", "known", "to", "both", "the", "client", "and", "server", ")", "to", "obfuscate", "the", "body", "of", "sent", "packets", ".", "Only", "the", "packet", "body", "(", "not", "the", "header", ")",...
python
train
pydata/xarray
xarray/core/common.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/common.py#L252-L280
def squeeze(self, dim: Union[Hashable, Iterable[Hashable], None] = None, drop: bool = False, axis: Union[int, Iterable[int], None] = None): """Return a new object with squeezed data. Parameters ---------- dim : None or Hashable or iterable of Hashable, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. drop : bool, optional If ``drop=True``, drop squeezed coordinates instead of making them scalar. axis : None or int or iterable of int, optional Like dim, but positional. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze """ dims = get_squeeze_dims(self, dim, axis) return self.isel(drop=drop, **{d: 0 for d in dims})
[ "def", "squeeze", "(", "self", ",", "dim", ":", "Union", "[", "Hashable", ",", "Iterable", "[", "Hashable", "]", ",", "None", "]", "=", "None", ",", "drop", ":", "bool", "=", "False", ",", "axis", ":", "Union", "[", "int", ",", "Iterable", "[", "...
Return a new object with squeezed data. Parameters ---------- dim : None or Hashable or iterable of Hashable, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. drop : bool, optional If ``drop=True``, drop squeezed coordinates instead of making them scalar. axis : None or int or iterable of int, optional Like dim, but positional. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze
[ "Return", "a", "new", "object", "with", "squeezed", "data", "." ]
python
train
taskcluster/taskcluster-client.py
taskcluster/aio/auth.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/auth.py#L457-L468
async def azureAccounts(self, *args, **kwargs): """ List Accounts Managed by Auth Retrieve a list of all Azure accounts managed by Taskcluster Auth. This method gives output: ``v1/azure-account-list-response.json#`` This method is ``stable`` """ return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
[ "async", "def", "azureAccounts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"azureAccounts\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ...
List Accounts Managed by Auth Retrieve a list of all Azure accounts managed by Taskcluster Auth. This method gives output: ``v1/azure-account-list-response.json#`` This method is ``stable``
[ "List", "Accounts", "Managed", "by", "Auth" ]
python
train
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L1488-L1507
def unassign_assessment_part_from_bank(self, assessment_part_id, bank_id): """Removes an ``AssessmentPart`` from an ``Bank``. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: NotFound - ``assessment_part_id`` or ``bank_id`` not found or ``assessment_part_id`` not assigned to ``bank_id`` raise: NullArgument - ``assessment_part_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy) lookup_session.get_bank(bank_id) # to raise NotFound self._unassign_object_from_catalog(assessment_part_id, bank_id)
[ "def", "unassign_assessment_part_from_bank", "(", "self", ",", "assessment_part_id", ",", "bank_id", ")", ":", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bank_looku...
Removes an ``AssessmentPart`` from an ``Bank``. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: NotFound - ``assessment_part_id`` or ``bank_id`` not found or ``assessment_part_id`` not assigned to ``bank_id`` raise: NullArgument - ``assessment_part_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Removes", "an", "AssessmentPart", "from", "an", "Bank", "." ]
python
train
COALAIP/pycoalaip
coalaip/data_formats.py
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/data_formats.py#L34-L53
def _make_context_immutable(context): """Best effort attempt at turning a properly formatted context (either a string, dict, or array of strings and dicts) into an immutable data structure. If we get an array, make it immutable by creating a tuple; if we get a dict, copy it into a MappingProxyType. Otherwise, return as-is. """ def make_immutable(val): if isinstance(val, Mapping): return MappingProxyType(val) else: return val if not isinstance(context, (str, Mapping)): try: return tuple([make_immutable(val) for val in context]) except TypeError: pass return make_immutable(context)
[ "def", "_make_context_immutable", "(", "context", ")", ":", "def", "make_immutable", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "Mapping", ")", ":", "return", "MappingProxyType", "(", "val", ")", "else", ":", "return", "val", "if", "not", ...
Best effort attempt at turning a properly formatted context (either a string, dict, or array of strings and dicts) into an immutable data structure. If we get an array, make it immutable by creating a tuple; if we get a dict, copy it into a MappingProxyType. Otherwise, return as-is.
[ "Best", "effort", "attempt", "at", "turning", "a", "properly", "formatted", "context", "(", "either", "a", "string", "dict", "or", "array", "of", "strings", "and", "dicts", ")", "into", "an", "immutable", "data", "structure", "." ]
python
train
saltstack/salt
salt/cloud/clouds/digitalocean.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/digitalocean.py#L245-L262
def get_location(vm_): ''' Return the VM's location ''' locations = avail_locations() vm_location = six.text_type(config.get_cloud_config_value( 'location', vm_, __opts__, search_global=False )) for location in locations: if vm_location in (locations[location]['name'], locations[location]['slug']): return locations[location]['slug'] raise SaltCloudNotFound( 'The specified location, \'{0}\', could not be found.'.format( vm_location ) )
[ "def", "get_location", "(", "vm_", ")", ":", "locations", "=", "avail_locations", "(", ")", "vm_location", "=", "six", ".", "text_type", "(", "config", ".", "get_cloud_config_value", "(", "'location'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", ...
Return the VM's location
[ "Return", "the", "VM", "s", "location" ]
python
train
bioasp/caspo
caspo/core/graph.py
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/graph.py#L48-L67
def read_sif(cls, path): """ Creates a graph from a `simple interaction format (SIF)`_ file Parameters ---------- path : str Absolute path to a SIF file Returns ------- caspo.core.graph.Graph Created object instance .. _simple interaction format (SIF): http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats """ df = pd.read_csv(path, delim_whitespace=True, names=['source', 'sign', 'target']).drop_duplicates() edges = [(source, target, {'sign': sign}) for _, source, sign, target in df.itertuples()] return cls(data=edges)
[ "def", "read_sif", "(", "cls", ",", "path", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "path", ",", "delim_whitespace", "=", "True", ",", "names", "=", "[", "'source'", ",", "'sign'", ",", "'target'", "]", ")", ".", "drop_duplicates", "(", ")",...
Creates a graph from a `simple interaction format (SIF)`_ file Parameters ---------- path : str Absolute path to a SIF file Returns ------- caspo.core.graph.Graph Created object instance .. _simple interaction format (SIF): http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats
[ "Creates", "a", "graph", "from", "a", "simple", "interaction", "format", "(", "SIF", ")", "_", "file" ]
python
train
sepandhaghighi/pycm
pycm/pycm_overall_func.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L682-L700
def PC_PI_calc(P, TOP, POP): """ Calculate percent chance agreement for Scott's Pi. :param P: condition positive :type P : dict :param TOP: test outcome positive :type TOP : dict :param POP: population :type POP:dict :return: percent chance agreement as float """ try: result = 0 for i in P.keys(): result += ((P[i] + TOP[i]) / (2 * POP[i]))**2 return result except Exception: return "None"
[ "def", "PC_PI_calc", "(", "P", ",", "TOP", ",", "POP", ")", ":", "try", ":", "result", "=", "0", "for", "i", "in", "P", ".", "keys", "(", ")", ":", "result", "+=", "(", "(", "P", "[", "i", "]", "+", "TOP", "[", "i", "]", ")", "/", "(", ...
Calculate percent chance agreement for Scott's Pi. :param P: condition positive :type P : dict :param TOP: test outcome positive :type TOP : dict :param POP: population :type POP:dict :return: percent chance agreement as float
[ "Calculate", "percent", "chance", "agreement", "for", "Scott", "s", "Pi", "." ]
python
train
Jammy2211/PyAutoLens
autolens/model/galaxy/galaxy.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy.py#L159-L180
def luminosity_within_ellipse_in_units(self, major_axis : dim.Length, unit_luminosity='eps', kpc_per_arcsec=None, exposure_time=None): """Compute the total luminosity of the galaxy's light profiles, within an ellipse of specified major axis. This is performed via integration of each light profile and is centred, oriented and aligned with each light model's individual geometry. See *light_profiles.luminosity_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. unit_luminosity : str The units the luminosity is returned in (eps | counts). exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts. """ if self.has_light_profile: return sum(map(lambda p: p.luminosity_within_ellipse_in_units(major_axis=major_axis, unit_luminosity=unit_luminosity, kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time), self.light_profiles)) else: return None
[ "def", "luminosity_within_ellipse_in_units", "(", "self", ",", "major_axis", ":", "dim", ".", "Length", ",", "unit_luminosity", "=", "'eps'", ",", "kpc_per_arcsec", "=", "None", ",", "exposure_time", "=", "None", ")", ":", "if", "self", ".", "has_light_profile",...
Compute the total luminosity of the galaxy's light profiles, within an ellipse of specified major axis. This is performed via integration of each light profile and is centred, oriented and aligned with each light model's individual geometry. See *light_profiles.luminosity_within_ellipse* for details of how this is performed. Parameters ---------- major_axis : float The major-axis radius of the ellipse. unit_luminosity : str The units the luminosity is returned in (eps | counts). exposure_time : float The exposure time of the observation, which converts luminosity from electrons per second units to counts.
[ "Compute", "the", "total", "luminosity", "of", "the", "galaxy", "s", "light", "profiles", "within", "an", "ellipse", "of", "specified", "major", "axis", ".", "This", "is", "performed", "via", "integration", "of", "each", "light", "profile", "and", "is", "cen...
python
valid
cltk/cltk
cltk/prosody/latin/verse_scanner.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/verse_scanner.py#L51-L88
def transform_i_to_j(self, line: str) -> str: """ Transform instances of consonantal i to j :param line: :return: >>> print(VerseScanner().transform_i_to_j("iactātus")) jactātus >>> print(VerseScanner().transform_i_to_j("bracchia")) bracchia """ words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append(self.syllabifier.convert_consonantal_i(prefix)) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix):])) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) char_list = string_utils.overwrite(list(new_line), r"\b[iī][{}]".format( self.constants.VOWELS + self.constants.ACCENTED_VOWELS), "j") char_list = string_utils.overwrite(char_list, r"\b[I][{}]".format(self.constants.VOWELS_WO_I), "J") char_list = string_utils.overwrite(char_list, r"[{}][i][{}]".format( self.constants.VOWELS_WO_I, self.constants.VOWELS), "j", 1) return "".join(char_list)
[ "def", "transform_i_to_j", "(", "self", ",", "line", ":", "str", ")", "->", "str", ":", "words", "=", "line", ".", "split", "(", "\" \"", ")", "space_list", "=", "string_utils", ".", "space_list", "(", "line", ")", "corrected_words", "=", "[", "]", "fo...
Transform instances of consonantal i to j :param line: :return: >>> print(VerseScanner().transform_i_to_j("iactātus")) jactātus >>> print(VerseScanner().transform_i_to_j("bracchia")) bracchia
[ "Transform", "instances", "of", "consonantal", "i", "to", "j", ":", "param", "line", ":", ":", "return", ":" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/incoming_phone_number/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/incoming_phone_number/__init__.py#L583-L597
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: IncomingPhoneNumberContext for this IncomingPhoneNumberInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberContext """ if self._context is None: self._context = IncomingPhoneNumberContext( self._version, account_sid=self._solution['account_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "IncomingPhoneNumberContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]"...
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: IncomingPhoneNumberContext for this IncomingPhoneNumberInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/interface.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/interface.py#L202-L216
def start_completion(self, buffer_name=None, select_first=False, select_last=False, insert_common_part=False, complete_event=None): """ Start asynchronous autocompletion of this buffer. (This will do nothing if a previous completion was still in progress.) """ buffer_name = buffer_name or self.current_buffer_name completer = self._async_completers.get(buffer_name) if completer: completer(select_first=select_first, select_last=select_last, insert_common_part=insert_common_part, complete_event=CompleteEvent(completion_requested=True))
[ "def", "start_completion", "(", "self", ",", "buffer_name", "=", "None", ",", "select_first", "=", "False", ",", "select_last", "=", "False", ",", "insert_common_part", "=", "False", ",", "complete_event", "=", "None", ")", ":", "buffer_name", "=", "buffer_nam...
Start asynchronous autocompletion of this buffer. (This will do nothing if a previous completion was still in progress.)
[ "Start", "asynchronous", "autocompletion", "of", "this", "buffer", ".", "(", "This", "will", "do", "nothing", "if", "a", "previous", "completion", "was", "still", "in", "progress", ".", ")" ]
python
train
pyslackers/slack-sansio
slack/sansio.py
https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/sansio.py#L355-L389
def validate_request_signature( body: str, headers: MutableMapping, signing_secret: str ) -> None: """ Validate incoming request signature using the application signing secret. Contrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand. Args: body: Raw request body headers: Request headers signing_secret: Application signing_secret Raise: :class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match :class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old """ request_timestamp = int(headers["X-Slack-Request-Timestamp"]) if (int(time.time()) - request_timestamp) > (60 * 5): raise exceptions.InvalidTimestamp(timestamp=request_timestamp) slack_signature = headers["X-Slack-Signature"] calculated_signature = ( "v0=" + hmac.new( signing_secret.encode("utf-8"), f"""v0:{headers["X-Slack-Request-Timestamp"]}:{body}""".encode("utf-8"), digestmod=hashlib.sha256, ).hexdigest() ) if not hmac.compare_digest(slack_signature, calculated_signature): raise exceptions.InvalidSlackSignature(slack_signature, calculated_signature)
[ "def", "validate_request_signature", "(", "body", ":", "str", ",", "headers", ":", "MutableMapping", ",", "signing_secret", ":", "str", ")", "->", "None", ":", "request_timestamp", "=", "int", "(", "headers", "[", "\"X-Slack-Request-Timestamp\"", "]", ")", "if",...
Validate incoming request signature using the application signing secret. Contrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand. Args: body: Raw request body headers: Request headers signing_secret: Application signing_secret Raise: :class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match :class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
[ "Validate", "incoming", "request", "signature", "using", "the", "application", "signing", "secret", "." ]
python
train
globality-corp/microcosm
microcosm/registry.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/registry.py#L43-L51
def all(self): """ Return a synthetic dictionary of all factories. """ return { key: value for key, value in chain(self.entry_points.items(), self.factories.items()) }
[ "def", "all", "(", "self", ")", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "chain", "(", "self", ".", "entry_points", ".", "items", "(", ")", ",", "self", ".", "factories", ".", "items", "(", ")", ")", "}" ]
Return a synthetic dictionary of all factories.
[ "Return", "a", "synthetic", "dictionary", "of", "all", "factories", "." ]
python
train
Microsoft/nni
tools/nni_annotation/code_generator.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_annotation/code_generator.py#L254-L281
def parse(code): """Annotate user code. Return annotated code (str) if annotation detected; return None if not. code: original user code (str) """ try: ast_tree = ast.parse(code) except Exception: raise RuntimeError('Bad Python code') transformer = Transformer() try: transformer.visit(ast_tree) except AssertionError as exc: raise RuntimeError('%d: %s' % (ast_tree.last_line, exc.args[0])) if not transformer.annotated: return None last_future_import = -1 import_nni = ast.Import(names=[ast.alias(name='nni', asname=None)]) nodes = ast_tree.body for i, _ in enumerate(nodes): if type(nodes[i]) is ast.ImportFrom and nodes[i].module == '__future__': last_future_import = i nodes.insert(last_future_import + 1, import_nni) return astor.to_source(ast_tree)
[ "def", "parse", "(", "code", ")", ":", "try", ":", "ast_tree", "=", "ast", ".", "parse", "(", "code", ")", "except", "Exception", ":", "raise", "RuntimeError", "(", "'Bad Python code'", ")", "transformer", "=", "Transformer", "(", ")", "try", ":", "trans...
Annotate user code. Return annotated code (str) if annotation detected; return None if not. code: original user code (str)
[ "Annotate", "user", "code", ".", "Return", "annotated", "code", "(", "str", ")", "if", "annotation", "detected", ";", "return", "None", "if", "not", ".", "code", ":", "original", "user", "code", "(", "str", ")" ]
python
train
ray-project/ray
python/ray/tune/scripts.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/scripts.py#L75-L79
def list_experiments(project_path, sort, output, filter_op, columns): """Lists experiments in the directory subtree.""" if columns: columns = columns.split(",") commands.list_experiments(project_path, sort, output, filter_op, columns)
[ "def", "list_experiments", "(", "project_path", ",", "sort", ",", "output", ",", "filter_op", ",", "columns", ")", ":", "if", "columns", ":", "columns", "=", "columns", ".", "split", "(", "\",\"", ")", "commands", ".", "list_experiments", "(", "project_path"...
Lists experiments in the directory subtree.
[ "Lists", "experiments", "in", "the", "directory", "subtree", "." ]
python
train
opencobra/cobrapy
cobra/util/solver.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/util/solver.py#L316-L360
def add_absolute_expression(model, expression, name="abs_var", ub=None, difference=0, add=True): """Add the absolute value of an expression to the model. Also defines a variable for the absolute value that can be used in other objectives or constraints. Parameters ---------- model : a cobra model The model to which to add the absolute expression. expression : A sympy expression Must be a valid expression within the Model's solver object. The absolute value is applied automatically on the expression. name : string The name of the newly created variable. ub : positive float The upper bound for the variable. difference : positive float The difference between the expression and the variable. add : bool Whether to add the variable to the model at once. Returns ------- namedtuple A named tuple with variable and two constraints (upper_constraint, lower_constraint) describing the new variable and the constraints that assign the absolute value of the expression to it. """ Components = namedtuple('Components', ['variable', 'upper_constraint', 'lower_constraint']) variable = model.problem.Variable(name, lb=0, ub=ub) # The following constraints enforce variable > expression and # variable > -expression upper_constraint = model.problem.Constraint(expression - variable, ub=difference, name="abs_pos_" + name), lower_constraint = model.problem.Constraint(expression + variable, lb=difference, name="abs_neg_" + name) to_add = Components(variable, upper_constraint, lower_constraint) if add: add_cons_vars_to_problem(model, to_add) return to_add
[ "def", "add_absolute_expression", "(", "model", ",", "expression", ",", "name", "=", "\"abs_var\"", ",", "ub", "=", "None", ",", "difference", "=", "0", ",", "add", "=", "True", ")", ":", "Components", "=", "namedtuple", "(", "'Components'", ",", "[", "'...
Add the absolute value of an expression to the model. Also defines a variable for the absolute value that can be used in other objectives or constraints. Parameters ---------- model : a cobra model The model to which to add the absolute expression. expression : A sympy expression Must be a valid expression within the Model's solver object. The absolute value is applied automatically on the expression. name : string The name of the newly created variable. ub : positive float The upper bound for the variable. difference : positive float The difference between the expression and the variable. add : bool Whether to add the variable to the model at once. Returns ------- namedtuple A named tuple with variable and two constraints (upper_constraint, lower_constraint) describing the new variable and the constraints that assign the absolute value of the expression to it.
[ "Add", "the", "absolute", "value", "of", "an", "expression", "to", "the", "model", "." ]
python
valid
SheffieldML/GPy
GPy/plotting/gpy_plot/latent_plots.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/plotting/gpy_plot/latent_plots.py#L305-L346
def plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which_indices=None, resolution=15, legend=True, plot_limits=None, updates=False, kern=None, marker='<>^vsd', num_samples=1000, annotation_kwargs=None, scatter_kwargs=None, **imshow_kwargs): """ Plot the latent space of the GP on the inputs. This is the density of the GP posterior as a grey scale and the scatter plot of the input dimemsions selected by which_indices. :param array-like labels: a label for each data point (row) of the inputs :param (int, int) which_indices: which input dimensions to plot against each other :param int resolution: the resolution at which we predict the magnification factor :param bool legend: whether to plot the legend on the figure, if int plot legend columns on legend :param plot_limits: the plot limits for the plot :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax)) :param bool updates: if possible, make interactive updates using the specific library you are using :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction :param str marker: markers to use - cycle if more labels then markers are given :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples. 
:param imshow_kwargs: the kwargs for the imshow (magnification factor) :param annotation_kwargs: the kwargs for the annotation plot :param scatter_kwargs: the kwargs for the scatter plots """ input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2] X = get_x_y_var(self)[0] _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution) canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]), xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs) if (data_labels is not None): legend = find_best_layout_for_subplots(len(np.unique(data_labels)))[1] else: data_labels = np.ones(self.num_data) legend = False plots = dict(scatter=_plot_latent_scatter(canvas, X, which_indices, data_labels, marker, num_samples, **scatter_kwargs or {})) plots.update(_plot_steepest_gradient_map(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, output_labels, updates, kern, annotation_kwargs=annotation_kwargs, **imshow_kwargs)) retval = pl().add_to_canvas(canvas, plots, legend=legend) _wait_for_updates(plots['annotation'], updates) return retval
[ "def", "plot_steepest_gradient_map", "(", "self", ",", "output_labels", "=", "None", ",", "data_labels", "=", "None", ",", "which_indices", "=", "None", ",", "resolution", "=", "15", ",", "legend", "=", "True", ",", "plot_limits", "=", "None", ",", "updates"...
Plot the latent space of the GP on the inputs. This is the density of the GP posterior as a grey scale and the scatter plot of the input dimemsions selected by which_indices. :param array-like labels: a label for each data point (row) of the inputs :param (int, int) which_indices: which input dimensions to plot against each other :param int resolution: the resolution at which we predict the magnification factor :param bool legend: whether to plot the legend on the figure, if int plot legend columns on legend :param plot_limits: the plot limits for the plot :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax)) :param bool updates: if possible, make interactive updates using the specific library you are using :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction :param str marker: markers to use - cycle if more labels then markers are given :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples. :param imshow_kwargs: the kwargs for the imshow (magnification factor) :param annotation_kwargs: the kwargs for the annotation plot :param scatter_kwargs: the kwargs for the scatter plots
[ "Plot", "the", "latent", "space", "of", "the", "GP", "on", "the", "inputs", ".", "This", "is", "the", "density", "of", "the", "GP", "posterior", "as", "a", "grey", "scale", "and", "the", "scatter", "plot", "of", "the", "input", "dimemsions", "selected", ...
python
train
calmjs/calmjs.parse
src/calmjs/parse/handlers/obfuscation.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/handlers/obfuscation.py#L322-L331
def reference(self, symbol, count=1): """ However, if referenced, ensure that the counter is applied to the catch symbol. """ if symbol == self.catch_symbol: self.catch_symbol_usage += count else: self.parent.reference(symbol, count)
[ "def", "reference", "(", "self", ",", "symbol", ",", "count", "=", "1", ")", ":", "if", "symbol", "==", "self", ".", "catch_symbol", ":", "self", ".", "catch_symbol_usage", "+=", "count", "else", ":", "self", ".", "parent", ".", "reference", "(", "symb...
However, if referenced, ensure that the counter is applied to the catch symbol.
[ "However", "if", "referenced", "ensure", "that", "the", "counter", "is", "applied", "to", "the", "catch", "symbol", "." ]
python
train
saltstack/salt
salt/proxy/ssh_sample.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L94-L104
def ping(): ''' Required. Ping the device on the other end of the connection ''' try: out, err = DETAILS['server'].sendline('help') return True except TerminalException as e: log.error(e) return False
[ "def", "ping", "(", ")", ":", "try", ":", "out", ",", "err", "=", "DETAILS", "[", "'server'", "]", ".", "sendline", "(", "'help'", ")", "return", "True", "except", "TerminalException", "as", "e", ":", "log", ".", "error", "(", "e", ")", "return", "...
Required. Ping the device on the other end of the connection
[ "Required", ".", "Ping", "the", "device", "on", "the", "other", "end", "of", "the", "connection" ]
python
train
carlcarl/grabflickr
grabflickr/grabflickr.py
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L150-L170
def get_photo_url(photo_id): """Request the photo download url with the photo id :param photo_id: The photo id of flickr :type photo_id: str :return: Photo download url :rtype: str """ args = _get_request_args( 'flickr.photos.getSizes', photo_id=photo_id ) resp = requests.post(API_URL, data=args) resp_json = json.loads(resp.text.encode('utf-8')) logger.debug(json.dumps(resp_json, indent=2)) size_list = resp_json['sizes']['size'] size_list_len = len(size_list) global image_size_mode image_size_mode = size_list_len if size_list_len < image_size_mode \ else image_size_mode download_url = resp_json['sizes']['size'][-image_size_mode]['source'] return download_url
[ "def", "get_photo_url", "(", "photo_id", ")", ":", "args", "=", "_get_request_args", "(", "'flickr.photos.getSizes'", ",", "photo_id", "=", "photo_id", ")", "resp", "=", "requests", ".", "post", "(", "API_URL", ",", "data", "=", "args", ")", "resp_json", "="...
Request the photo download url with the photo id :param photo_id: The photo id of flickr :type photo_id: str :return: Photo download url :rtype: str
[ "Request", "the", "photo", "download", "url", "with", "the", "photo", "id", ":", "param", "photo_id", ":", "The", "photo", "id", "of", "flickr", ":", "type", "photo_id", ":", "str", ":", "return", ":", "Photo", "download", "url", ":", "rtype", ":", "st...
python
train
collectiveacuity/labPack
labpack/records/time.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/records/time.py#L88-L119
def pyLocal(self, time_zone=''): ''' a method to report a python datetime from a labDT object :param time_zone: [optional] string with timezone to report in :return: string with date and time info ''' # validate inputs get_tz = get_localzone() title = 'Timezone input for labDT.pyLocal' if time_zone: # if time_zone.lower() in ('utc', 'uct', 'universal', 'zulu'): # raise ValueError('time_zone cannot be UTC. %s requires a local timezone value. Try:\nfor tz in pytz.all_timezones:\n print tz' % title) try: get_tz = tz.gettz(time_zone) except: raise ValueError('\n%s is not a valid timezone format. Try:\nfor tz in pytz.all_timezones:\n print tz' % title) # construct python datetime from labDT dT = self.astimezone(get_tz) dt_kwargs = { 'year': dT.year, 'month': dT.month, 'day': dT.day, 'hour': dT.hour, 'minute': dT.minute, 'second': dT.second, 'microsecond': dT.microsecond, 'tzinfo': dT.tzinfo } return labDT(**dt_kwargs)
[ "def", "pyLocal", "(", "self", ",", "time_zone", "=", "''", ")", ":", "# validate inputs\r", "get_tz", "=", "get_localzone", "(", ")", "title", "=", "'Timezone input for labDT.pyLocal'", "if", "time_zone", ":", "# if time_zone.lower() in ('utc', 'uct', 'universal', 'zulu'...
a method to report a python datetime from a labDT object :param time_zone: [optional] string with timezone to report in :return: string with date and time info
[ "a", "method", "to", "report", "a", "python", "datetime", "from", "a", "labDT", "object", ":", "param", "time_zone", ":", "[", "optional", "]", "string", "with", "timezone", "to", "report", "in", ":", "return", ":", "string", "with", "date", "and", "time...
python
train
boriel/zxbasic
asmparse.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L244-L252
def eval(self): """ Recursively evals the node. Exits with an error if not resolved. """ Expr.ignore = False result = self.try_eval() Expr.ignore = True return result
[ "def", "eval", "(", "self", ")", ":", "Expr", ".", "ignore", "=", "False", "result", "=", "self", ".", "try_eval", "(", ")", "Expr", ".", "ignore", "=", "True", "return", "result" ]
Recursively evals the node. Exits with an error if not resolved.
[ "Recursively", "evals", "the", "node", ".", "Exits", "with", "an", "error", "if", "not", "resolved", "." ]
python
train
humilis/humilis-lambdautils
lambdautils/state.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L151-L204
def get_state(key, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, deserializer=json.loads, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000): """Get Lambda state value(s).""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to get state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Getting key '{}' from table '{}'".format(key, table_name)) if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=wait_exponential_multiplier, wait_exponential_max=wait_exponential_max, stop_max_delay=stop_max_delay) def get_item(): try: return table.get_item( Key={"id": key}, ConsistentRead=consistent).get( "Item", {}).get("value") except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise value = get_item() if not value: return if deserializer: try: value = deserializer(value) except ValueError: # For backwards compatibility: plain strings are allowed logger.error("Unable to json-deserialize value '{}'".format(value)) return value return value
[ "def", "get_state", "(", "key", ",", "namespace", "=", "None", ",", "table_name", "=", "None", ",", "environment", "=", "None", ",", "layer", "=", "None", ",", "stage", "=", "None", ",", "shard_id", "=", "None", ",", "consistent", "=", "True", ",", "...
Get Lambda state value(s).
[ "Get", "Lambda", "state", "value", "(", "s", ")", "." ]
python
train
cameronmaske/celery-once
celery_once/backends/redis.py
https://github.com/cameronmaske/celery-once/blob/ffd21cbdf486444b4b1fcc6f7354436b895285c6/celery_once/backends/redis.py#L91-L109
def raise_or_lock(self, key, timeout): """ Checks if the task is locked and raises an exception, else locks the task. By default, the tasks and the key expire after 60 minutes. (meaning it will not be executed and the lock will clear). """ acquired = Lock( self.redis, key, timeout=timeout, blocking=self.blocking, blocking_timeout=self.blocking_timeout ).acquire() if not acquired: # Time remaining in milliseconds # https://redis.io/commands/pttl ttl = self.redis.pttl(key) raise AlreadyQueued(ttl / 1000.)
[ "def", "raise_or_lock", "(", "self", ",", "key", ",", "timeout", ")", ":", "acquired", "=", "Lock", "(", "self", ".", "redis", ",", "key", ",", "timeout", "=", "timeout", ",", "blocking", "=", "self", ".", "blocking", ",", "blocking_timeout", "=", "sel...
Checks if the task is locked and raises an exception, else locks the task. By default, the tasks and the key expire after 60 minutes. (meaning it will not be executed and the lock will clear).
[ "Checks", "if", "the", "task", "is", "locked", "and", "raises", "an", "exception", "else", "locks", "the", "task", ".", "By", "default", "the", "tasks", "and", "the", "key", "expire", "after", "60", "minutes", ".", "(", "meaning", "it", "will", "not", ...
python
train
VingtCinq/python-mailchimp
mailchimp3/entities/authorizedapps.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/authorizedapps.py#L27-L44
def create(self, data): """ Retrieve OAuth2-based credentials to associate API calls with your application. :param data: The request body parameters :type data: :py:class:`dict` data = { "client_id": string*, "client_secret": string* } """ self.app_id = None if 'client_id' not in data: raise KeyError('The authorized app must have a client_id') if 'client_secret' not in data: raise KeyError('The authorized app must have a client_secret') return self._mc_client._post(url=self._build_path(), data=data)
[ "def", "create", "(", "self", ",", "data", ")", ":", "self", ".", "app_id", "=", "None", "if", "'client_id'", "not", "in", "data", ":", "raise", "KeyError", "(", "'The authorized app must have a client_id'", ")", "if", "'client_secret'", "not", "in", "data", ...
Retrieve OAuth2-based credentials to associate API calls with your application. :param data: The request body parameters :type data: :py:class:`dict` data = { "client_id": string*, "client_secret": string* }
[ "Retrieve", "OAuth2", "-", "based", "credentials", "to", "associate", "API", "calls", "with", "your", "application", "." ]
python
valid
NoneGG/aredis
aredis/client.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/client.py#L261-L282
def from_url(cls, url, db=None, skip_full_coverage_check=False, **kwargs): """ Return a Redis client object configured from the given URL, which must use either `the ``redis://`` scheme <http://www.iana.org/assignments/uri-schemes/prov/redis>`_ for RESP connections or the ``unix://`` scheme for Unix domain sockets. For example:: redis://[:password]@localhost:6379/0 unix://[:password]@/path/to/socket.sock?db=0 There are several ways to specify a database number. The parse function will return the first specified option: 1. A ``db`` querystring option, e.g. redis://localhost?db=0 2. If using the redis:// scheme, the path argument of the url, e.g. redis://localhost/0 3. The ``db`` argument to this function. If none of these options are specified, db=0 is used. Any additional querystring arguments and keyword arguments will be passed along to the ConnectionPool class's initializer. In the case of conflicting arguments, querystring arguments always win. """ connection_pool = ClusterConnectionPool.from_url(url, db=db, **kwargs) return cls(connection_pool=connection_pool, skip_full_coverage_check=skip_full_coverage_check)
[ "def", "from_url", "(", "cls", ",", "url", ",", "db", "=", "None", ",", "skip_full_coverage_check", "=", "False", ",", "*", "*", "kwargs", ")", ":", "connection_pool", "=", "ClusterConnectionPool", ".", "from_url", "(", "url", ",", "db", "=", "db", ",", ...
Return a Redis client object configured from the given URL, which must use either `the ``redis://`` scheme <http://www.iana.org/assignments/uri-schemes/prov/redis>`_ for RESP connections or the ``unix://`` scheme for Unix domain sockets. For example:: redis://[:password]@localhost:6379/0 unix://[:password]@/path/to/socket.sock?db=0 There are several ways to specify a database number. The parse function will return the first specified option: 1. A ``db`` querystring option, e.g. redis://localhost?db=0 2. If using the redis:// scheme, the path argument of the url, e.g. redis://localhost/0 3. The ``db`` argument to this function. If none of these options are specified, db=0 is used. Any additional querystring arguments and keyword arguments will be passed along to the ConnectionPool class's initializer. In the case of conflicting arguments, querystring arguments always win.
[ "Return", "a", "Redis", "client", "object", "configured", "from", "the", "given", "URL", "which", "must", "use", "either", "the", "redis", ":", "//", "scheme", "<http", ":", "//", "www", ".", "iana", ".", "org", "/", "assignments", "/", "uri", "-", "sc...
python
train
google/grr
grr/server/grr_response_server/databases/mysql_events.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_events.py#L22-L68
def ReadAPIAuditEntries(self, username=None, router_method_names=None, min_timestamp=None, max_timestamp=None, cursor=None): """Returns audit entries stored in the database.""" query = """SELECT details, timestamp FROM api_audit_entry FORCE INDEX (api_audit_entry_by_username_timestamp) {WHERE_PLACEHOLDER} ORDER BY timestamp ASC """ conditions = [] values = [] where = "" if username is not None: conditions.append("username = %s") values.append(username) if router_method_names: placeholders = ["%s"] * len(router_method_names) placeholders = ", ".join(placeholders) conditions.append("router_method_name IN (%s)" % placeholders) values.extend(router_method_names) if min_timestamp is not None: conditions.append("timestamp >= FROM_UNIXTIME(%s)") values.append(mysql_utils.RDFDatetimeToTimestamp(min_timestamp)) if max_timestamp is not None: conditions.append("timestamp <= FROM_UNIXTIME(%s)") values.append(mysql_utils.RDFDatetimeToTimestamp(max_timestamp)) if conditions: where = "WHERE " + " AND ".join(conditions) query = query.replace("{WHERE_PLACEHOLDER}", where) cursor.execute(query, values) return [ _AuditEntryFromRow(details, timestamp) for details, timestamp in cursor.fetchall() ]
[ "def", "ReadAPIAuditEntries", "(", "self", ",", "username", "=", "None", ",", "router_method_names", "=", "None", ",", "min_timestamp", "=", "None", ",", "max_timestamp", "=", "None", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"\"\"SELECT details, t...
Returns audit entries stored in the database.
[ "Returns", "audit", "entries", "stored", "in", "the", "database", "." ]
python
train
SwissDataScienceCenter/renku-python
renku/cli/update.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/update.py#L143-L190
def update(client, revision, no_output, siblings, paths): """Update existing files by rerunning their outdated workflow.""" graph = Graph(client) outputs = graph.build(revision=revision, can_be_cwl=no_output, paths=paths) outputs = {node for node in outputs if graph.need_update(node)} if not outputs: click.secho( 'All files were generated from the latest inputs.', fg='green' ) sys.exit(0) # Check or extend siblings of outputs. outputs = siblings(graph, outputs) output_paths = {node.path for node in outputs if _safe_path(node.path)} # Get all clean nodes. input_paths = {node.path for node in graph.nodes} - output_paths # Store the generated workflow used for updating paths. import yaml output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex) workflow = graph.ascwl( input_paths=input_paths, output_paths=output_paths, outputs=outputs, ) # Make sure all inputs are pulled from a storage. client.pull_paths_from_storage( *(path for _, path in workflow.iter_input_files(client.workflow_path)) ) with output_file.open('w') as f: f.write( yaml.dump( ascwl( workflow, filter=lambda _, x: x is not None, basedir=client.workflow_path, ), default_flow_style=False ) ) from ._cwl import execute execute(client, output_file, output_paths=output_paths)
[ "def", "update", "(", "client", ",", "revision", ",", "no_output", ",", "siblings", ",", "paths", ")", ":", "graph", "=", "Graph", "(", "client", ")", "outputs", "=", "graph", ".", "build", "(", "revision", "=", "revision", ",", "can_be_cwl", "=", "no_...
Update existing files by rerunning their outdated workflow.
[ "Update", "existing", "files", "by", "rerunning", "their", "outdated", "workflow", "." ]
python
train
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L1442-L1464
def AjustarLiquidacionContrato(self): "Ajustar Liquidación activas relacionadas a un contrato" # limpiar arrays no enviados: if not self.ajuste['ajusteBase']['certificados']: del self.ajuste['ajusteBase']['certificados'] for k1 in ('ajusteCredito', 'ajusteDebito'): for k2 in ('retenciones', 'deducciones'): if not self.ajuste[k1][k2]: del self.ajuste[k1][k2] ret = self.client.liquidacionAjustarContrato( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, **self.ajuste ) ret = ret['ajusteContratoReturn'] self.__analizar_errores(ret) if 'ajusteContrato' in ret: aut = ret['ajusteContrato'] self.AnalizarAjuste(aut) return True
[ "def", "AjustarLiquidacionContrato", "(", "self", ")", ":", "# limpiar arrays no enviados:", "if", "not", "self", ".", "ajuste", "[", "'ajusteBase'", "]", "[", "'certificados'", "]", ":", "del", "self", ".", "ajuste", "[", "'ajusteBase'", "]", "[", "'certificado...
Ajustar Liquidación activas relacionadas a un contrato
[ "Ajustar", "Liquidación", "activas", "relacionadas", "a", "un", "contrato" ]
python
train
pauleveritt/kaybee
kaybee/plugins/events.py
https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/events.py#L115-L122
def call_env_before_read_docs(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames: List[str]): """ On env-read-docs, do callbacks""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EBRD): callback(kb_app, sphinx_app, sphinx_env, docnames)
[ "def", "call_env_before_read_docs", "(", "cls", ",", "kb_app", ",", "sphinx_app", ":", "Sphinx", ",", "sphinx_env", ":", "BuildEnvironment", ",", "docnames", ":", "List", "[", "str", "]", ")", ":", "for", "callback", "in", "EventAction", ".", "get_callbacks", ...
On env-read-docs, do callbacks
[ "On", "env", "-", "read", "-", "docs", "do", "callbacks" ]
python
train
nuSTORM/gnomon
gnomon/Configuration.py
https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Configuration.py#L143-L173
def populate_args_level(schema, parser): """Use a schema to populate a command line argument parser""" for key, value in schema['properties'].iteritems(): if key == 'name': continue arg = '--%s' % key desc = value['description'] if 'type' in value: if value['type'] == 'string': if 'enum' in value: parser.add_argument(arg, help=desc, type=str, choices=value['enum']) else: parser.add_argument(arg, help=desc, type=str) elif value['type'] == 'number': parser.add_argument(arg, help=desc, type=float) elif value['type'] == 'integer': parser.add_argument(arg, help=desc, type=int) elif str(value['type']) == 'array': assert value['minItems'] == value['maxItems'] if value['items']['type'] != 'number': raise NotImplementedError("Only float arrays work") parser.add_argument(arg, help=desc, type=float, nargs=value['maxItems'], metavar='N') elif value['type'] == 'object': #group = parser.add_argument_group(key, value['description']) #populate_args_level(value, group) pass
[ "def", "populate_args_level", "(", "schema", ",", "parser", ")", ":", "for", "key", ",", "value", "in", "schema", "[", "'properties'", "]", ".", "iteritems", "(", ")", ":", "if", "key", "==", "'name'", ":", "continue", "arg", "=", "'--%s'", "%", "key",...
Use a schema to populate a command line argument parser
[ "Use", "a", "schema", "to", "populate", "a", "command", "line", "argument", "parser" ]
python
train
ebroecker/canmatrix
src/canmatrix/canmatrix.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/canmatrix.py#L523-L548
def unpack_bitstring(length, is_float, is_signed, bits): # type: (int, bool, bool, typing.Any) -> typing.Union[float, int] """ returns a value calculated from bits :param length: length of signal in bits :param is_float: value is float :param bits: value as bits (array/iterable) :param is_signed: value is signed :return: """ if is_float: types = { 32: '>f', 64: '>d' } float_type = types[length] value, = struct.unpack(float_type, bytearray(int(''.join(b), 2) for b in grouper(bits, 8))) else: value = int(bits, 2) if is_signed and bits[0] == '1': value -= (1 << len(bits)) return value
[ "def", "unpack_bitstring", "(", "length", ",", "is_float", ",", "is_signed", ",", "bits", ")", ":", "# type: (int, bool, bool, typing.Any) -> typing.Union[float, int]", "if", "is_float", ":", "types", "=", "{", "32", ":", "'>f'", ",", "64", ":", "'>d'", "}", "fl...
returns a value calculated from bits :param length: length of signal in bits :param is_float: value is float :param bits: value as bits (array/iterable) :param is_signed: value is signed :return:
[ "returns", "a", "value", "calculated", "from", "bits", ":", "param", "length", ":", "length", "of", "signal", "in", "bits", ":", "param", "is_float", ":", "value", "is", "float", ":", "param", "bits", ":", "value", "as", "bits", "(", "array", "/", "ite...
python
train
Shapeways/coyote_framework
coyote_framework/database/coyote_db.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/database/coyote_db.py#L385-L388
def delete(sql, *args, **kwargs): """Deletes and commits with an insert sql statement""" assert "delete" in sql.lower(), 'This function requires a delete statement, provided: {}'.format(sql) CoyoteDb.execute_and_commit(sql, *args, **kwargs)
[ "def", "delete", "(", "sql", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "\"delete\"", "in", "sql", ".", "lower", "(", ")", ",", "'This function requires a delete statement, provided: {}'", ".", "format", "(", "sql", ")", "CoyoteDb", ".", ...
Deletes and commits with an insert sql statement
[ "Deletes", "and", "commits", "with", "an", "insert", "sql", "statement" ]
python
train
rwl/godot
godot/ui/graph_view_model.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/ui/graph_view_model.py#L251-L266
def save(self, info): """ Handles saving the current model to the last file. """ save_file = self.save_file if not isfile(save_file): self.save_as(info) else: fd = None try: fd = open(save_file, "wb") dot_code = str(self.model) fd.write(dot_code) finally: if fd is not None: fd.close()
[ "def", "save", "(", "self", ",", "info", ")", ":", "save_file", "=", "self", ".", "save_file", "if", "not", "isfile", "(", "save_file", ")", ":", "self", ".", "save_as", "(", "info", ")", "else", ":", "fd", "=", "None", "try", ":", "fd", "=", "op...
Handles saving the current model to the last file.
[ "Handles", "saving", "the", "current", "model", "to", "the", "last", "file", "." ]
python
test
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_objectexpr_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_objectexpr_parser.py#L196-L211
def members_entries(self, all_are_optional: bool=False) -> List[Tuple[str, str]]: """ Return an ordered list of elements for the _members section :param all_are_optional: True means we're in a choice situation so everything is optional :return: """ rval = [] if self._members: for member in self._members: rval += member.members_entries(all_are_optional) elif self._choices: for choice in self._choices: rval += self._context.reference(choice).members_entries(True) else: return [] return rval
[ "def", "members_entries", "(", "self", ",", "all_are_optional", ":", "bool", "=", "False", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "rval", "=", "[", "]", "if", "self", ".", "_members", ":", "for", "member", "in", "se...
Return an ordered list of elements for the _members section :param all_are_optional: True means we're in a choice situation so everything is optional :return:
[ "Return", "an", "ordered", "list", "of", "elements", "for", "the", "_members", "section" ]
python
train
LabKey/labkey-api-python
labkey/security.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/security.py#L194-L213
def __make_security_group_api_request(server_context, api, user_ids, group_id, container_path): """ Execute a request against the LabKey Security Controller Group Membership apis :param server_context: A LabKey server context. See utils.create_server_context. :param api: Action to execute :param user_ids: user ids to apply action to :param group_id: group id to apply action to :param container_path: Additional container context path :return: Request json object """ url = server_context.build_url(security_controller, api, container_path) # if user_ids is only a single scalar make it an array if not hasattr(user_ids, "__iter__"): user_ids = [user_ids] return server_context.make_request(url, { 'groupId': group_id, 'principalIds': user_ids })
[ "def", "__make_security_group_api_request", "(", "server_context", ",", "api", ",", "user_ids", ",", "group_id", ",", "container_path", ")", ":", "url", "=", "server_context", ".", "build_url", "(", "security_controller", ",", "api", ",", "container_path", ")", "#...
Execute a request against the LabKey Security Controller Group Membership apis :param server_context: A LabKey server context. See utils.create_server_context. :param api: Action to execute :param user_ids: user ids to apply action to :param group_id: group id to apply action to :param container_path: Additional container context path :return: Request json object
[ "Execute", "a", "request", "against", "the", "LabKey", "Security", "Controller", "Group", "Membership", "apis", ":", "param", "server_context", ":", "A", "LabKey", "server", "context", ".", "See", "utils", ".", "create_server_context", ".", ":", "param", "api", ...
python
train
globality-corp/microcosm
microcosm/config/types.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/config/types.py#L8-L21
def boolean(value): """ Configuration-friendly boolean type converter. Supports both boolean-valued and string-valued inputs (e.g. from env vars). """ if isinstance(value, bool): return value if value == "": return False return strtobool(value)
[ "def", "boolean", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "if", "value", "==", "\"\"", ":", "return", "False", "return", "strtobool", "(", "value", ")" ]
Configuration-friendly boolean type converter. Supports both boolean-valued and string-valued inputs (e.g. from env vars).
[ "Configuration", "-", "friendly", "boolean", "type", "converter", "." ]
python
train
log2timeline/plaso
utils/plot_task_queue.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/utils/plot_task_queue.py#L22-L75
def Main(): """The main program function. Returns: bool: True if successful or False if not. """ argument_parser = argparse.ArgumentParser(description=( 'Plots memory usage from profiling data.')) argument_parser.add_argument( '--output', dest='output_file', type=str, help=( 'path of the output file to write the graph to instead of using ' 'interactive mode. The output format deduced from the extension ' 'of the filename.')) argument_parser.add_argument( 'profile_path', type=str, help=( 'path to the directory containing the profiling data.')) options = argument_parser.parse_args() if not os.path.isdir(options.profile_path): print('No such directory: {0:s}'.format(options.profile_path)) return False names = ['time', 'queued', 'processing', 'to_merge', 'abandoned', 'total'] glob_expression = os.path.join(options.profile_path, 'task_queue-*.csv.gz') for csv_file_name in glob.glob(glob_expression): data = numpy.genfromtxt( csv_file_name, delimiter='\t', dtype=None, encoding='utf-8', names=names, skip_header=1) pyplot.plot(data['time'], data['queued'], label='queued') pyplot.plot(data['time'], data['processing'], label='processing') pyplot.plot(data['time'], data['to_merge'], label='to merge') pyplot.plot(data['time'], data['abandoned'], label='abandoned') pyplot.title('Number of tasks over time') pyplot.xlabel('Time') pyplot.xscale('linear') pyplot.ylabel('Number of tasks') pyplot.yscale('linear') pyplot.legend() if options.output_file: pyplot.savefig(options.output_file) else: pyplot.show() return True
[ "def", "Main", "(", ")", ":", "argument_parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "(", "'Plots memory usage from profiling data.'", ")", ")", "argument_parser", ".", "add_argument", "(", "'--output'", ",", "dest", "=", "'output_file'"...
The main program function. Returns: bool: True if successful or False if not.
[ "The", "main", "program", "function", "." ]
python
train
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L626-L641
def execAsync(self, func, *args, **kwargs): 'Execute `func(*args, **kwargs)` in a separate thread.' thread = threading.Thread(target=self.toplevelTryFunc, daemon=True, args=(func,)+args, kwargs=kwargs) self.addThread(thread) if self.sheets: currentSheet = self.sheets[0] currentSheet.currentThreads.append(thread) else: currentSheet = None thread.sheet = currentSheet thread.start() return thread
[ "def", "execAsync", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "toplevelTryFunc", ",", "daemon", "=", "True", ",", "args", "=", "(", "fu...
Execute `func(*args, **kwargs)` in a separate thread.
[ "Execute", "func", "(", "*", "args", "**", "kwargs", ")", "in", "a", "separate", "thread", "." ]
python
train
pimusicbox/mopidy-websettings
mopidy_websettings/__init__.py
https://github.com/pimusicbox/mopidy-websettings/blob/a9aca8e15a29f323e7d91e3f2237f4605e2892e3/mopidy_websettings/__init__.py#L33-L45
def restart_program(): """ DOES NOT WORK WELL WITH MOPIDY Hack from https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program to support updating the settings, since mopidy is not able to do that yet Restarts the current program Note: this function does not return. Any cleanup action (like saving data) must be done before calling this function """ python = sys.executable os.execl(python, python, * sys.argv)
[ "def", "restart_program", "(", ")", ":", "python", "=", "sys", ".", "executable", "os", ".", "execl", "(", "python", ",", "python", ",", "*", "sys", ".", "argv", ")" ]
DOES NOT WORK WELL WITH MOPIDY Hack from https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program to support updating the settings, since mopidy is not able to do that yet Restarts the current program Note: this function does not return. Any cleanup action (like saving data) must be done before calling this function
[ "DOES", "NOT", "WORK", "WELL", "WITH", "MOPIDY", "Hack", "from", "https", ":", "//", "www", ".", "daniweb", ".", "com", "/", "software", "-", "development", "/", "python", "/", "code", "/", "260268", "/", "restart", "-", "your", "-", "python", "-", "...
python
train
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L150-L156
def setLayout(self, value): """ Sets the worksheet layout, keeping it sorted by position :param value: the layout to set """ new_layout = sorted(value, key=lambda k: k['position']) self.getField('Layout').set(self, new_layout)
[ "def", "setLayout", "(", "self", ",", "value", ")", ":", "new_layout", "=", "sorted", "(", "value", ",", "key", "=", "lambda", "k", ":", "k", "[", "'position'", "]", ")", "self", ".", "getField", "(", "'Layout'", ")", ".", "set", "(", "self", ",", ...
Sets the worksheet layout, keeping it sorted by position :param value: the layout to set
[ "Sets", "the", "worksheet", "layout", "keeping", "it", "sorted", "by", "position", ":", "param", "value", ":", "the", "layout", "to", "set" ]
python
train
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2019-L2034
def _checkMode(mode): """Check that the Modbus mode is valie. Args: mode (string): The Modbus mode (MODE_RTU or MODE_ASCII) Raises: TypeError, ValueError """ if not isinstance(mode, str): raise TypeError('The {0} should be a string. Given: {1!r}'.format("mode", mode)) if mode not in [MODE_RTU, MODE_ASCII]: raise ValueError("Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.".format(mode))
[ "def", "_checkMode", "(", "mode", ")", ":", "if", "not", "isinstance", "(", "mode", ",", "str", ")", ":", "raise", "TypeError", "(", "'The {0} should be a string. Given: {1!r}'", ".", "format", "(", "\"mode\"", ",", "mode", ")", ")", "if", "mode", "not", "...
Check that the Modbus mode is valie. Args: mode (string): The Modbus mode (MODE_RTU or MODE_ASCII) Raises: TypeError, ValueError
[ "Check", "that", "the", "Modbus", "mode", "is", "valie", "." ]
python
train
yueyoum/social-oauth
example/_bottle.py
https://github.com/yueyoum/social-oauth/blob/80600ea737355b20931c8a0b5223f5b68175d930/example/_bottle.py#L1946-L1953
def cookie_decode(data, key): ''' Verify and decode an encoded string. Return an object or None.''' data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None
[ "def", "cookie_decode", "(", "data", ",", "key", ")", ":", "data", "=", "tob", "(", "data", ")", "if", "cookie_is_encoded", "(", "data", ")", ":", "sig", ",", "msg", "=", "data", ".", "split", "(", "tob", "(", "'?'", ")", ",", "1", ")", "if", "...
Verify and decode an encoded string. Return an object or None.
[ "Verify", "and", "decode", "an", "encoded", "string", ".", "Return", "an", "object", "or", "None", "." ]
python
train
apacha/OMR-Datasets
omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/downloaders/MuscimaPlusPlusDatasetDownloader.py#L33-L54
def download_and_extract_dataset(self, destination_directory: str): """ Downloads and extracts the MUSCIMA++ dataset along with the images from the CVC-MUSCIMA dataset that were manually annotated (140 out of 1000 images). """ if not os.path.exists(self.get_dataset_filename()): print("Downloading MUSCIMA++ Dataset...") self.download_file(self.get_dataset_download_url(), self.get_dataset_filename()) if not os.path.exists(self.get_imageset_filename()): print("Downloading MUSCIMA++ Images...") self.download_file(self.get_images_download_url(), self.get_imageset_filename()) print("Extracting MUSCIMA++ Dataset...") self.extract_dataset(os.path.abspath(destination_directory)) absolute_path_to_temp_folder = os.path.abspath('MuscimaPpImages') self.extract_dataset(absolute_path_to_temp_folder, self.get_imageset_filename()) DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "fulls"), os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data", "images")) self.clean_up_temp_directory(absolute_path_to_temp_folder)
[ "def", "download_and_extract_dataset", "(", "self", ",", "destination_directory", ":", "str", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "get_dataset_filename", "(", ")", ")", ":", "print", "(", "\"Downloading MUSCIMA++ Dataset......
Downloads and extracts the MUSCIMA++ dataset along with the images from the CVC-MUSCIMA dataset that were manually annotated (140 out of 1000 images).
[ "Downloads", "and", "extracts", "the", "MUSCIMA", "++", "dataset", "along", "with", "the", "images", "from", "the", "CVC", "-", "MUSCIMA", "dataset", "that", "were", "manually", "annotated", "(", "140", "out", "of", "1000", "images", ")", "." ]
python
train
fbradyirl/hikvision
hikvision/api.py
https://github.com/fbradyirl/hikvision/blob/3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6/hikvision/api.py#L117-L163
def get_about(self, element_to_query=None): """ Returns ElementTree containing the result of <host>/System/deviceInfo or if element_to_query is not None, the value of that element """ url = '%s/System/deviceInfo' % self._base _LOGGING.info('url: %s', url) response = requests.get( url, auth=HTTPBasicAuth(self._username, self._password)) _LOGGING.debug('response: %s', response) _LOGGING.debug("status_code %s", response.status_code) if response.status_code != 200: log_response_errors(response) return None if element_to_query is None: return response.text else: try: tree = ElementTree.fromstring(response.text) element_to_query = './/{%s}%s' % ( self._xml_namespace, element_to_query) result = tree.findall(element_to_query) if len(result) > 0: _LOGGING.debug('element_to_query: %s result: %s', element_to_query, result[0]) return result[0].text.strip() else: _LOGGING.error( 'There was a problem finding element: %s', element_to_query) _LOGGING.error('Entire response: %s', response.text) except AttributeError as attib_err: _LOGGING.error('Entire response: %s', response.text) _LOGGING.error( 'There was a problem finding element:' ' %s AttributeError: %s', element_to_query, attib_err) return return
[ "def", "get_about", "(", "self", ",", "element_to_query", "=", "None", ")", ":", "url", "=", "'%s/System/deviceInfo'", "%", "self", ".", "_base", "_LOGGING", ".", "info", "(", "'url: %s'", ",", "url", ")", "response", "=", "requests", ".", "get", "(", "u...
Returns ElementTree containing the result of <host>/System/deviceInfo or if element_to_query is not None, the value of that element
[ "Returns", "ElementTree", "containing", "the", "result", "of", "<host", ">", "/", "System", "/", "deviceInfo", "or", "if", "element_to_query", "is", "not", "None", "the", "value", "of", "that", "element" ]
python
train
carter-j-h/iterable-python-wrapper
iterablepythonwrapper/client.py
https://github.com/carter-j-h/iterable-python-wrapper/blob/10d5db034ddfdfc3333efeee07fc9228b6a998c4/iterablepythonwrapper/client.py#L575-L640
def export_data_json(self, return_response_object, chunk_size=1024, path=None, data_type_name=None, date_range=None, delimiter=None, start_date_time=None, end_date_time=None, omit_fields=None, only_fields=None, campaign_id=None): """ Custom Keyword arguments: 1. return_response_object: if set to 'True', the 'r' response object will be returned. The benefit of this is that you can manipulate the data in any way you want. If set to false, we will write the response to a file where each Iterable activity you're exporting is a single-line JSON object. 2. chunk_size: Chunk size is used as a paremeter in the r.iter_content(chunk_size) method that controls how big the response chunks are (in bytes). Depending on the device used to make the request, this might change depending on the user. Default is set to 1 MB. 3. path: Allows you to choose the directory where the file is downloaded into. Example: "/Users/username/Desktop/" If not set the file will download into the current directory. """ call="/api/export/data.json" # make sure correct ranges are being used date_ranges = ["Today", "Yesterday", "BeforeToday", "All"] if isinstance(return_response_object, bool) is False: raise ValueError("'return_iterator_object'parameter must be a boolean") if chunk_size is not None and isinstance(chunk_size, int): pass else: raise ValueError("'chunk_size' parameter must be a integer") payload={} if data_type_name is not None: payload["dataTypeName"]= data_type_name if date_range is not None and date_range in date_ranges: payload["range"]= date_range if start_date_time is not None: payload["startDateTime"]= start_date_time if end_date_time is not None: payload["endDateTime"]= end_date_time if omit_fields is not None: payload["omitFields"]= omit_fields if only_fields is not None and isinstance(only_fields, list): payload["onlyFields"]= only_fields if campaign_id is not None: payload["campaignId"]= campaign_id return self.export_data_api(call=call, chunk_size=chunk_size, params=payload, 
path=path, return_response_object=return_response_object)
[ "def", "export_data_json", "(", "self", ",", "return_response_object", ",", "chunk_size", "=", "1024", ",", "path", "=", "None", ",", "data_type_name", "=", "None", ",", "date_range", "=", "None", ",", "delimiter", "=", "None", ",", "start_date_time", "=", "...
Custom Keyword arguments: 1. return_response_object: if set to 'True', the 'r' response object will be returned. The benefit of this is that you can manipulate the data in any way you want. If set to false, we will write the response to a file where each Iterable activity you're exporting is a single-line JSON object. 2. chunk_size: Chunk size is used as a paremeter in the r.iter_content(chunk_size) method that controls how big the response chunks are (in bytes). Depending on the device used to make the request, this might change depending on the user. Default is set to 1 MB. 3. path: Allows you to choose the directory where the file is downloaded into. Example: "/Users/username/Desktop/" If not set the file will download into the current directory.
[ "Custom", "Keyword", "arguments", ":" ]
python
train
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L897-L909
def get_http_boot_url(self): """Request the http boot url from system in uefi boot mode. :returns: URL for http boot :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode. """ if(self._is_boot_mode_uefi() is True): return self._get_bios_setting('UefiShellStartupUrl') else: msg = 'get_http_boot_url is not supported in the BIOS boot mode' raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def", "get_http_boot_url", "(", "self", ")", ":", "if", "(", "self", ".", "_is_boot_mode_uefi", "(", ")", "is", "True", ")", ":", "return", "self", ".", "_get_bios_setting", "(", "'UefiShellStartupUrl'", ")", "else", ":", "msg", "=", "'get_http_boot_url is no...
Request the http boot url from system in uefi boot mode. :returns: URL for http boot :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode.
[ "Request", "the", "http", "boot", "url", "from", "system", "in", "uefi", "boot", "mode", "." ]
python
train
ericsuh/dirichlet
dirichlet/dirichlet.py
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L324-L337
def _ipsi(y, tol=1.48e-9, maxiter=10): '''Inverse of psi (digamma) using Newton's method. For the purposes of Dirichlet MLE, since the parameters a[i] must always satisfy a > 0, we define ipsi :: R -> (0,inf).''' y = asanyarray(y, dtype='float') x0 = _piecewise(y, [y >= -2.22, y < -2.22], [(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))]) for i in xrange(maxiter): x1 = x0 - (psi(x0) - y)/_trigamma(x0) if norm(x1 - x0) < tol: return x1 x0 = x1 raise Exception( 'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
[ "def", "_ipsi", "(", "y", ",", "tol", "=", "1.48e-9", ",", "maxiter", "=", "10", ")", ":", "y", "=", "asanyarray", "(", "y", ",", "dtype", "=", "'float'", ")", "x0", "=", "_piecewise", "(", "y", ",", "[", "y", ">=", "-", "2.22", ",", "y", "<"...
Inverse of psi (digamma) using Newton's method. For the purposes of Dirichlet MLE, since the parameters a[i] must always satisfy a > 0, we define ipsi :: R -> (0,inf).
[ "Inverse", "of", "psi", "(", "digamma", ")", "using", "Newton", "s", "method", ".", "For", "the", "purposes", "of", "Dirichlet", "MLE", "since", "the", "parameters", "a", "[", "i", "]", "must", "always", "satisfy", "a", ">", "0", "we", "define", "ipsi"...
python
train
ByteInternet/amqpconsumer
amqpconsumer/events.py
https://github.com/ByteInternet/amqpconsumer/blob/144ab16b3fbba8ad30f8688ae1c58e3a6423b88b/amqpconsumer/events.py#L78-L89
def connect(self): """Connect to RabbitMQ, returning the connection handle. When the connection is established, the on_connection_open method will be invoked by pika. :rtype: pika.SelectConnection """ logger.debug('Connecting to %s', self._url) return pika.SelectConnection(pika.URLParameters(self._url), self.on_connection_open, stop_ioloop_on_close=False)
[ "def", "connect", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Connecting to %s'", ",", "self", ".", "_url", ")", "return", "pika", ".", "SelectConnection", "(", "pika", ".", "URLParameters", "(", "self", ".", "_url", ")", ",", "self", ".", "o...
Connect to RabbitMQ, returning the connection handle. When the connection is established, the on_connection_open method will be invoked by pika. :rtype: pika.SelectConnection
[ "Connect", "to", "RabbitMQ", "returning", "the", "connection", "handle", "." ]
python
train
tdryer/hangups
hangups/event.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/event.py#L53-L59
async def fire(self, *args, **kwargs): """Fire this event, calling all observers with the same arguments.""" logger.debug('Fired {}'.format(self)) for observer in self._observers: gen = observer(*args, **kwargs) if asyncio.iscoroutinefunction(observer): await gen
[ "async", "def", "fire", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'Fired {}'", ".", "format", "(", "self", ")", ")", "for", "observer", "in", "self", ".", "_observers", ":", "gen", "=", "observe...
Fire this event, calling all observers with the same arguments.
[ "Fire", "this", "event", "calling", "all", "observers", "with", "the", "same", "arguments", "." ]
python
valid
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L678-L738
def make_pixel_mask(steps, shift, default=0, value=1, enable_columns=None, mask=None): '''Generate pixel mask. Parameters ---------- steps : int Number of mask steps, e.g. steps=3 (every third pixel is enabled), steps=336 (one pixel per column), steps=672 (one pixel per double column). shift : int Shift mask by given value to the bottom (towards higher row numbers). From 0 to (steps - 1). default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. enable_columns : list List of columns where the shift mask will be applied. List elements can range from 1 to 80. mask : array_like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked (i.e. invalid) data. Masked pixels will be set to default value. Returns ------- mask_array : numpy.ndarray Mask array. Usage ----- shift_mask = 'enable' steps = 3 # three step mask for mask_step in range(steps): commands = [] commands.extend(self.register.get_commands("ConfMode")) mask_array = make_pixel_mask(steps=steps, step=mask_step) self.register.set_pixel_register_value(shift_mask, mask_array) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=shift_mask)) self.register_utils.send_commands(commands) # do something here ''' shape = (80, 336) # value = np.zeros(dimension, dtype = np.uint8) mask_array = np.full(shape, default, dtype=np.uint8) # FE columns and rows are starting from 1 if enable_columns: odd_columns = [odd - 1 for odd in enable_columns if odd % 2 != 0] even_columns = [even - 1 for even in enable_columns if even % 2 == 0] else: odd_columns = range(0, 80, 2) even_columns = range(1, 80, 2) odd_rows = np.arange(shift % steps, 336, steps) even_row_offset = ((steps // 2) + shift) % steps # // integer devision even_rows = np.arange(even_row_offset, 336, steps) if odd_columns: odd_col_rows = itertools.product(odd_columns, odd_rows) # get any combination of column 
and row, no for loop needed for odd_col_row in odd_col_rows: mask_array[odd_col_row[0], odd_col_row[1]] = value # advanced indexing if even_columns: even_col_rows = itertools.product(even_columns, even_rows) for even_col_row in even_col_rows: mask_array[even_col_row[0], even_col_row[1]] = value if mask is not None: mask_array = np.ma.array(mask_array, mask=mask, fill_value=default) mask_array = mask_array.filled() return mask_array
[ "def", "make_pixel_mask", "(", "steps", ",", "shift", ",", "default", "=", "0", ",", "value", "=", "1", ",", "enable_columns", "=", "None", ",", "mask", "=", "None", ")", ":", "shape", "=", "(", "80", ",", "336", ")", "# value = np.zeros(dimension, dtype...
Generate pixel mask. Parameters ---------- steps : int Number of mask steps, e.g. steps=3 (every third pixel is enabled), steps=336 (one pixel per column), steps=672 (one pixel per double column). shift : int Shift mask by given value to the bottom (towards higher row numbers). From 0 to (steps - 1). default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. enable_columns : list List of columns where the shift mask will be applied. List elements can range from 1 to 80. mask : array_like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked (i.e. invalid) data. Masked pixels will be set to default value. Returns ------- mask_array : numpy.ndarray Mask array. Usage ----- shift_mask = 'enable' steps = 3 # three step mask for mask_step in range(steps): commands = [] commands.extend(self.register.get_commands("ConfMode")) mask_array = make_pixel_mask(steps=steps, step=mask_step) self.register.set_pixel_register_value(shift_mask, mask_array) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=shift_mask)) self.register_utils.send_commands(commands) # do something here
[ "Generate", "pixel", "mask", ".", "Parameters", "----------", "steps", ":", "int", "Number", "of", "mask", "steps", "e", ".", "g", ".", "steps", "=", "3", "(", "every", "third", "pixel", "is", "enabled", ")", "steps", "=", "336", "(", "one", "pixel", ...
python
train
spacetelescope/synphot_refactor
synphot/spectrum.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/spectrum.py#L242-L248
def _process_generic_param(pval, def_unit, equivalencies=[]): """Process generic model parameter.""" if isinstance(pval, u.Quantity): outval = pval.to(def_unit, equivalencies).value else: # Assume already in desired unit outval = pval return outval
[ "def", "_process_generic_param", "(", "pval", ",", "def_unit", ",", "equivalencies", "=", "[", "]", ")", ":", "if", "isinstance", "(", "pval", ",", "u", ".", "Quantity", ")", ":", "outval", "=", "pval", ".", "to", "(", "def_unit", ",", "equivalencies", ...
Process generic model parameter.
[ "Process", "generic", "model", "parameter", "." ]
python
train
dlancer/django-pages-cms
pages/templatetags/pages_tags.py
https://github.com/dlancer/django-pages-cms/blob/441fad674d5ad4f6e05c953508950525dc0fa789/pages/templatetags/pages_tags.py#L34-L59
def get_page_object_by_name(context, name): """ **Arguments** ``name` name for object selection :return selected object """ selected_object = None try: for obj_type in context['page']['content']: for obj in context['page']['content'][obj_type]: if obj.name == name: selected_object = obj break if selected_object is None: for obj_type in context['page']['content']: for obj in context['page']['ext_content'][obj_type]: if obj.name == name: selected_object = obj break except TypeError: pass return selected_object
[ "def", "get_page_object_by_name", "(", "context", ",", "name", ")", ":", "selected_object", "=", "None", "try", ":", "for", "obj_type", "in", "context", "[", "'page'", "]", "[", "'content'", "]", ":", "for", "obj", "in", "context", "[", "'page'", "]", "[...
**Arguments** ``name` name for object selection :return selected object
[ "**", "Arguments", "**" ]
python
train
chrisjrn/registrasion
registrasion/reporting/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/reporting/views.py#L117-L167
def sales_payment_summary(): ''' Summarises paid items and payments. ''' def value_or_zero(aggregate, key): return aggregate[key] or 0 def sum_amount(payment_set): a = payment_set.values("amount").aggregate(total=Sum("amount")) return value_or_zero(a, "total") headings = ["Category", "Total"] data = [] # Summarise all sales made (= income.) sales = commerce.LineItem.objects.filter( invoice__status=commerce.Invoice.STATUS_PAID, ).values( "price", "quantity" ).aggregate( total=Sum(F("price") * F("quantity"), output_field=CURRENCY()), ) sales = value_or_zero(sales, "total") all_payments = sum_amount(commerce.PaymentBase.objects.all()) # Manual payments # Credit notes generated (total) # Payments made by credit note # Claimed credit notes all_credit_notes = 0 - sum_amount(commerce.CreditNote.objects.all()) unclaimed_credit_notes = 0 - sum_amount(commerce.CreditNote.unclaimed()) claimed_credit_notes = sum_amount( commerce.CreditNoteApplication.objects.all() ) refunded_credit_notes = 0 - sum_amount(commerce.CreditNote.refunded()) data.append(["Items on paid invoices", sales]) data.append(["All payments", all_payments]) data.append(["Sales - Payments ", sales - all_payments]) data.append(["All credit notes", all_credit_notes]) data.append(["Credit notes paid on invoices", claimed_credit_notes]) data.append(["Credit notes refunded", refunded_credit_notes]) data.append(["Unclaimed credit notes", unclaimed_credit_notes]) data.append([ "Credit notes - (claimed credit notes + unclaimed credit notes)", all_credit_notes - claimed_credit_notes - refunded_credit_notes - unclaimed_credit_notes ]) return ListReport("Sales and Payments Summary", headings, data)
[ "def", "sales_payment_summary", "(", ")", ":", "def", "value_or_zero", "(", "aggregate", ",", "key", ")", ":", "return", "aggregate", "[", "key", "]", "or", "0", "def", "sum_amount", "(", "payment_set", ")", ":", "a", "=", "payment_set", ".", "values", "...
Summarises paid items and payments.
[ "Summarises", "paid", "items", "and", "payments", "." ]
python
test
nschloe/pygmsh
pygmsh/built_in/geometry.py
https://github.com/nschloe/pygmsh/blob/1a1a07481aebe6c161b60dd31e0fbe1ddf330d61/pygmsh/built_in/geometry.py#L1006-L1019
def translate(self, input_entity, vector): """Translates input_entity itself by vector. Changes the input object. """ d = {1: "Line", 2: "Surface", 3: "Volume"} self._GMSH_CODE.append( "Translate {{{}}} {{ {}{{{}}}; }}".format( ", ".join([str(co) for co in vector]), d[input_entity.dimension], input_entity.id, ) ) return
[ "def", "translate", "(", "self", ",", "input_entity", ",", "vector", ")", ":", "d", "=", "{", "1", ":", "\"Line\"", ",", "2", ":", "\"Surface\"", ",", "3", ":", "\"Volume\"", "}", "self", ".", "_GMSH_CODE", ".", "append", "(", "\"Translate {{{}}} {{ {}{{...
Translates input_entity itself by vector. Changes the input object.
[ "Translates", "input_entity", "itself", "by", "vector", "." ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/litre/cplusplus.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/litre/cplusplus.py#L320-L325
def _execute(self, code): """Override of litre._execute; sets up variable context before evaluating code """ self.globals['example'] = self.example eval(code, self.globals)
[ "def", "_execute", "(", "self", ",", "code", ")", ":", "self", ".", "globals", "[", "'example'", "]", "=", "self", ".", "example", "eval", "(", "code", ",", "self", ".", "globals", ")" ]
Override of litre._execute; sets up variable context before evaluating code
[ "Override", "of", "litre", ".", "_execute", ";", "sets", "up", "variable", "context", "before", "evaluating", "code" ]
python
train
ellethee/argparseinator
argparseinator/utils.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/utils.py#L174-L215
def get_functarguments(func): """ Recupera gli argomenti dalla funzione stessa. """ argspec = inspect.getargspec(func) if argspec.defaults is not None: args = argspec.args[:-len(argspec.defaults)] kwargs = dict( zip(argspec.args[-len(argspec.defaults):], argspec.defaults)) else: args = argspec.args kwargs = {} if args and args[0] == 'self': args.pop(0) func.__named__ = [] arguments = [] shared = get_shared(func) for arg in args: if has_shared(arg, shared) is not False: continue if has_argument(arg, func.__cls__) is not False: continue arguments.append(([arg], {}, )) func.__named__.append(arg) for key, val in kwargs.items(): if has_shared(key, shared) is not False: continue if has_argument(key, func.__cls__) is not False: continue if isinstance(val, dict): flags = [val.pop('lflag', '--%s' % key)] short = val.pop('flag', None) dest = val.get('dest', key).replace('-', '_') if short: flags.insert(0, short) else: flags = ['--%s' % key] val = dict(default=val) dest = key.replace('-', '_') func.__named__.append(dest) arguments.append((flags, val, )) return arguments
[ "def", "get_functarguments", "(", "func", ")", ":", "argspec", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "argspec", ".", "defaults", "is", "not", "None", ":", "args", "=", "argspec", ".", "args", "[", ":", "-", "len", "(", "argspec", ...
Recupera gli argomenti dalla funzione stessa.
[ "Recupera", "gli", "argomenti", "dalla", "funzione", "stessa", "." ]
python
train
miguelgrinberg/python-socketio
socketio/base_manager.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/base_manager.py#L89-L95
def enter_room(self, sid, namespace, room): """Add a client to a room.""" if namespace not in self.rooms: self.rooms[namespace] = {} if room not in self.rooms[namespace]: self.rooms[namespace][room] = {} self.rooms[namespace][room][sid] = True
[ "def", "enter_room", "(", "self", ",", "sid", ",", "namespace", ",", "room", ")", ":", "if", "namespace", "not", "in", "self", ".", "rooms", ":", "self", ".", "rooms", "[", "namespace", "]", "=", "{", "}", "if", "room", "not", "in", "self", ".", ...
Add a client to a room.
[ "Add", "a", "client", "to", "a", "room", "." ]
python
train
google/openhtf
openhtf/plugs/usb/local_usb.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/local_usb.py#L158-L178
def open(cls, **kwargs): """See iter_open, but raises if multiple or no matches found.""" handle_iter = cls.iter_open(**kwargs) try: handle = six.next(handle_iter) except StopIteration: # No matching interface, raise. raise usb_exceptions.DeviceNotFoundError( 'Open failed with args: %s', kwargs) try: multiple_handle = six.next(handle_iter) except StopIteration: # Exactly one matching device, return it. return handle # We have more than one device, close the ones we opened and bail. handle.close() multiple_handle.close() raise usb_exceptions.MultipleInterfacesFoundError(kwargs)
[ "def", "open", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "handle_iter", "=", "cls", ".", "iter_open", "(", "*", "*", "kwargs", ")", "try", ":", "handle", "=", "six", ".", "next", "(", "handle_iter", ")", "except", "StopIteration", ":", "# No mat...
See iter_open, but raises if multiple or no matches found.
[ "See", "iter_open", "but", "raises", "if", "multiple", "or", "no", "matches", "found", "." ]
python
train
reingart/pyafipws
wslum.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslum.py#L254-L261
def AgregarBalanceLitrosPorcentajesSolidos(self, litros_remitidos, litros_decomisados, kg_grasa, kg_proteina, **kwargs): "Agrega balance litros y porcentajes sólidos a la liq. (obligatorio)" d = {'litrosRemitidos': litros_remitidos, 'litrosDecomisados': litros_decomisados, 'kgGrasa': kg_grasa, 'kgProteina': kg_proteina} self.solicitud['balanceLitrosPorcentajesSolidos'] = d
[ "def", "AgregarBalanceLitrosPorcentajesSolidos", "(", "self", ",", "litros_remitidos", ",", "litros_decomisados", ",", "kg_grasa", ",", "kg_proteina", ",", "*", "*", "kwargs", ")", ":", "d", "=", "{", "'litrosRemitidos'", ":", "litros_remitidos", ",", "'litrosDecomi...
Agrega balance litros y porcentajes sólidos a la liq. (obligatorio)
[ "Agrega", "balance", "litros", "y", "porcentajes", "sólidos", "a", "la", "liq", ".", "(", "obligatorio", ")" ]
python
train
respondcreate/django-versatileimagefield
versatileimagefield/datastructures/base.py
https://github.com/respondcreate/django-versatileimagefield/blob/d41e279c39cccffafbe876c67596184704ae8877/versatileimagefield/datastructures/base.py#L60-L104
def preprocess(self, image, image_format): """ Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: * `image`: a PIL Image instance * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}). """ save_kwargs = {'format': image_format} # Ensuring image is properly rotated if hasattr(image, '_getexif'): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs['icc_profile'] = image.info.get('icc_profile') if hasattr(self, 'preprocess_%s' % image_format): image, addl_save_kwargs = getattr( self, 'preprocess_%s' % image_format )(image=image) save_kwargs.update(addl_save_kwargs) return image, save_kwargs
[ "def", "preprocess", "(", "self", ",", "image", ",", "image_format", ")", ":", "save_kwargs", "=", "{", "'format'", ":", "image_format", "}", "# Ensuring image is properly rotated", "if", "hasattr", "(", "image", ",", "'_getexif'", ")", ":", "exif_datadict", "="...
Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: * `image`: a PIL Image instance * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}).
[ "Preprocess", "an", "image", "." ]
python
test
Neurita/boyle
boyle/utils/imports.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/imports.py#L6-L27
def import_pyfile(filepath, mod_name=None): """ Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module """ import sys if sys.version_info.major == 3: import importlib.machinery loader = importlib.machinery.SourceFileLoader('', filepath) mod = loader.load_module(mod_name) else: import imp mod = imp.load_source(mod_name, filepath) return mod
[ "def", "import_pyfile", "(", "filepath", ",", "mod_name", "=", "None", ")", ":", "import", "sys", "if", "sys", ".", "version_info", ".", "major", "==", "3", ":", "import", "importlib", ".", "machinery", "loader", "=", "importlib", ".", "machinery", ".", ...
Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module
[ "Imports", "the", "contents", "of", "filepath", "as", "a", "Python", "module", "." ]
python
valid
apache/incubator-mxnet
python/mxnet/kvstore.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore.py#L565-L603
def _set_updater(self, updater): """Sets a push updater into the store. This function only changes the local store. When running on multiple machines one must use `set_optimizer`. Parameters ---------- updater : function The updater function. Examples -------- >>> def update(key, input, stored): ... print "update on key: %d" % key ... stored += input * 2 >>> kv._set_updater(update) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> kv.push('3', mx.nd.ones(shape)) update on key: 3 >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 6. 6. 6.] [ 6. 6. 6.]] """ self._updater = updater # set updater with int keys _updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._updater_func = _updater_proto(_updater_wrapper(updater)) # set updater with str keys _str_updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._str_updater_func = _str_updater_proto(_updater_wrapper(updater)) check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func, self._str_updater_func, None))
[ "def", "_set_updater", "(", "self", ",", "updater", ")", ":", "self", ".", "_updater", "=", "updater", "# set updater with int keys", "_updater_proto", "=", "ctypes", ".", "CFUNCTYPE", "(", "None", ",", "ctypes", ".", "c_int", ",", "NDArrayHandle", ",", "NDArr...
Sets a push updater into the store. This function only changes the local store. When running on multiple machines one must use `set_optimizer`. Parameters ---------- updater : function The updater function. Examples -------- >>> def update(key, input, stored): ... print "update on key: %d" % key ... stored += input * 2 >>> kv._set_updater(update) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> kv.push('3', mx.nd.ones(shape)) update on key: 3 >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 6. 6. 6.] [ 6. 6. 6.]]
[ "Sets", "a", "push", "updater", "into", "the", "store", "." ]
python
train
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L364-L381
def finalize(self): """Shutdown the library and clean up the model. Note that the Fortran library's cleanup code is not up to snuff yet, so the cleanup is not perfect. Note also that the working directory is changed back to the original one. """ self.library.finalize.argtypes = [] self.library.finalize.restype = c_int ierr = wrap(self.library.finalize)() # always go back to previous directory logger.info('cd {}'.format(self.original_dir)) # This one doesn't work. os.chdir(self.original_dir) if ierr: errormsg = "Finalizing model {engine} failed with exit code {code}" raise RuntimeError(errormsg.format(engine=self.engine, code=ierr))
[ "def", "finalize", "(", "self", ")", ":", "self", ".", "library", ".", "finalize", ".", "argtypes", "=", "[", "]", "self", ".", "library", ".", "finalize", ".", "restype", "=", "c_int", "ierr", "=", "wrap", "(", "self", ".", "library", ".", "finalize...
Shutdown the library and clean up the model. Note that the Fortran library's cleanup code is not up to snuff yet, so the cleanup is not perfect. Note also that the working directory is changed back to the original one.
[ "Shutdown", "the", "library", "and", "clean", "up", "the", "model", "." ]
python
train
TheRealLink/pylgtv
pylgtv/webos_client.py
https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L146-L172
def _command(self, msg): """Send a command to the tv.""" logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port)); try: websocket = yield from websockets.connect( "ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect) except: logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port)); return False logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port)); try: yield from self._send_register_payload(websocket) if not self.client_key: raise PyLGTVPairException("Unable to pair") yield from websocket.send(json.dumps(msg)) if msg['type'] == 'request': raw_response = yield from websocket.recv() self.last_response = json.loads(raw_response) finally: logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port)); yield from websocket.close()
[ "def", "_command", "(", "self", ",", "msg", ")", ":", "logger", ".", "debug", "(", "'send command to %s'", ",", "\"ws://{}:{}\"", ".", "format", "(", "self", ".", "ip", ",", "self", ".", "port", ")", ")", "try", ":", "websocket", "=", "yield", "from", ...
Send a command to the tv.
[ "Send", "a", "command", "to", "the", "tv", "." ]
python
train
guaix-ucm/numina
numina/array/interpolation.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L123-L128
def _create_s(y, h): """Estimate secants""" s = np.zeros_like(y) s[:-1] = (y[1:] - y[:-1]) / h[:-1] s[-1] = 0.0 return s
[ "def", "_create_s", "(", "y", ",", "h", ")", ":", "s", "=", "np", ".", "zeros_like", "(", "y", ")", "s", "[", ":", "-", "1", "]", "=", "(", "y", "[", "1", ":", "]", "-", "y", "[", ":", "-", "1", "]", ")", "/", "h", "[", ":", "-", "1...
Estimate secants
[ "Estimate", "secants" ]
python
train
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L11706-L11733
def list_namespaced_config_map(self, namespace, **kwargs): """ list or watch objects of kind ConfigMap This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_config_map(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1ConfigMapList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_namespaced_config_map_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_config_map_with_http_info(namespace, **kwargs) return data
[ "def", "list_namespaced_config_map", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_namespa...
list or watch objects of kind ConfigMap This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_config_map(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1ConfigMapList If the method is called asynchronously, returns the request thread.
[ "list", "or", "watch", "objects", "of", "kind", "ConfigMap", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "th...
python
train
mlenzen/collections-extended
collections_extended/range_map.py
https://github.com/mlenzen/collections-extended/blob/ee9e86f6bbef442dbebcb3a5970642c5c969e2cf/collections_extended/range_map.py#L330-L346
def delete(self, start=None, stop=None): """Delete the range from start to stop from self. Raises: KeyError: If part of the passed range isn't mapped. """ _check_start_stop(start, stop) start_loc = self._bisect_right(start) - 1 if stop is None: stop_loc = len(self._keys) else: stop_loc = self._bisect_left(stop) for value in self._values[start_loc:stop_loc]: if value is NOT_SET: raise KeyError((start, stop)) # this is inefficient, we've already found the sub ranges self.set(NOT_SET, start=start, stop=stop)
[ "def", "delete", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "_check_start_stop", "(", "start", ",", "stop", ")", "start_loc", "=", "self", ".", "_bisect_right", "(", "start", ")", "-", "1", "if", "stop", "is", "None",...
Delete the range from start to stop from self. Raises: KeyError: If part of the passed range isn't mapped.
[ "Delete", "the", "range", "from", "start", "to", "stop", "from", "self", "." ]
python
train
Cog-Creators/Red-Lavalink
lavalink/utils.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/utils.py#L1-L6
def format_time(time): """ Formats the given time into HH:MM:SS """ h, r = divmod(time / 1000, 3600) m, s = divmod(r, 60) return "%02d:%02d:%02d" % (h, m, s)
[ "def", "format_time", "(", "time", ")", ":", "h", ",", "r", "=", "divmod", "(", "time", "/", "1000", ",", "3600", ")", "m", ",", "s", "=", "divmod", "(", "r", ",", "60", ")", "return", "\"%02d:%02d:%02d\"", "%", "(", "h", ",", "m", ",", "s", ...
Formats the given time into HH:MM:SS
[ "Formats", "the", "given", "time", "into", "HH", ":", "MM", ":", "SS" ]
python
train
e7dal/bubble3
behave4cmd0/textutil.py
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/textutil.py#L164-L172
def text_remove_empty_lines(text): """ Whitespace normalization: - Strip empty lines - Strip trailing whitespace """ lines = [ line.rstrip() for line in text.splitlines() if line.strip() ] return "\n".join(lines)
[ "def", "text_remove_empty_lines", "(", "text", ")", ":", "lines", "=", "[", "line", ".", "rstrip", "(", ")", "for", "line", "in", "text", ".", "splitlines", "(", ")", "if", "line", ".", "strip", "(", ")", "]", "return", "\"\\n\"", ".", "join", "(", ...
Whitespace normalization: - Strip empty lines - Strip trailing whitespace
[ "Whitespace", "normalization", ":" ]
python
train
hydpy-dev/hydpy
hydpy/auxs/iuhtools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/iuhtools.py#L195-L208
def delay_response_series(self): """A tuple of two numpy arrays, which hold the time delays and the associated iuh values respectively.""" delays = [] responses = [] sum_responses = 0. for t in itertools.count(self.dt_response/2., self.dt_response): delays.append(t) response = self(t) responses.append(response) sum_responses += self.dt_response*response if (sum_responses > .9) and (response < self.smallest_response): break return numpy.array(delays), numpy.array(responses)
[ "def", "delay_response_series", "(", "self", ")", ":", "delays", "=", "[", "]", "responses", "=", "[", "]", "sum_responses", "=", "0.", "for", "t", "in", "itertools", ".", "count", "(", "self", ".", "dt_response", "/", "2.", ",", "self", ".", "dt_respo...
A tuple of two numpy arrays, which hold the time delays and the associated iuh values respectively.
[ "A", "tuple", "of", "two", "numpy", "arrays", "which", "hold", "the", "time", "delays", "and", "the", "associated", "iuh", "values", "respectively", "." ]
python
train
secdev/scapy
scapy/utils6.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils6.py#L282-L338
def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2): """ Generate a Link-Scoped Multicast Address as described in RFC 4489. Returned value is in printable notation. 'addr' parameter specifies the link-local address to use for generating Link-scoped multicast address IID. By default, the function returns a ::/96 prefix (aka last 32 bits of returned address are null). If a group id is provided through 'grpid' parameter, last 32 bits of the address are set to that value (accepted formats : b'\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896). By default, generated address scope is Link-Local (2). That value can be modified by passing a specific 'scope' value as an argument of the function. RFC 4489 only authorizes scope values <= 2. Enforcement is performed by the function (None will be returned). If no link-local address can be used to generate the Link-Scoped IPv6 Multicast address, or if another error occurs, None is returned. """ if scope not in [0, 1, 2]: return None try: if not in6_islladdr(addr): return None addr = inet_pton(socket.AF_INET6, addr) except Exception: warning("in6_getLinkScopedMcastPrefix(): Invalid address provided") return None iid = addr[8:] if grpid is None: grpid = b'\x00\x00\x00\x00' else: if isinstance(grpid, (bytes, str)): if len(grpid) == 8: try: grpid = int(grpid, 16) & 0xffffffff except Exception: warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") # noqa: E501 return None elif len(grpid) == 4: try: grpid = struct.unpack("!I", grpid)[0] except Exception: warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") # noqa: E501 return None grpid = struct.pack("!I", grpid) flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope)) plen = b'\xff' res = b'\x00' a = b'\xff' + flgscope + res + plen + iid + grpid return inet_ntop(socket.AF_INET6, a)
[ "def", "in6_getLinkScopedMcastAddr", "(", "addr", ",", "grpid", "=", "None", ",", "scope", "=", "2", ")", ":", "if", "scope", "not", "in", "[", "0", ",", "1", ",", "2", "]", ":", "return", "None", "try", ":", "if", "not", "in6_islladdr", "(", "addr...
Generate a Link-Scoped Multicast Address as described in RFC 4489. Returned value is in printable notation. 'addr' parameter specifies the link-local address to use for generating Link-scoped multicast address IID. By default, the function returns a ::/96 prefix (aka last 32 bits of returned address are null). If a group id is provided through 'grpid' parameter, last 32 bits of the address are set to that value (accepted formats : b'\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896). By default, generated address scope is Link-Local (2). That value can be modified by passing a specific 'scope' value as an argument of the function. RFC 4489 only authorizes scope values <= 2. Enforcement is performed by the function (None will be returned). If no link-local address can be used to generate the Link-Scoped IPv6 Multicast address, or if another error occurs, None is returned.
[ "Generate", "a", "Link", "-", "Scoped", "Multicast", "Address", "as", "described", "in", "RFC", "4489", ".", "Returned", "value", "is", "in", "printable", "notation", "." ]
python
train
PaulHancock/Aegean
AegeanTools/fitting.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L228-L279
def lmfit_jacobian(pars, x, y, errs=None, B=None, emp=False): """ Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian` which gives the output in a format that is required for lmfit. Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated errs : list a vector of 1\sigma errors (optional). Default = None B : 2d-array a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix` emp : bool If true the use the empirical Jacobian, otherwise use the analytical one. Default = False. Returns ------- j : 2d-array A Jacobian. See Also -------- :func:`AegeanTools.fitting.Bmatrix` :func:`AegeanTools.fitting.jacobian` :func:`AegeanTools.fitting.emp_jacobian` """ if emp: matrix = emp_jacobian(pars, x, y) else: # calculate in the normal way matrix = jacobian(pars, x, y) # now munge this to be as expected for lmfit matrix = np.vstack(matrix) if errs is not None: matrix /= errs # matrix = matrix.dot(errs) if B is not None: matrix = matrix.dot(B) matrix = np.transpose(matrix) return matrix
[ "def", "lmfit_jacobian", "(", "pars", ",", "x", ",", "y", ",", "errs", "=", "None", ",", "B", "=", "None", ",", "emp", "=", "False", ")", ":", "if", "emp", ":", "matrix", "=", "emp_jacobian", "(", "pars", ",", "x", ",", "y", ")", "else", ":", ...
Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian` which gives the output in a format that is required for lmfit. Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated errs : list a vector of 1\sigma errors (optional). Default = None B : 2d-array a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix` emp : bool If true the use the empirical Jacobian, otherwise use the analytical one. Default = False. Returns ------- j : 2d-array A Jacobian. See Also -------- :func:`AegeanTools.fitting.Bmatrix` :func:`AegeanTools.fitting.jacobian` :func:`AegeanTools.fitting.emp_jacobian`
[ "Wrapper", "around", ":", "func", ":", "AegeanTools", ".", "fitting", ".", "jacobian", "and", ":", "func", ":", "AegeanTools", ".", "fitting", ".", "emp_jacobian", "which", "gives", "the", "output", "in", "a", "format", "that", "is", "required", "for", "lm...
python
train