repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L1875-L1893
def get_dtext(value): """ dtext = <printable ascii except \ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any ASCII other than the RFC defined printable ASCII an NonPrintableDefect is added to the token's defects list. Quoted pairs are converted to their unquoted values, so what is returned is a ptext token, in this case a ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is added to the returned token's defect list. """ ptext, value, had_qp = _get_ptext_to_endchars(value, '[]') ptext = ValueTerminal(ptext, 'ptext') if had_qp: ptext.defects.append(errors.ObsoleteHeaderDefect( "quoted printable found in domain-literal")) _validate_xtext(ptext) return ptext, value
[ "def", "get_dtext", "(", "value", ")", ":", "ptext", ",", "value", ",", "had_qp", "=", "_get_ptext_to_endchars", "(", "value", ",", "'[]'", ")", "ptext", "=", "ValueTerminal", "(", "ptext", ",", "'ptext'", ")", "if", "had_qp", ":", "ptext", ".", "defects...
dtext = <printable ascii except \ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any ASCII other than the RFC defined printable ASCII an NonPrintableDefect is added to the token's defects list. Quoted pairs are converted to their unquoted values, so what is returned is a ptext token, in this case a ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is added to the returned token's defect list.
[ "dtext", "=", "<printable", "ascii", "except", "\\", "[", "]", ">", "/", "obs", "-", "dtext", "obs", "-", "dtext", "=", "obs", "-", "NO", "-", "WS", "-", "CTL", "/", "quoted", "-", "pair" ]
python
train
43.894737
jealous/stockstats
stockstats.py
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L270-L298
def _get_rsi(cls, df, n_days): """ Calculate the RSI (Relative Strength Index) within N days calculated based on the formula at: https://en.wikipedia.org/wiki/Relative_strength_index :param df: data :param n_days: N days :return: None """ n_days = int(n_days) d = df['close_-1_d'] df['closepm'] = (d + d.abs()) / 2 df['closenm'] = (-d + d.abs()) / 2 closepm_smma_column = 'closepm_{}_smma'.format(n_days) closenm_smma_column = 'closenm_{}_smma'.format(n_days) p_ema = df[closepm_smma_column] n_ema = df[closenm_smma_column] rs_column_name = 'rs_{}'.format(n_days) rsi_column_name = 'rsi_{}'.format(n_days) df[rs_column_name] = rs = p_ema / n_ema df[rsi_column_name] = 100 - 100 / (1.0 + rs) del df['closepm'] del df['closenm'] del df[closepm_smma_column] del df[closenm_smma_column]
[ "def", "_get_rsi", "(", "cls", ",", "df", ",", "n_days", ")", ":", "n_days", "=", "int", "(", "n_days", ")", "d", "=", "df", "[", "'close_-1_d'", "]", "df", "[", "'closepm'", "]", "=", "(", "d", "+", "d", ".", "abs", "(", ")", ")", "/", "2", ...
Calculate the RSI (Relative Strength Index) within N days calculated based on the formula at: https://en.wikipedia.org/wiki/Relative_strength_index :param df: data :param n_days: N days :return: None
[ "Calculate", "the", "RSI", "(", "Relative", "Strength", "Index", ")", "within", "N", "days", "calculated", "based", "on", "the", "formula", "at", ":", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Relative_strength_index", ":",...
python
train
33.482759
bennylope/smartystreets.py
smartystreets/client.py
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L186-L200
def street_address(self, address): """ Geocode one and only address, get a single Address object back >>> client.street_address("100 Main St, Anywhere, USA") >>> client.street_address({"street": "100 Main St, anywhere USA"}) :param address: string or dictionary with street address information :return: an Address object or None for no match """ address = self.street_addresses([address]) if not len(address): return None return Address(address[0])
[ "def", "street_address", "(", "self", ",", "address", ")", ":", "address", "=", "self", ".", "street_addresses", "(", "[", "address", "]", ")", "if", "not", "len", "(", "address", ")", ":", "return", "None", "return", "Address", "(", "address", "[", "0...
Geocode one and only address, get a single Address object back >>> client.street_address("100 Main St, Anywhere, USA") >>> client.street_address({"street": "100 Main St, anywhere USA"}) :param address: string or dictionary with street address information :return: an Address object or None for no match
[ "Geocode", "one", "and", "only", "address", "get", "a", "single", "Address", "object", "back" ]
python
train
35.266667
rwl/pylon
pylon/io/excel.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/excel.py#L56-L63
def write_bus_data(self, file): """ Writes bus data to an Excel spreadsheet. """ bus_sheet = self.book.add_sheet("Buses") for i, bus in enumerate(self.case.buses): for j, attr in enumerate(BUS_ATTRS): bus_sheet.write(i, j, getattr(bus, attr))
[ "def", "write_bus_data", "(", "self", ",", "file", ")", ":", "bus_sheet", "=", "self", ".", "book", ".", "add_sheet", "(", "\"Buses\"", ")", "for", "i", ",", "bus", "in", "enumerate", "(", "self", ".", "case", ".", "buses", ")", ":", "for", "j", ",...
Writes bus data to an Excel spreadsheet.
[ "Writes", "bus", "data", "to", "an", "Excel", "spreadsheet", "." ]
python
train
37
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L175-L196
def _get_rule_definition(self, rule): """Generates the source code for a rule.""" fmt = """def {rule_fxn_name}(self, text): {indent}\"\"\"{rule_source}\"\"\" {indent}self._attempting(text) {indent}return {rule_definition}(text){transform} """ fmt = self._clean_fmt(fmt) source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True) # All the primitives will accept a string x in place of terminal(x). This is terminal shorthand. # However, if a rule is only a wrapper around a single terminal, we have to actually make a # terminal call. This handles that situation. if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')): source = ["terminal({})".format(source[0])] rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name), indent=self.indent, rule_source=self._get_rule_source(rule), rule_definition="\n".join(source), transform=self._get_rule_transform(rule)) return self._indent(rule_source, 1)
[ "def", "_get_rule_definition", "(", "self", ",", "rule", ")", ":", "fmt", "=", "\"\"\"def {rule_fxn_name}(self, text):\n {indent}\\\"\\\"\\\"{rule_source}\\\"\\\"\\\"\n {indent}self._attempting(text)\n {indent}return {rule_definition}(text){transform}\n ...
Generates the source code for a rule.
[ "Generates", "the", "source", "code", "for", "a", "rule", "." ]
python
test
52.681818
pywavefront/PyWavefront
pywavefront/cache.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/cache.py#L102-L116
def _load_vertex_buffers(self): """Load each vertex buffer into each material""" fd = gzip.open(cache_name(self.file_name), 'rb') for buff in self.meta.vertex_buffers: mat = self.wavefront.materials.get(buff['material']) if not mat: mat = Material(name=buff['material'], is_default=True) self.wavefront.materials[mat.name] = mat mat.vertex_format = buff['vertex_format'] self.load_vertex_buffer(fd, mat, buff['byte_length']) fd.close()
[ "def", "_load_vertex_buffers", "(", "self", ")", ":", "fd", "=", "gzip", ".", "open", "(", "cache_name", "(", "self", ".", "file_name", ")", ",", "'rb'", ")", "for", "buff", "in", "self", ".", "meta", ".", "vertex_buffers", ":", "mat", "=", "self", "...
Load each vertex buffer into each material
[ "Load", "each", "vertex", "buffer", "into", "each", "material" ]
python
train
35.8
3ll3d00d/vibe
backend/src/recorder/resources/measurements.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/recorder/resources/measurements.py#L163-L183
def execute(self, duration): """ Executes the measurement, recording the event status. :param duration: the time to run for. :return: nothing. """ self.statuses.append({'name': ScheduledMeasurementStatus.RUNNING.name, 'time': datetime.utcnow()}) try: self.recording = True self.device.start(self.name, durationInSeconds=duration) finally: self.recording = False if self.device.status == RecordingDeviceStatus.FAILED: self.statuses.append({'name': ScheduledMeasurementStatus.FAILED.name, 'time': datetime.utcnow(), 'reason': self.device.failureCode}) else: self.statuses.append({'name': ScheduledMeasurementStatus.COMPLETE.name, 'time': datetime.utcnow()}) # this is a bit of a hack, need to remove this at some point by refactoring the way measurements are stored if self.callback is not None: self.callback()
[ "def", "execute", "(", "self", ",", "duration", ")", ":", "self", ".", "statuses", ".", "append", "(", "{", "'name'", ":", "ScheduledMeasurementStatus", ".", "RUNNING", ".", "name", ",", "'time'", ":", "datetime", ".", "utcnow", "(", ")", "}", ")", "tr...
Executes the measurement, recording the event status. :param duration: the time to run for. :return: nothing.
[ "Executes", "the", "measurement", "recording", "the", "event", "status", ".", ":", "param", "duration", ":", "the", "time", "to", "run", "for", ".", ":", "return", ":", "nothing", "." ]
python
train
48.809524
pypa/pipenv
pipenv/vendor/jinja2/runtime.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/runtime.py#L219-L221
def get_exported(self): """Get a new dict with the exported variables.""" return dict((k, self.vars[k]) for k in self.exported_vars)
[ "def", "get_exported", "(", "self", ")", ":", "return", "dict", "(", "(", "k", ",", "self", ".", "vars", "[", "k", "]", ")", "for", "k", "in", "self", ".", "exported_vars", ")" ]
Get a new dict with the exported variables.
[ "Get", "a", "new", "dict", "with", "the", "exported", "variables", "." ]
python
train
48.666667
ubernostrum/django-contact-form
src/contact_form/forms.py
https://github.com/ubernostrum/django-contact-form/blob/7dd8491cd75fc39d0edfb2c14c270a130d5afa9e/src/contact_form/forms.py#L70-L90
def get_context(self): """ Return the context used to render the templates for the email subject and body. By default, this context includes: * All of the validated values in the form, as variables of the same names as their fields. * The current ``Site`` object, as the variable ``site``. * Any additional variables added by context processors (this will be a ``RequestContext``). """ if not self.is_valid(): raise ValueError( "Cannot generate Context from invalid contact form" ) return dict(self.cleaned_data, site=get_current_site(self.request))
[ "def", "get_context", "(", "self", ")", ":", "if", "not", "self", ".", "is_valid", "(", ")", ":", "raise", "ValueError", "(", "\"Cannot generate Context from invalid contact form\"", ")", "return", "dict", "(", "self", ".", "cleaned_data", ",", "site", "=", "g...
Return the context used to render the templates for the email subject and body. By default, this context includes: * All of the validated values in the form, as variables of the same names as their fields. * The current ``Site`` object, as the variable ``site``. * Any additional variables added by context processors (this will be a ``RequestContext``).
[ "Return", "the", "context", "used", "to", "render", "the", "templates", "for", "the", "email", "subject", "and", "body", "." ]
python
train
32.095238
StackStorm/pybind
pybind/slxos/v17s_1_02/adj_neighbor_entries_state/adj_neighbor/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/adj_neighbor_entries_state/adj_neighbor/__init__.py#L158-L181
def _set_adj_type(self, v, load=False): """ Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type) If this variable is read-only (config: false) in the source YANG file, then _set_adj_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adj_type() directly. YANG Description: Type of ISIS Adjacency """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """adj_type must be of a type compatible with isis-adj-type""", 'defined-type': "brocade-isis-operational:isis-adj-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)""", }) self.__adj_type = t if hasattr(self, '_set'): self._set()
[ "def", "_set_adj_type", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base"...
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type) If this variable is read-only (config: false) in the source YANG file, then _set_adj_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adj_type() directly. YANG Description: Type of ISIS Adjacency
[ "Setter", "method", "for", "adj_type", "mapped", "from", "YANG", "variable", "/", "adj_neighbor_entries_state", "/", "adj_neighbor", "/", "adj_type", "(", "isis", "-", "adj", "-", "type", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config...
python
train
90.208333
criteo/gourde
gourde/gourde.py
https://github.com/criteo/gourde/blob/9a274e534a2af5d2b2a5e99f10c59010adb94863/gourde/gourde.py#L307-L335
def run_with_gunicorn(self, **options): """Run with gunicorn.""" import gunicorn.app.base from gunicorn.six import iteritems import multiprocessing class GourdeApplication(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super(GourdeApplication, self).__init__() def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) if key in self.cfg.settings and value is not None]) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): return self.application options = { 'bind': '%s:%s' % (self.host, self.port), 'workers': self.threads or ((multiprocessing.cpu_count() * 2) + 1), 'debug': self.debug, **options, } GourdeApplication(self.app, options).run()
[ "def", "run_with_gunicorn", "(", "self", ",", "*", "*", "options", ")", ":", "import", "gunicorn", ".", "app", ".", "base", "from", "gunicorn", ".", "six", "import", "iteritems", "import", "multiprocessing", "class", "GourdeApplication", "(", "gunicorn", ".", ...
Run with gunicorn.
[ "Run", "with", "gunicorn", "." ]
python
train
36.758621
liip/taxi
taxi/timesheet/parser.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/parser.py#L189-L260
def create_entry_line_from_text(self, text): """ Try to parse the given text line and extract and entry. Return an :class:`~taxi.timesheet.lines.Entry` object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`. """ split_line = re.match(self.entry_line_regexp, text) if not split_line: raise ParseError("Line must have an alias, a duration and a description") alias = split_line.group('alias') start_time = end_time = None if split_line.group('start_time') is not None: if split_line.group('start_time'): try: start_time = create_time_from_text(split_line.group('start_time')) except ValueError: raise ParseError("Start time is not a valid time, it must be in format hh:mm or hhmm") else: start_time = None if split_line.group('end_time') is not None: if split_line.group('end_time') == '?': end_time = None else: try: end_time = create_time_from_text(split_line.group('end_time')) except ValueError: raise ParseError("End time is not a valid time, it must be in format hh:mm or hhmm") if split_line.group('duration') is not None: duration = float(split_line.group('duration')) elif start_time or end_time: duration = (start_time, end_time) else: duration = (None, None) description = split_line.group('description') # Parse and set line flags if split_line.group('flags'): try: flags = self.extract_flags_from_text(split_line.group('flags')) # extract_flags_from_text will raise `KeyError` if one of the flags is not recognized. 
This should never # happen though as the list of accepted flags is bundled in self.entry_line_regexp except KeyError as e: raise ParseError(*e.args) else: flags = set() # Backwards compatibility with previous notation that allowed to end the alias with a `?` to ignore it if alias.endswith('?'): flags.add(Entry.FLAG_IGNORED) alias = alias[:-1] if description == '?': flags.add(Entry.FLAG_IGNORED) line = ( split_line.group('flags') or '', split_line.group('spacing1') or '', split_line.group('alias'), split_line.group('spacing2'), split_line.group('time'), split_line.group('spacing3'), split_line.group('description'), ) entry_line = Entry(alias, duration, description, flags=flags, text=line) return entry_line
[ "def", "create_entry_line_from_text", "(", "self", ",", "text", ")", ":", "split_line", "=", "re", ".", "match", "(", "self", ".", "entry_line_regexp", ",", "text", ")", "if", "not", "split_line", ":", "raise", "ParseError", "(", "\"Line must have an alias, a du...
Try to parse the given text line and extract and entry. Return an :class:`~taxi.timesheet.lines.Entry` object if parsing is successful, otherwise raise :exc:`~taxi.exceptions.ParseError`.
[ "Try", "to", "parse", "the", "given", "text", "line", "and", "extract", "and", "entry", ".", "Return", "an", ":", "class", ":", "~taxi", ".", "timesheet", ".", "lines", ".", "Entry", "object", "if", "parsing", "is", "successful", "otherwise", "raise", ":...
python
train
38.708333
saltstack/salt
salt/modules/boto_s3_bucket.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3_bucket.py#L413-L446
def list_objects(Bucket, Delimiter=None, EncodingType=None, Prefix=None, FetchOwner=False, StartAfter=None, region=None, key=None, keyid=None, profile=None): ''' List objects in a given S3 bucket. Returns a list of objects. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.list_objects mybucket ''' try: Contents = [] args = {'Bucket': Bucket, 'FetchOwner': FetchOwner} args.update({'Delimiter': Delimiter}) if Delimiter else None args.update({'EncodingType': EncodingType}) if Delimiter else None args.update({'Prefix': Prefix}) if Prefix else None args.update({'StartAfter': StartAfter}) if StartAfter else None conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) IsTruncated = True while IsTruncated: ret = conn.list_objects_v2(**args) IsTruncated = ret.get('IsTruncated', False) if IsTruncated in ('True', 'true', True): args['ContinuationToken'] = ret['NextContinuationToken'] Contents += ret.get('Contents', []) return {'Contents': Contents} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "list_objects", "(", "Bucket", ",", "Delimiter", "=", "None", ",", "EncodingType", "=", "None", ",", "Prefix", "=", "None", ",", "FetchOwner", "=", "False", ",", "StartAfter", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ","...
List objects in a given S3 bucket. Returns a list of objects. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.list_objects mybucket
[ "List", "objects", "in", "a", "given", "S3", "bucket", "." ]
python
train
36.882353
oemof/oemof.db
oemof/db/coastdat.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L131-L150
def create_single_weather(df, rename_dc): """Create an oemof weather object for the given geometry""" my_weather = weather.FeedinWeather() data_height = {} name = None # Create a pandas.DataFrame with the time series of the weather data set weather_df = pd.DataFrame(index=df.time_series.iloc[0].index) for row in df.iterrows(): key = rename_dc[row[1].type] weather_df[key] = row[1].time_series data_height[key] = row[1].height if not np.isnan(row[1].height) else 0 name = row[1].gid my_weather.data = weather_df my_weather.timezone = weather_df.index.tz my_weather.longitude = df.geom_point.iloc[0].x my_weather.latitude = df.geom_point.iloc[0].y my_weather.geometry = df.geom_point.iloc[0] my_weather.data_height = data_height my_weather.name = name return my_weather
[ "def", "create_single_weather", "(", "df", ",", "rename_dc", ")", ":", "my_weather", "=", "weather", ".", "FeedinWeather", "(", ")", "data_height", "=", "{", "}", "name", "=", "None", "# Create a pandas.DataFrame with the time series of the weather data set", "weather_d...
Create an oemof weather object for the given geometry
[ "Create", "an", "oemof", "weather", "object", "for", "the", "given", "geometry" ]
python
train
42.1
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/interengine/bintree.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/interengine/bintree.py#L242-L244
def allreduce(self, f, value, flat=True): """parallel reduce followed by broadcast of the result""" return self.reduce(f, value, flat=flat, all=True)
[ "def", "allreduce", "(", "self", ",", "f", ",", "value", ",", "flat", "=", "True", ")", ":", "return", "self", ".", "reduce", "(", "f", ",", "value", ",", "flat", "=", "flat", ",", "all", "=", "True", ")" ]
parallel reduce followed by broadcast of the result
[ "parallel", "reduce", "followed", "by", "broadcast", "of", "the", "result" ]
python
test
54.333333
brainiak/brainiak
brainiak/utils/utils.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L558-L631
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff): """ Utility called by gen_design. It reads in one or more stimulus timing file comforming to AFNI style, and return a list (size of ``[number of runs \\* number of conditions]``) of dictionary including onsets, durations and weights of each event. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files should follow the style of AFNI stimulus timing files, refer to gen_design. n_C: integer, number of task conditions n_S: integer, number of scans scan_onoff: list of numbers. The onset of each scan after concatenating all scans, together with the offset of the last scan. For example, if 3 scans of duration 100s, 150s, 120s are run, scan_onoff is [0, 100, 250, 370] Returns ------- design_info: list of stimulus information The first level of the list correspond to different scans. The second level of the list correspond to different conditions. Each item in the list is a dictiornary with keys "onset", "duration" and "weight". If one condition includes no event in a scan, the values of these keys in that scan of the condition are empty lists. See also -------- gen_design """ design_info = [[{'onset': [], 'duration': [], 'weight': []} for i_c in range(n_C)] for i_s in range(n_S)] # Read stimulus timing files for i_c in range(n_C): with open(stimtime_files[i_c]) as f: text = f.readlines() assert len(text) == n_S, \ 'Number of lines does not match number of runs!' 
for i_s, line in enumerate(text): events = line.strip().split() if events[0] == '*': continue for event in events: assert event != '*' tmp = str.split(event, ':') if len(tmp) == 2: duration = float(tmp[1]) else: duration = 1.0 tmp = str.split(tmp[0], '*') if len(tmp) == 2: weight = float(tmp[1]) else: weight = 1.0 if (float(tmp[0]) >= 0 and float(tmp[0]) < scan_onoff[i_s + 1] - scan_onoff[i_s]): design_info[i_s][i_c]['onset'].append(float(tmp[0])) design_info[i_s][i_c]['duration'].append(duration) design_info[i_s][i_c]['weight'].append(weight) return design_info
[ "def", "_read_stimtime_AFNI", "(", "stimtime_files", ",", "n_C", ",", "n_S", ",", "scan_onoff", ")", ":", "design_info", "=", "[", "[", "{", "'onset'", ":", "[", "]", ",", "'duration'", ":", "[", "]", ",", "'weight'", ":", "[", "]", "}", "for", "i_c"...
Utility called by gen_design. It reads in one or more stimulus timing file comforming to AFNI style, and return a list (size of ``[number of runs \\* number of conditions]``) of dictionary including onsets, durations and weights of each event. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files should follow the style of AFNI stimulus timing files, refer to gen_design. n_C: integer, number of task conditions n_S: integer, number of scans scan_onoff: list of numbers. The onset of each scan after concatenating all scans, together with the offset of the last scan. For example, if 3 scans of duration 100s, 150s, 120s are run, scan_onoff is [0, 100, 250, 370] Returns ------- design_info: list of stimulus information The first level of the list correspond to different scans. The second level of the list correspond to different conditions. Each item in the list is a dictiornary with keys "onset", "duration" and "weight". If one condition includes no event in a scan, the values of these keys in that scan of the condition are empty lists. See also -------- gen_design
[ "Utility", "called", "by", "gen_design", ".", "It", "reads", "in", "one", "or", "more", "stimulus", "timing", "file", "comforming", "to", "AFNI", "style", "and", "return", "a", "list", "(", "size", "of", "[", "number", "of", "runs", "\\\\", "*", "number"...
python
train
38.175676
glormph/msstitch
src/app/actions/mzidtsv/proteingrouping.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingrouping.py#L57-L68
def count_protein_group_hits(lineproteins, groups): """Takes a list of protein accessions and a list of protein groups content from DB. Counts for each group in list how many proteins are found in lineproteins. Returns list of str amounts. """ hits = [] for group in groups: hits.append(0) for protein in lineproteins: if protein in group: hits[-1] += 1 return [str(x) for x in hits]
[ "def", "count_protein_group_hits", "(", "lineproteins", ",", "groups", ")", ":", "hits", "=", "[", "]", "for", "group", "in", "groups", ":", "hits", ".", "append", "(", "0", ")", "for", "protein", "in", "lineproteins", ":", "if", "protein", "in", "group"...
Takes a list of protein accessions and a list of protein groups content from DB. Counts for each group in list how many proteins are found in lineproteins. Returns list of str amounts.
[ "Takes", "a", "list", "of", "protein", "accessions", "and", "a", "list", "of", "protein", "groups", "content", "from", "DB", ".", "Counts", "for", "each", "group", "in", "list", "how", "many", "proteins", "are", "found", "in", "lineproteins", ".", "Returns...
python
train
37
jantman/awslimitchecker
awslimitchecker/services/rds.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/rds.py#L274-L294
def _update_limits_from_api(self): """ Query RDS's DescribeAccountAttributes API action, and update limits with the quotas returned. Updates ``self.limits``. We ignore the usage information from the API, """ self.connect() logger.info("Querying RDS DescribeAccountAttributes for limits") lims = self.conn.describe_account_attributes()['AccountQuotas'] for lim in lims: if lim['AccountQuotaName'] not in self.API_NAME_TO_LIMIT: logger.info('RDS DescribeAccountAttributes returned unknown' 'limit: %s (max: %s; used: %s)', lim['AccountQuotaName'], lim['Max'], lim['Used']) continue lname = self.API_NAME_TO_LIMIT[lim['AccountQuotaName']] self.limits[lname]._set_api_limit(lim['Max']) if len(self.limits[lname].get_current_usage()) < 1: self.limits[lname]._add_current_usage(lim['Used']) logger.debug('Done setting limits from API.')
[ "def", "_update_limits_from_api", "(", "self", ")", ":", "self", ".", "connect", "(", ")", "logger", ".", "info", "(", "\"Querying RDS DescribeAccountAttributes for limits\"", ")", "lims", "=", "self", ".", "conn", ".", "describe_account_attributes", "(", ")", "["...
Query RDS's DescribeAccountAttributes API action, and update limits with the quotas returned. Updates ``self.limits``. We ignore the usage information from the API,
[ "Query", "RDS", "s", "DescribeAccountAttributes", "API", "action", "and", "update", "limits", "with", "the", "quotas", "returned", ".", "Updates", "self", ".", "limits", "." ]
python
train
49.666667
inasafe/inasafe
scripts/create_api_docs.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/scripts/create_api_docs.py#L88-L104
def create_module_rst_file(module_name): """Function for creating content in each .rst file for a module. :param module_name: name of the module. :type module_name: str :returns: A content for auto module. :rtype: str """ return_text = 'Module: ' + module_name dash = '=' * len(return_text) return_text += '\n' + dash + '\n\n' return_text += '.. automodule:: ' + module_name + '\n' return_text += ' :members:\n\n' return return_text
[ "def", "create_module_rst_file", "(", "module_name", ")", ":", "return_text", "=", "'Module: '", "+", "module_name", "dash", "=", "'='", "*", "len", "(", "return_text", ")", "return_text", "+=", "'\\n'", "+", "dash", "+", "'\\n\\n'", "return_text", "+=", "'.....
Function for creating content in each .rst file for a module. :param module_name: name of the module. :type module_name: str :returns: A content for auto module. :rtype: str
[ "Function", "for", "creating", "content", "in", "each", ".", "rst", "file", "for", "a", "module", "." ]
python
train
27.705882
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L80-L139
def map_v2_event_into_v1(event): ''' Helper method to convert Sensu 2.x event into Sensu 1.x event. ''' # return the event if it has already been mapped if "v2_event_mapped_into_v1" in event: return event # Trigger mapping code if enity exists and client does not if not bool(event.get('client')) and "entity" in event: event['client'] = event['entity'] # Fill in missing client attributes if "name" not in event['client']: event['client']['name'] = event['entity']['id'] if "subscribers" not in event['client']: event['client']['subscribers'] = event['entity']['subscriptions'] # Fill in renamed check attributes expected in 1.4 event if "subscribers" not in event['check']: event['check']['subscribers'] = event['check']['subscriptions'] if "source" not in event['check']: event['check']['source'] = event['check']['proxy_entity_id'] # Mimic 1.4 event action based on 2.0 event state # action used in logs and fluentd plugins handlers action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve', 'failing': 'create'} if "state" in event['check']: state = event['check']['state'] else: state = "unknown::2.0_event" if "action" not in event and state.lower() in action_state_mapping: event['action'] = action_state_mapping[state.lower()] else: event['action'] = state # Mimic 1.4 event history based on 2.0 event history if "history" in event['check']: # save the original history event['check']['history_v2'] = deepcopy(event['check']['history']) legacy_history = [] for history in event['check']['history']: if isinstance(history['status'], int): legacy_history.append(str(history['status'])) else: legacy_history.append("3") event['check']['history'] = legacy_history # Setting flag indicating this function has already been called event['v2_event_mapped_into_v1'] = True # return the updated event return event
[ "def", "map_v2_event_into_v1", "(", "event", ")", ":", "# return the event if it has already been mapped", "if", "\"v2_event_mapped_into_v1\"", "in", "event", ":", "return", "event", "# Trigger mapping code if enity exists and client does not", "if", "not", "bool", "(", "event"...
Helper method to convert Sensu 2.x event into Sensu 1.x event.
[ "Helper", "method", "to", "convert", "Sensu", "2", ".", "x", "event", "into", "Sensu", "1", ".", "x", "event", "." ]
python
train
37.1
apache/airflow
airflow/hooks/zendesk_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/zendesk_hook.py#L39-L50
def __handle_rate_limit_exception(self, rate_limit_exception): """ Sleep for the time specified in the exception. If not specified, wait for 60 seconds. """ retry_after = int( rate_limit_exception.response.headers.get('Retry-After', 60)) self.log.info( "Hit Zendesk API rate limit. Pausing for %s seconds", retry_after ) time.sleep(retry_after)
[ "def", "__handle_rate_limit_exception", "(", "self", ",", "rate_limit_exception", ")", ":", "retry_after", "=", "int", "(", "rate_limit_exception", ".", "response", ".", "headers", ".", "get", "(", "'Retry-After'", ",", "60", ")", ")", "self", ".", "log", ".",...
Sleep for the time specified in the exception. If not specified, wait for 60 seconds.
[ "Sleep", "for", "the", "time", "specified", "in", "the", "exception", ".", "If", "not", "specified", "wait", "for", "60", "seconds", "." ]
python
test
36.083333
dereneaton/ipyrad
ipyrad/analysis/tetrad.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1542-L1586
def nworker(data, smpchunk, tests): """ The workhorse function. Not numba. """ ## tell engines to limit threads #numba.config.NUMBA_DEFAULT_NUM_THREADS = 1 ## open the seqarray view, the modified array is in bootsarr with h5py.File(data.database.input, 'r') as io5: seqview = io5["bootsarr"][:] maparr = io5["bootsmap"][:] ## create an N-mask array of all seq cols (this isn't really too slow) nall_mask = seqview[:] == 78 ## tried numba compiling everythign below here, but was not faster ## than making nmask w/ axis arg in numpy ## get the input arrays ready rquartets = np.zeros((smpchunk.shape[0], 4), dtype=np.uint16) rweights = None #rweights = np.ones(smpchunk.shape[0], dtype=np.float64) rdstats = np.zeros((smpchunk.shape[0], 4), dtype=np.uint32) #times = [] ## fill arrays with results using numba funcs for idx in xrange(smpchunk.shape[0]): ## get seqchunk for 4 samples (4, ncols) sidx = smpchunk[idx] seqchunk = seqview[sidx] ## get N-containing columns in 4-array, and invariant sites. nmask = np.any(nall_mask[sidx], axis=0) nmask += np.all(seqchunk == seqchunk[0], axis=0) ## <- do we need this? ## get matrices if there are any shared SNPs ## returns best-tree index, qscores, and qstats #bidx, qscores, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests) bidx, qstats = calculate(seqchunk, maparr[:, 0], nmask, tests) ## get weights from the three scores sorted. ## Only save to file if the quartet has information rdstats[idx] = qstats rquartets[idx] = smpchunk[idx][bidx] return rquartets, rweights, rdstats
[ "def", "nworker", "(", "data", ",", "smpchunk", ",", "tests", ")", ":", "## tell engines to limit threads", "#numba.config.NUMBA_DEFAULT_NUM_THREADS = 1", "## open the seqarray view, the modified array is in bootsarr", "with", "h5py", ".", "File", "(", "data", ".", "database"...
The workhorse function. Not numba.
[ "The", "workhorse", "function", ".", "Not", "numba", "." ]
python
valid
38.2
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/email.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/email.py#L106-L124
def connect(self):
    # type: () -> None
    """ Connect to the mail server and log in.

    The connection class is picked from ``self.connection_type``:
    'ssl' uses SMTP_SSL, 'lmtp' uses LMTP (no timeout parameter),
    anything else uses plain SMTP.

    Returns:
        None
    """
    conn_type = self.connection_type.lower()
    common_kwargs = dict(host=self.host, port=self.port,
                         local_hostname=self.local_hostname,
                         source_address=self.source_address)
    if conn_type == 'ssl':
        self.server = smtplib.SMTP_SSL(timeout=self.timeout, **common_kwargs)
    elif conn_type == 'lmtp':
        self.server = smtplib.LMTP(**common_kwargs)
    else:
        self.server = smtplib.SMTP(timeout=self.timeout, **common_kwargs)
    self.server.login(self.username, self.password)
[ "def", "connect", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "connection_type", ".", "lower", "(", ")", "==", "'ssl'", ":", "self", ".", "server", "=", "smtplib", ".", "SMTP_SSL", "(", "host", "=", "self", ".", "host", ",", "port",...
Connect to server Returns: None
[ "Connect", "to", "server" ]
python
train
46.526316
scdoshi/django-bits
bits/gis.py
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/gis.py#L15-L25
def gprmc_to_degdec(lat, latDirn, lng, lngDirn):
    """Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.

    ``lat`` is DDMM.MMMM, ``lng`` is DDDMM.MMMM; 'S' / 'W' hemisphere
    flags produce negative values.
    """
    def _convert(value, degree_digits, negate):
        # Leading digits are whole degrees; the rest are decimal minutes.
        degrees = float(value[:degree_digits])
        minutes = float(value[degree_digits:]) / 60
        result = degrees + minutes
        return -result if negate else result

    x = _convert(lat, 2, latDirn == 'S')
    y = _convert(lng, 3, lngDirn == 'W')
    return x, y
[ "def", "gprmc_to_degdec", "(", "lat", ",", "latDirn", ",", "lng", ",", "lngDirn", ")", ":", "x", "=", "float", "(", "lat", "[", "0", ":", "2", "]", ")", "+", "float", "(", "lat", "[", "2", ":", "]", ")", "/", "60", "y", "=", "float", "(", "...
Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.
[ "Converts", "GPRMC", "formats", "(", "Decimal", "Minutes", ")", "to", "Degrees", "Decimal", "." ]
python
train
26.818182
andrewsnowden/dota2py
dota2py/summary.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/summary.py#L113-L128
def creep_kill(self, target, timestamp):
    """
    A creep was tragically killed. Need to split this into radiant/dire
    and neutrals

    :param target: name of the creep unit that died.
    :param timestamp: game time of the kill (currently unused here).
    """
    self.creep_kill_types[target] += 1

    # .items() instead of py2-only .iteritems(); works on both 2 and 3.
    for prefix, counter_attr in self.creep_types.items():
        if target.startswith(prefix):
            setattr(self, counter_attr, getattr(self, counter_attr) + 1)
            break
    else:
        # BUG FIX: the original message called .format() without a
        # placeholder, silently dropping the creep name.
        print('> unhandled creep type: {0}'.format(target))
[ "def", "creep_kill", "(", "self", ",", "target", ",", "timestamp", ")", ":", "self", ".", "creep_kill_types", "[", "target", "]", "+=", "1", "matched", "=", "False", "for", "k", ",", "v", "in", "self", ".", "creep_types", ".", "iteritems", "(", ")", ...
A creep was tragically killed. Need to split this into radiant/dire and neutrals
[ "A", "creep", "was", "tragically", "killed", ".", "Need", "to", "split", "this", "into", "radiant", "/", "dire", "and", "neutrals" ]
python
train
30.8125
estnltk/estnltk
estnltk/wiki/parser.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wiki/parser.py#L66-L82
def templatesCollector(text, open, close):
    """leaves related articles and wikitables in place"""
    # NOTE: `open`/`close` shadow builtins but are part of the public
    # signature, so they are kept.
    others = []
    spans_to_drop = []
    for span in findBalanced(text, open, close):
        start, end = span
        chunk = text[start:end]
        lowered = chunk.lower()
        # 'vaata|' ("see also") links and wikitables stay in the text.
        if 'vaata|' in lowered or 'wikitable' in lowered:
            continue
        spans_to_drop.append(span)
        others.append(chunk)
    text = dropSpans(spans_to_drop, text)
    return text, others
[ "def", "templatesCollector", "(", "text", ",", "open", ",", "close", ")", ":", "others", "=", "[", "]", "spans", "=", "[", "i", "for", "i", "in", "findBalanced", "(", "text", ",", "open", ",", "close", ")", "]", "spanscopy", "=", "copy", "(", "span...
leaves related articles and wikitables in place
[ "leaves", "related", "articles", "and", "wikitables", "in", "place" ]
python
train
29.117647
klavinslab/coral
coral/sequence/_sequence.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/sequence/_sequence.py#L40-L47
def copy(self):
    '''Create a copy of the current instance.

    :returns: A safely editable copy of the current sequence.

    '''
    # run_checks=False skips the alphabet check on the (already
    # validated) sequence -- a significant performance win.
    cls = type(self)
    return cls(self.seq, self.material, run_checks=False)
[ "def", "copy", "(", "self", ")", ":", "# Significant performance improvements by skipping alphabet check", "return", "type", "(", "self", ")", "(", "self", ".", "seq", ",", "self", ".", "material", ",", "run_checks", "=", "False", ")" ]
Create a copy of the current instance. :returns: A safely editable copy of the current sequence.
[ "Create", "a", "copy", "of", "the", "current", "instance", "." ]
python
train
35.125
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L70-L94
def dimension_name(dimension):
    """Return the Dimension.name for a dimension-like object.

    Args:
        dimension: Dimension or dimension string, tuple or dict

    Returns:
        The name of the Dimension or what would be the name if the
        input as converted to a Dimension.
    """
    if dimension is None:
        return None
    # Each supported spec type pairs with a name extractor.
    extractors = [
        (Dimension, lambda dim: dim.name),
        (basestring, lambda dim: dim),
        (tuple, lambda dim: dim[0]),
        (dict, lambda dim: dim['name']),
    ]
    for spec_type, extract in extractors:
        if isinstance(dimension, spec_type):
            return extract(dimension)
    raise ValueError('%s type could not be interpreted as Dimension. '
                     'Dimensions must be declared as a string, tuple, '
                     'dictionary or Dimension type.'
                     % type(dimension).__name__)
[ "def", "dimension_name", "(", "dimension", ")", ":", "if", "isinstance", "(", "dimension", ",", "Dimension", ")", ":", "return", "dimension", ".", "name", "elif", "isinstance", "(", "dimension", ",", "basestring", ")", ":", "return", "dimension", "elif", "is...
Return the Dimension.name for a dimension-like object. Args: dimension: Dimension or dimension string, tuple or dict Returns: The name of the Dimension or what would be the name if the input as converted to a Dimension.
[ "Return", "the", "Dimension", ".", "name", "for", "a", "dimension", "-", "like", "object", "." ]
python
train
34.88
frejanordsiek/GeminiMotorDrive
GeminiMotorDrive/__init__.py
https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L145-L219
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2): """ Gets the specified drive parameter. Gets a parameter from the drive. Only supports ``bool``, ``int``, and ``float`` parameters. Parameters ---------- name : str Name of the parameter to check. It is always the command to set it but without the value. tp : type {bool, int, float} The type of the parameter. timeout : number, optional Optional timeout in seconds to use when reading the response. A negative value or ``None`` indicates that the an infinite timeout should be used. max_retries : int, optional Maximum number of retries to do per command in the case of errors. Returns ------- value : bool, int, or float The value of the specified parameter. Raises ------ TypeError If 'tp' is not an allowed type (``bool``, ``int``, ``float``). CommandError If the command to retrieve the parameter returned an error. ValueError If the value returned to the drive cannot be converted to the proper type. See Also -------- _set_parameter : Set a parameter. """ # Raise a TypeError if tp isn't one of the valid types. if tp not in (bool, int, float): raise TypeError('Only supports bool, int, and float; not ' + str(tp)) # Sending a command of name queries the state for that # parameter. The response will have name preceeded by an '*' and # then followed by a number which will have to be converted. response = self.driver.send_command(name, timeout=timeout, immediate=True, max_retries=max_retries) # If the response has an error, there are no response lines, or # the first response line isn't '*'+name; then there was an # error and an exception needs to be thrown. if self.driver.command_error(response) \ or len(response[4]) == 0 \ or not response[4][0].startswith('*' + name): raise CommandError('Couldn''t retrieve parameter ' + name) # Extract the string representation of the value, which is after # the '*'+name. 
value_str = response[4][0][(len(name)+1):] # Convert the value string to the appropriate type and return # it. Throw an error if it is not supported. if tp == bool: return (value_str == '1') elif tp == int: return int(value_str) elif tp == float: return float(value_str)
[ "def", "_get_parameter", "(", "self", ",", "name", ",", "tp", ",", "timeout", "=", "1.0", ",", "max_retries", "=", "2", ")", ":", "# Raise a TypeError if tp isn't one of the valid types.", "if", "tp", "not", "in", "(", "bool", ",", "int", ",", "float", ")", ...
Gets the specified drive parameter. Gets a parameter from the drive. Only supports ``bool``, ``int``, and ``float`` parameters. Parameters ---------- name : str Name of the parameter to check. It is always the command to set it but without the value. tp : type {bool, int, float} The type of the parameter. timeout : number, optional Optional timeout in seconds to use when reading the response. A negative value or ``None`` indicates that the an infinite timeout should be used. max_retries : int, optional Maximum number of retries to do per command in the case of errors. Returns ------- value : bool, int, or float The value of the specified parameter. Raises ------ TypeError If 'tp' is not an allowed type (``bool``, ``int``, ``float``). CommandError If the command to retrieve the parameter returned an error. ValueError If the value returned to the drive cannot be converted to the proper type. See Also -------- _set_parameter : Set a parameter.
[ "Gets", "the", "specified", "drive", "parameter", "." ]
python
train
37.453333
polyaxon/rhea
rhea/manager.py
https://github.com/polyaxon/rhea/blob/f47b59777cd996d834a0497a1ab442541aaa8a62/rhea/manager.py#L63-L103
def get_int(self, key,
            is_list=False,
            is_optional=False,
            is_secret=False,
            is_local=False,
            default=None,
            options=None):
    """
    Get a the value corresponding to the key and converts it to `int`/`list(int)`.

    Args:
        key: the dict key.
        is_list: If this is one element or a list of elements.
        is_optional: To raise an error if key was not found.
        is_secret: If the key is a secret.
        is_local: If the key is a local to this service.
        default: default value if is_optional is True.
        options: list/tuple if provided, the value must be one of these values.

    Returns:
        `int`: value corresponding to the key.
    """
    # Both getters share the same keyword interface; only the
    # list-vs-scalar variant differs.
    getter = self._get_typed_list_value if is_list else self._get_typed_value
    return getter(key=key,
                  target_type=int,
                  type_convert=int,
                  is_optional=is_optional,
                  is_secret=is_secret,
                  is_local=is_local,
                  default=default,
                  options=options)
[ "def", "get_int", "(", "self", ",", "key", ",", "is_list", "=", "False", ",", "is_optional", "=", "False", ",", "is_secret", "=", "False", ",", "is_local", "=", "False", ",", "default", "=", "None", ",", "options", "=", "None", ")", ":", "if", "is_li...
Get a the value corresponding to the key and converts it to `int`/`list(int)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `int`: value corresponding to the key.
[ "Get", "a", "the", "value", "corresponding", "to", "the", "key", "and", "converts", "it", "to", "int", "/", "list", "(", "int", ")", "." ]
python
train
43.195122
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py#L8194-L8203
def ptz_status_send(self, zoom, pan, tilt, force_mavlink1=False):
    '''
    Transmits the actual Pan, Tilt and Zoom values of the camera unit

    zoom                      : The actual Zoom Value (uint8_t)
    pan                       : The Pan value in 10ths of degree (int16_t)
    tilt                      : The Tilt value in 10ths of degree (int16_t)

    '''
    # Encode first, then hand the message to the transport layer.
    message = self.ptz_status_encode(zoom, pan, tilt)
    return self.send(message, force_mavlink1=force_mavlink1)
[ "def", "ptz_status_send", "(", "self", ",", "zoom", ",", "pan", ",", "tilt", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "ptz_status_encode", "(", "zoom", ",", "pan", ",", "tilt", ")", ",", "force_mav...
Transmits the actual Pan, Tilt and Zoom values of the camera unit zoom : The actual Zoom Value (uint8_t) pan : The Pan value in 10ths of degree (int16_t) tilt : The Tilt value in 10ths of degree (int16_t)
[ "Transmits", "the", "actual", "Pan", "Tilt", "and", "Zoom", "values", "of", "the", "camera", "unit" ]
python
train
53.6
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2143-L2154
def reset(self):
    """
    Resets the initial radii used for updating the particles. Call
    if any of the particle radii or positions have been changed
    external to the augmented state.
    """
    # Parameter names for every particle's radius and position.
    inds = list(range(self.state.obj_get_positions().shape[0]))
    self._rad_nms = self.state.param_particle_rad(inds)
    self._pos_nms = self.state.param_particle_pos(inds)
    # Snapshot the current radii/positions as the new reference
    # values; positions are reshaped to (n_particles, 3).
    self._initial_rad = np.copy(self.state.state[self._rad_nms])
    self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-1,3))
    # Zero the radius-rescale portion of the augmented parameters.
    self.param_vals[self.rscale_mask] = 0
[ "def", "reset", "(", "self", ")", ":", "inds", "=", "list", "(", "range", "(", "self", ".", "state", ".", "obj_get_positions", "(", ")", ".", "shape", "[", "0", "]", ")", ")", "self", ".", "_rad_nms", "=", "self", ".", "state", ".", "param_particle...
Resets the initial radii used for updating the particles. Call if any of the particle radii or positions have been changed external to the augmented state.
[ "Resets", "the", "initial", "radii", "used", "for", "updating", "the", "particles", ".", "Call", "if", "any", "of", "the", "particle", "radii", "or", "positions", "have", "been", "changed", "external", "to", "the", "augmented", "state", "." ]
python
valid
49.75
numenta/nupic
src/nupic/data/generators/data_generator.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L174-L185
def getRecord(self, n=None):
    """Returns the nth record.

    When ``n`` is None, the most recently generated record is
    returned.
    """
    if n is None:
        # Default to the last record of the first field.
        assert len(self.fields) > 0
        n = self.fields[0].numRecords - 1
    # Every field must have generated at least n+1 records.
    assert all(field.numRecords > n for field in self.fields)
    return [field.values[n] for field in self.fields]
[ "def", "getRecord", "(", "self", ",", "n", "=", "None", ")", ":", "if", "n", "is", "None", ":", "assert", "len", "(", "self", ".", "fields", ")", ">", "0", "n", "=", "self", ".", "fields", "[", "0", "]", ".", "numRecords", "-", "1", "assert", ...
Returns the nth record
[ "Returns", "the", "nth", "record" ]
python
valid
23.166667
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/manager.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L316-L320
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
    ''' upload a directory using Aspera '''
    # The local directory must be readable before queueing the transfer.
    check_io_access(directory, os.R_OK)
    file_pairs = [FilePair(key, directory)]
    return self._queue_task(bucket, file_pairs, transfer_config,
                            subscribers, enumAsperaDirection.SEND)
[ "def", "upload_directory", "(", "self", ",", "directory", ",", "bucket", ",", "key", ",", "transfer_config", "=", "None", ",", "subscribers", "=", "None", ")", ":", "check_io_access", "(", "directory", ",", "os", ".", "R_OK", ")", "return", "self", ".", ...
upload a directory using Aspera
[ "upload", "a", "directory", "using", "Aspera" ]
python
train
67
ooici/elasticpy
elasticpy/search.py
https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/search.py#L50-L58
def size(self, value):
    '''
    The number of hits to return. Defaults to 10
    '''
    # Lazily create the params dict, then record the requested size.
    if not self.params:
        self.params = dict(size=value)
    else:
        self.params['size'] = value
    return self
[ "def", "size", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "params", ":", "self", ".", "params", "=", "dict", "(", "size", "=", "value", ")", "return", "self", "self", ".", "params", "[", "'size'", "]", "=", "value", "return", ...
The number of hits to return. Defaults to 10
[ "The", "number", "of", "hits", "to", "return", ".", "Defaults", "to", "10" ]
python
train
26.777778
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/utils.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L307-L344
def _process_batch_write_response(request, response, table_crypto_config):
    # type: (Dict, Dict, Dict[Text, CryptoConfig]) -> Dict
    """Handle unprocessed items in the response from a transparently encrypted write.

    DynamoDB returns unprocessed batch items still encrypted; each one is
    swapped back for its original plaintext request entry so callers can
    retry transparently.

    :param dict request: The DynamoDB plaintext request dictionary
    :param dict response: The DynamoDB response from the batch operation
    :param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig
        used in encrypting the request items
    :return: DynamoDB response, with any unprocessed items reverted back to the
        original plaintext values
    :rtype: dict
    """
    try:
        unprocessed_items = response["UnprocessedItems"]
    except KeyError:
        # Everything was processed; nothing to revert.
        return response

    # Unprocessed items need to be returned in their original state
    for table_name, unprocessed in unprocessed_items.items():
        original_items = request[table_name]
        crypto_config = table_crypto_config[table_name]

        # With a configured partition key, items can be matched by key
        # alone; otherwise all attributes must be compared.
        if crypto_config.encryption_context.partition_key_name:
            items_match = partial(_item_keys_match, crypto_config)
        else:
            items_match = partial(_item_attributes_match, crypto_config)

        for pos, operation in enumerate(unprocessed):
            for request_type, item in operation.items():
                # Only PutRequest entries carry encrypted values to revert.
                if request_type != "PutRequest":
                    continue

                for plaintext_item in original_items:
                    if plaintext_item.get(request_type) and items_match(
                        plaintext_item[request_type]["Item"], item["Item"]
                    ):
                        unprocessed[pos] = plaintext_item.copy()
                        break

    return response
[ "def", "_process_batch_write_response", "(", "request", ",", "response", ",", "table_crypto_config", ")", ":", "# type: (Dict, Dict, Dict[Text, CryptoConfig]) -> Dict", "try", ":", "unprocessed_items", "=", "response", "[", "\"UnprocessedItems\"", "]", "except", "KeyError", ...
Handle unprocessed items in the response from a transparently encrypted write. :param dict request: The DynamoDB plaintext request dictionary :param dict response: The DynamoDB response from the batch operation :param Dict[Text, CryptoConfig] table_crypto_config: table level CryptoConfig used in encrypting the request items :return: DynamoDB response, with any unprocessed items reverted back to the original plaintext values :rtype: dict
[ "Handle", "unprocessed", "items", "in", "the", "response", "from", "a", "transparently", "encrypted", "write", "." ]
python
train
43.868421
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L106-L122
def _relative_paths(xs, base_path):
    """Adjust paths to be relative to the provided base path.

    Recurses through lists, tuples and dicts; strings that start with
    ``base_path`` have that prefix (plus the separator) stripped.
    """
    if isinstance(xs, six.string_types):
        if not xs.startswith(base_path):
            return xs
        # Strip only the first occurrence of the prefix.
        return xs.replace(base_path + "/", "", 1)
    if isinstance(xs, (list, tuple)):
        return [_relative_paths(item, base_path) for item in xs]
    if isinstance(xs, dict):
        return {key: _relative_paths(val, base_path)
                for key, val in xs.items()}
    return xs
[ "def", "_relative_paths", "(", "xs", ",", "base_path", ")", ":", "if", "isinstance", "(", "xs", ",", "six", ".", "string_types", ")", ":", "if", "xs", ".", "startswith", "(", "base_path", ")", ":", "return", "xs", ".", "replace", "(", "base_path", "+",...
Adjust paths to be relative to the provided base path.
[ "Adjust", "paths", "to", "be", "relative", "to", "the", "provided", "base", "path", "." ]
python
train
31.411765
Capitains/flask-capitains-nemo
flask_nemo/plugins/annotations_api.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/annotations_api.py#L120-L138
def r_annotation_body(self, sha):
    """ Route to retrieve contents of an annotation resource

    :param sha: The sha (identifier) of the annotation resource
    :type sha: str
    :return: annotation contents
    :rtype: {str: Any}
    """
    annotation = self.__queryinterface__.getResource(sha)
    if not annotation:
        return "invalid resource uri", 404
    # TODO this should inspect the annotation content
    # set appropriate Content-Type headers
    # and return the actual content
    content = annotation.read()
    if isinstance(content, Response):
        return content
    return Response(content, headers={"Content-Type": annotation.mimetype})
[ "def", "r_annotation_body", "(", "self", ",", "sha", ")", ":", "annotation", "=", "self", ".", "__queryinterface__", ".", "getResource", "(", "sha", ")", "if", "not", "annotation", ":", "return", "\"invalid resource uri\"", ",", "404", "# TODO this should inspect ...
Route to retrieve contents of an annotation resource :param uri: The uri of the annotation resource :type uri: str :return: annotation contents :rtype: {str: Any}
[ "Route", "to", "retrieve", "contents", "of", "an", "annotation", "resource" ]
python
valid
38.263158
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L1727-L1730
def reversed(self):
    """returns a copy of the Arc object with its orientation reversed."""
    # Swapping start/end requires flipping the sweep flag so the new
    # arc traces the same curve in the opposite direction.
    flipped_sweep = not self.sweep
    return Arc(self.end, self.radius, self.rotation,
               self.large_arc, flipped_sweep, self.start)
[ "def", "reversed", "(", "self", ")", ":", "return", "Arc", "(", "self", ".", "end", ",", "self", ".", "radius", ",", "self", ".", "rotation", ",", "self", ".", "large_arc", ",", "not", "self", ".", "sweep", ",", "self", ".", "start", ")" ]
returns a copy of the Arc object with its orientation reversed.
[ "returns", "a", "copy", "of", "the", "Arc", "object", "with", "its", "orientation", "reversed", "." ]
python
train
53.5
dmlc/gluon-nlp
scripts/bert/create_pretraining_data.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/create_pretraining_data.py#L354-L472
def create_instances_from_document( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates `TrainingInstance`s for a single document.""" document = all_documents[document_index] # Account for [CLS], [SEP], [SEP] max_num_tokens = max_seq_length - 3 # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. target_seq_length = max_num_tokens if rng.random() < short_seq_prob: target_seq_length = rng.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances = [] current_chunk = [] current_length = 0 i = 0 while i < len(document): # pylint: disable=R1702 segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = rng.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] # Random next is_random_next = False if len(current_chunk) == 1 or rng.random() < 0.5: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. 
However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = rng.randint( 0, len(all_documents) - 1) if random_document_index != document_index: break random_document = all_documents[random_document_index] random_start = rng.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we 'put them back' so # they don't go to waste. num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 tokens = [] segment_ids = [] tokens.append('[CLS]') segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append('[SEP]') segment_ids.append(0) segment_a_lengths = len(segment_ids) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append('[SEP]') segment_ids.append(1) segment_b_lengths = len(segment_ids) - segment_a_lengths (tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions( tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance( tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels, segment_a_lengths=segment_a_lengths, segment_b_lengths=segment_b_lengths) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances
[ "def", "create_instances_from_document", "(", "all_documents", ",", "document_index", ",", "max_seq_length", ",", "short_seq_prob", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_words", ",", "rng", ")", ":", "document", "=", "all_documents", "[", ...
Creates `TrainingInstance`s for a single document.
[ "Creates", "TrainingInstance", "s", "for", "a", "single", "document", "." ]
python
train
43.445378
dslackw/slpkg
slpkg/main.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L381-L418
def pkg_tracking(self): """Tracking package dependencies """ flag = [] options = [ "-t", "--tracking" ] additional_options = [ "--check-deps", "--graph=", "--case-ins" ] for arg in self.args[2:]: if arg.startswith(additional_options[1]): flag.append(arg) self.args.remove(arg) if arg in additional_options: flag.append(arg) # clean additional options from args for f in flag: if f in self.args: self.args.remove(f) # print usage message if wrong additional option for arg in self.args: if arg.startswith("--"): if arg not in additional_options: usage("") raise SystemExit() if (len(self.args) >= 3 and len(self.args) <= 3 and self.args[0] in options and self.args[1] in self.meta.repositories): TrackingDeps(self.args[2], self.args[1], flag).run() elif (len(self.args) >= 2 and self.args[1] not in self.meta.repositories): usage(self.args[1]) else: usage("")
[ "def", "pkg_tracking", "(", "self", ")", ":", "flag", "=", "[", "]", "options", "=", "[", "\"-t\"", ",", "\"--tracking\"", "]", "additional_options", "=", "[", "\"--check-deps\"", ",", "\"--graph=\"", ",", "\"--case-ins\"", "]", "for", "arg", "in", "self", ...
Tracking package dependencies
[ "Tracking", "package", "dependencies" ]
python
train
32.973684
maxalbert/tohu
tohu/v6/utils.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L100-L110
def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None): """ Helper function which prints a sequence of `num` items produced by the random generator `gen`. """ if seed: gen.reset(seed) elems = [format(next(gen), fmt) for _ in range(num)] sep_initial = "\n\n" if '\n' in sep else " " print("Generated sequence:{}{}".format(sep_initial, sep.join(elems)))
[ "def", "print_generated_sequence", "(", "gen", ",", "num", ",", "*", ",", "sep", "=", "\", \"", ",", "fmt", "=", "''", ",", "seed", "=", "None", ")", ":", "if", "seed", ":", "gen", ".", "reset", "(", "seed", ")", "elems", "=", "[", "format", "(",...
Helper function which prints a sequence of `num` items produced by the random generator `gen`.
[ "Helper", "function", "which", "prints", "a", "sequence", "of", "num", "items", "produced", "by", "the", "random", "generator", "gen", "." ]
python
train
36.090909
pantsbuild/pants
src/python/pants/base/hash_utils.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/hash_utils.py#L107-L120
def json_hash(obj, digest=None, encoder=None): """Hashes `obj` by dumping to JSON. :param obj: An object that can be rendered to json using the given `encoder`. :param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`. :param encoder: An optional custom json encoder. :type encoder: :class:`json.JSONEncoder` :returns: A hash of the given `obj` according to the given `encoder`. :rtype: str :API: public """ json_str = json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True, cls=encoder) return hash_all(json_str, digest=digest)
[ "def", "json_hash", "(", "obj", ",", "digest", "=", "None", ",", "encoder", "=", "None", ")", ":", "json_str", "=", "json", ".", "dumps", "(", "obj", ",", "ensure_ascii", "=", "True", ",", "allow_nan", "=", "False", ",", "sort_keys", "=", "True", ","...
Hashes `obj` by dumping to JSON. :param obj: An object that can be rendered to json using the given `encoder`. :param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`. :param encoder: An optional custom json encoder. :type encoder: :class:`json.JSONEncoder` :returns: A hash of the given `obj` according to the given `encoder`. :rtype: str :API: public
[ "Hashes", "obj", "by", "dumping", "to", "JSON", "." ]
python
train
41.785714
nicodv/kmodes
kmodes/kmodes.py
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L21-L50
def init_huang(X, n_clusters, dissim, random_state): """Initialize centroids according to method by Huang [1997].""" n_attrs = X.shape[1] centroids = np.empty((n_clusters, n_attrs), dtype='object') # determine frequencies of attributes for iattr in range(n_attrs): freq = defaultdict(int) for curattr in X[:, iattr]: freq[curattr] += 1 # Sample centroids using the probabilities of attributes. # (I assume that's what's meant in the Huang [1998] paper; it works, # at least) # Note: sampling using population in static list with as many choices # as frequency counts. Since the counts are small integers, # memory consumption is low. choices = [chc for chc, wght in freq.items() for _ in range(wght)] # So that we are consistent between Python versions, # each with different dict ordering. choices = sorted(choices) centroids[:, iattr] = random_state.choice(choices, n_clusters) # The previously chosen centroids could result in empty clusters, # so set centroid to closest point in X. for ik in range(n_clusters): ndx = np.argsort(dissim(X, centroids[ik])) # We want the centroid to be unique, if possible. while np.all(X[ndx[0]] == centroids, axis=1).any() and ndx.shape[0] > 1: ndx = np.delete(ndx, 0) centroids[ik] = X[ndx[0]] return centroids
[ "def", "init_huang", "(", "X", ",", "n_clusters", ",", "dissim", ",", "random_state", ")", ":", "n_attrs", "=", "X", ".", "shape", "[", "1", "]", "centroids", "=", "np", ".", "empty", "(", "(", "n_clusters", ",", "n_attrs", ")", ",", "dtype", "=", ...
Initialize centroids according to method by Huang [1997].
[ "Initialize", "centroids", "according", "to", "method", "by", "Huang", "[", "1997", "]", "." ]
python
train
47.2
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8225-L8236
def rot(self, x, fun, rot=1, args=()): """returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument""" if len(np.shape(array(x))) > 1: # parallelized res = [] for x in x: res.append(self.rot(x, fun, rot, args)) return res if rot: return fun(rotate(x, *args)) else: return fun(x)
[ "def", "rot", "(", "self", ",", "x", ",", "fun", ",", "rot", "=", "1", ",", "args", "=", "(", ")", ")", ":", "if", "len", "(", "np", ".", "shape", "(", "array", "(", "x", ")", ")", ")", ">", "1", ":", "# parallelized", "res", "=", "[", "]...
returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument
[ "returns", "fun", "(", "rotation", "(", "x", ")", "*", "args", ")", "ie", ".", "fun", "applied", "to", "a", "rotated", "argument" ]
python
train
33
amcfague/webunit2
webunit2/response.py
https://github.com/amcfague/webunit2/blob/3157e5837aad0810800628c1383f1fe11ee3e513/webunit2/response.py#L59-L65
def assertHeader(self, name, value=None, *args, **kwargs): """ Returns `True` if ``name`` was in the headers and, if ``value`` is True, whether or not the values match, or `False` otherwise. """ return name in self.raw_headers and ( True if value is None else self.raw_headers[name] == value)
[ "def", "assertHeader", "(", "self", ",", "name", ",", "value", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "name", "in", "self", ".", "raw_headers", "and", "(", "True", "if", "value", "is", "None", "else", "self", "....
Returns `True` if ``name`` was in the headers and, if ``value`` is True, whether or not the values match, or `False` otherwise.
[ "Returns", "True", "if", "name", "was", "in", "the", "headers", "and", "if", "value", "is", "True", "whether", "or", "not", "the", "values", "match", "or", "False", "otherwise", "." ]
python
train
48.285714
rosenbrockc/fortpy
fortpy/isense/builtin.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/builtin.py#L89-L95
def _parse_summary(tag, parser, parent): """Parses a <summary> tag and adds it the Executable parent instance. :arg parser: an instance of DocParser to create the DocElement with. """ summary = DocElement(tag, parser, parent) parent.docstring.append(summary)
[ "def", "_parse_summary", "(", "tag", ",", "parser", ",", "parent", ")", ":", "summary", "=", "DocElement", "(", "tag", ",", "parser", ",", "parent", ")", "parent", ".", "docstring", ".", "append", "(", "summary", ")" ]
Parses a <summary> tag and adds it the Executable parent instance. :arg parser: an instance of DocParser to create the DocElement with.
[ "Parses", "a", "<summary", ">", "tag", "and", "adds", "it", "the", "Executable", "parent", "instance", ".", ":", "arg", "parser", ":", "an", "instance", "of", "DocParser", "to", "create", "the", "DocElement", "with", "." ]
python
train
39.571429
GoogleCloudPlatform/python-repo-tools
gcp_devrel/tools/requirements.py
https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L132-L146
def check_req(req): """Checks if a given req is the latest version available.""" if not isinstance(req, Requirement): return None info = get_package_info(req.name) newest_version = _get_newest_version(info) if _is_pinned(req) and _is_version_range(req): return None current_spec = next(iter(req.specifier)) if req.specifier else None current_version = current_spec.version if current_spec else None if current_version != newest_version: return req.name, current_version, newest_version
[ "def", "check_req", "(", "req", ")", ":", "if", "not", "isinstance", "(", "req", ",", "Requirement", ")", ":", "return", "None", "info", "=", "get_package_info", "(", "req", ".", "name", ")", "newest_version", "=", "_get_newest_version", "(", "info", ")", ...
Checks if a given req is the latest version available.
[ "Checks", "if", "a", "given", "req", "is", "the", "latest", "version", "available", "." ]
python
train
35.333333
ynop/audiomate
audiomate/feeding/dataset.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/feeding/dataset.py#L343-L379
def get_utt_regions(self): """ Return the regions of all utterances, assuming all utterances are concatenated. It is assumed that the utterances are sorted in ascending order for concatenation. A region is defined by offset (in chunks), length (num-chunks) and a list of references to the utterance datasets in the containers. Returns: list: List of with a tuple for every utterances containing the region info. """ regions = [] current_offset = 0 for utt_idx in sorted(self.utt_ids): offset = current_offset num_frames = [] refs = [] for cnt in self.containers: num_frames.append(cnt.get(utt_idx).shape[0]) refs.append(cnt.get(utt_idx, mem_map=True)) if len(set(num_frames)) != 1: raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx)) num_chunks = math.ceil(num_frames[0] / float(self.frames_per_chunk)) region = (offset, num_chunks, refs) regions.append(region) # Sets the offset for the next utterances current_offset += num_chunks return regions
[ "def", "get_utt_regions", "(", "self", ")", ":", "regions", "=", "[", "]", "current_offset", "=", "0", "for", "utt_idx", "in", "sorted", "(", "self", ".", "utt_ids", ")", ":", "offset", "=", "current_offset", "num_frames", "=", "[", "]", "refs", "=", "...
Return the regions of all utterances, assuming all utterances are concatenated. It is assumed that the utterances are sorted in ascending order for concatenation. A region is defined by offset (in chunks), length (num-chunks) and a list of references to the utterance datasets in the containers. Returns: list: List of with a tuple for every utterances containing the region info.
[ "Return", "the", "regions", "of", "all", "utterances", "assuming", "all", "utterances", "are", "concatenated", ".", "It", "is", "assumed", "that", "the", "utterances", "are", "sorted", "in", "ascending", "order", "for", "concatenation", "." ]
python
train
33.513514
wbond/oscrypto
oscrypto/_openssl/tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/tls.py#L675-L694
def _raw_read(self): """ Reads data from the socket and writes it to the memory bio used by libssl to decrypt the data. Returns the unencrypted data for the purpose of debugging handshakes. :return: A byte string of ciphertext from the socket. Used for debugging the handshake only. """ data = self._raw_bytes try: data += self._socket.recv(8192) except (socket_.error): pass output = data written = libssl.BIO_write(self._rbio, data, len(data)) self._raw_bytes = data[written:] return output
[ "def", "_raw_read", "(", "self", ")", ":", "data", "=", "self", ".", "_raw_bytes", "try", ":", "data", "+=", "self", ".", "_socket", ".", "recv", "(", "8192", ")", "except", "(", "socket_", ".", "error", ")", ":", "pass", "output", "=", "data", "wr...
Reads data from the socket and writes it to the memory bio used by libssl to decrypt the data. Returns the unencrypted data for the purpose of debugging handshakes. :return: A byte string of ciphertext from the socket. Used for debugging the handshake only.
[ "Reads", "data", "from", "the", "socket", "and", "writes", "it", "to", "the", "memory", "bio", "used", "by", "libssl", "to", "decrypt", "the", "data", ".", "Returns", "the", "unencrypted", "data", "for", "the", "purpose", "of", "debugging", "handshakes", "...
python
valid
31.35
danielperna84/pyhomematic
pyhomematic/_hm.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/_hm.py#L609-L637
def stop(self): """To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server.""" stopped = [] for interface_id, proxy in self.proxies.items(): if interface_id in self.failed_inits: LOG.warning("ServerThread.stop: Not performing de-init for %s" % interface_id) continue if proxy._callbackip and proxy._callbackport: callbackip = proxy._callbackip callbackport = proxy._callbackport else: callbackip = proxy._localip callbackport = self._localport remote = "http://%s:%i" % (callbackip, callbackport) LOG.debug("ServerThread.stop: init('%s')" % remote) if not callbackip in stopped: try: proxy.init(remote) stopped.append(callbackip) LOG.info("Proxy de-initialized: %s" % remote) except Exception as err: LOG.debug("proxyInit: Exception: %s" % str(err)) LOG.warning("Failed to de-initialize proxy") self.proxies.clear() LOG.info("Shutting down server") self.server.shutdown() LOG.debug("ServerThread.stop: Stopping ServerThread") self.server.server_close() LOG.info("Server stopped")
[ "def", "stop", "(", "self", ")", ":", "stopped", "=", "[", "]", "for", "interface_id", ",", "proxy", "in", "self", ".", "proxies", ".", "items", "(", ")", ":", "if", "interface_id", "in", "self", ".", "failed_inits", ":", "LOG", ".", "warning", "(", ...
To stop the server we de-init from the CCU / Homegear, then shut down our XML-RPC server.
[ "To", "stop", "the", "server", "we", "de", "-", "init", "from", "the", "CCU", "/", "Homegear", "then", "shut", "down", "our", "XML", "-", "RPC", "server", "." ]
python
train
46.758621
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1694-L1713
def get_zonefile_data( self, zonefile_hash, zonefile_dir ): """ Get a zonefile by hash Return the serialized zonefile on success Return None on error """ # check cache atlas_zonefile_data = get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=False ) if atlas_zonefile_data is not None: # check hash zfh = get_zonefile_data_hash( atlas_zonefile_data ) if zfh != zonefile_hash: log.debug("Invalid local zonefile %s" % zonefile_hash ) remove_atlas_zonefile_data( zonefile_hash, zonefile_dir ) else: log.debug("Zonefile %s is local" % zonefile_hash) return atlas_zonefile_data return None
[ "def", "get_zonefile_data", "(", "self", ",", "zonefile_hash", ",", "zonefile_dir", ")", ":", "# check cache", "atlas_zonefile_data", "=", "get_atlas_zonefile_data", "(", "zonefile_hash", ",", "zonefile_dir", ",", "check", "=", "False", ")", "if", "atlas_zonefile_data...
Get a zonefile by hash Return the serialized zonefile on success Return None on error
[ "Get", "a", "zonefile", "by", "hash", "Return", "the", "serialized", "zonefile", "on", "success", "Return", "None", "on", "error" ]
python
train
37.95
debrouwere/python-ballpark
ballpark/utils.py
https://github.com/debrouwere/python-ballpark/blob/0b871cdf5b4b5f50e5f3f3d044558801783381c4/ballpark/utils.py#L100-L110
def vectorize(fn): """ Allows a method to accept a list argument, but internally deal only with a single item of that list. """ @functools.wraps(fn) def vectorized_function(values, *vargs, **kwargs): return [fn(value, *vargs, **kwargs) for value in values] return vectorized_function
[ "def", "vectorize", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "vectorized_function", "(", "values", ",", "*", "vargs", ",", "*", "*", "kwargs", ")", ":", "return", "[", "fn", "(", "value", ",", "*", "vargs", ",", ...
Allows a method to accept a list argument, but internally deal only with a single item of that list.
[ "Allows", "a", "method", "to", "accept", "a", "list", "argument", "but", "internally", "deal", "only", "with", "a", "single", "item", "of", "that", "list", "." ]
python
train
28.272727
scanny/python-pptx
pptx/shapes/shapetree.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/shapetree.py#L351-L357
def index(self, shape): """Return the index of *shape* in this sequence. Raises |ValueError| if *shape* is not in the collection. """ shape_elms = list(self._element.iter_shape_elms()) return shape_elms.index(shape.element)
[ "def", "index", "(", "self", ",", "shape", ")", ":", "shape_elms", "=", "list", "(", "self", ".", "_element", ".", "iter_shape_elms", "(", ")", ")", "return", "shape_elms", ".", "index", "(", "shape", ".", "element", ")" ]
Return the index of *shape* in this sequence. Raises |ValueError| if *shape* is not in the collection.
[ "Return", "the", "index", "of", "*", "shape", "*", "in", "this", "sequence", "." ]
python
train
36.857143
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L1668-L1684
def get_atom(value): """atom = [CFWS] 1*atext [CFWS] """ atom = Atom() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) if value and value[0] in ATOM_ENDS: raise errors.HeaderParseError( "expected atom but found '{}'".format(value)) token, value = get_atext(value) atom.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) return atom, value
[ "def", "get_atom", "(", "value", ")", ":", "atom", "=", "Atom", "(", ")", "if", "value", "and", "value", "[", "0", "]", "in", "CFWS_LEADER", ":", "token", ",", "value", "=", "get_cfws", "(", "value", ")", "atom", ".", "append", "(", "token", ")", ...
atom = [CFWS] 1*atext [CFWS]
[ "atom", "=", "[", "CFWS", "]", "1", "*", "atext", "[", "CFWS", "]" ]
python
train
29.529412
rwl/godot
godot/node.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/node.py#L593-L622
def arrange_all(self): """ Arrange the components of the node using Graphviz. """ # FIXME: Circular reference avoidance. import godot.dot_data_parser import godot.graph graph = godot.graph.Graph(ID="g") graph.add_node(self) print "GRAPH DOT:\n", str(graph) xdot_data = graph.create( format = "xdot" ) print "XDOT DATA:\n", xdot_data parser = godot.dot_data_parser.GodotDataParser() # parser.parse_dot_data(xdot_data) flat_data = xdot_data.replace('\\\n','') tokens = parser.dotparser.parseString(flat_data)[0] for element in tokens[3]: print "TOK:", element cmd = element[0] if cmd == 'add_node': cmd, nodename, opts = element assert nodename == self.ID print "OPTIONS:", opts self.set( **opts )
[ "def", "arrange_all", "(", "self", ")", ":", "# FIXME: Circular reference avoidance.", "import", "godot", ".", "dot_data_parser", "import", "godot", ".", "graph", "graph", "=", "godot", ".", "graph", ".", "Graph", "(", "ID", "=", "\"g\"", ")", "graph", ".", ...
Arrange the components of the node using Graphviz.
[ "Arrange", "the", "components", "of", "the", "node", "using", "Graphviz", "." ]
python
test
29.733333
cole/aiosmtplib
src/aiosmtplib/email.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/email.py#L84-L105
def _extract_recipients( message: Message, resent_dates: List[Union[str, Header]] = None ) -> List[str]: """ Extract the recipients from the message object given. """ recipients = [] # type: List[str] if resent_dates: recipient_headers = ("Resent-To", "Resent-Cc", "Resent-Bcc") else: recipient_headers = ("To", "Cc", "Bcc") for header in recipient_headers: recipients.extend(message.get_all(header, [])) # type: ignore parsed_recipients = [ str(email.utils.formataddr(address)) for address in email.utils.getaddresses(recipients) ] return parsed_recipients
[ "def", "_extract_recipients", "(", "message", ":", "Message", ",", "resent_dates", ":", "List", "[", "Union", "[", "str", ",", "Header", "]", "]", "=", "None", ")", "->", "List", "[", "str", "]", ":", "recipients", "=", "[", "]", "# type: List[str]", "...
Extract the recipients from the message object given.
[ "Extract", "the", "recipients", "from", "the", "message", "object", "given", "." ]
python
train
28.454545
greenbone/ospd
ospd/ospd.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L661-L665
def finish_scan(self, scan_id): """ Sets a scan as finished. """ self.set_scan_progress(scan_id, 100) self.set_scan_status(scan_id, ScanStatus.FINISHED) logger.info("%s: Scan finished.", scan_id)
[ "def", "finish_scan", "(", "self", ",", "scan_id", ")", ":", "self", ".", "set_scan_progress", "(", "scan_id", ",", "100", ")", "self", ".", "set_scan_status", "(", "scan_id", ",", "ScanStatus", ".", "FINISHED", ")", "logger", ".", "info", "(", "\"%s: Scan...
Sets a scan as finished.
[ "Sets", "a", "scan", "as", "finished", "." ]
python
train
44.6
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L215-L235
def get_gid(path, follow_symlinks=True): ''' Return the id of the group that owns a given file path file or directory of which to get the gid follow_symlinks indicated if symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_gid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)
[ "def", "get_gid", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'gid'", ",", "-", "1", ...
Return the id of the group that owns a given file path file or directory of which to get the gid follow_symlinks indicated if symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_gid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added
[ "Return", "the", "id", "of", "the", "group", "that", "owns", "a", "given", "file" ]
python
train
22.47619
twilio/twilio-python
twilio/rest/sync/v1/service/sync_list/sync_list_permission.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/sync_list/sync_list_permission.py#L118-L132
def get(self, identity): """ Constructs a SyncListPermissionContext :param identity: Identity of the user to whom the Sync List Permission applies. :returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext """ return SyncListPermissionContext( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['list_sid'], identity=identity, )
[ "def", "get", "(", "self", ",", "identity", ")", ":", "return", "SyncListPermissionContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "list_sid", "=", "self", ".", "_solution", "[", "'...
Constructs a SyncListPermissionContext :param identity: Identity of the user to whom the Sync List Permission applies. :returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
[ "Constructs", "a", "SyncListPermissionContext" ]
python
train
39.266667
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L3616-L3638
def remove_account_user_from_groups(self, account_id, user_id, body, **kwargs): # noqa: E501 """Remove user from groups. # noqa: E501 An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str user_id: The ID of the user to be removed from the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501 else: (data) = self.remove_account_user_from_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501 return data
[ "def", "remove_account_user_from_groups", "(", "self", ",", "account_id", ",", "user_id", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchrono...
Remove user from groups. # noqa: E501 An endpoint for removing user from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_account_user_from_groups(account_id, user_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str user_id: The ID of the user to be removed from the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Remove", "user", "from", "groups", ".", "#", "noqa", ":", "E501" ]
python
train
65.608696
tgbugs/pyontutils
ilxutils/ilxutils/simple_scicrunch_client.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L130-L132
def is_equal(self, string1, string2): ''' Simple string comparator ''' return string1.lower().strip() == string2.lower().strip()
[ "def", "is_equal", "(", "self", ",", "string1", ",", "string2", ")", ":", "return", "string1", ".", "lower", "(", ")", ".", "strip", "(", ")", "==", "string2", ".", "lower", "(", ")", ".", "strip", "(", ")" ]
Simple string comparator
[ "Simple", "string", "comparator" ]
python
train
47.333333
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L273-L456
def _dequeue(self, local_prof): """ **Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the completed queus and updates the copy of workflow that exists in the WFprocessor object. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process. """ try: local_prof.prof('dequeue-thread started', uid=self._uid) self._logger.info('Dequeue thread started') mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._dequeue_thread_terminate.is_set(): try: method_frame, header_frame, body = mq_channel.basic_get( queue=self._completed_queue[0]) if body: # Get task from the message completed_task = Task() completed_task.from_dict(json.loads(body)) self._logger.info( 'Got finished task %s from queue' % (completed_task.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUEING, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Traverse the entire workflow to find out the correct Task for pipe in self._workflow: with pipe.lock: if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)): if completed_task.parent_pipeline['uid'] == pipe.uid: self._logger.debug( 'Found parent pipeline: %s' % pipe.uid) for stage in pipe.stages: if completed_task.parent_stage['uid'] == stage.uid: self._logger.debug( 'Found parent stage: %s' % (stage.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUED, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if not completed_task.exit_code: completed_task.state = states.DONE 
else: completed_task.state = states.FAILED for task in stage.tasks: if task.uid == completed_task.uid: task.state = str( completed_task.state) if (task.state == states.FAILED) and (self._resubmit_failed): task.state = states.INITIAL transition(obj=task, obj_type='Task', new_state=task.state, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if stage._check_stage_complete(): transition(obj=stage, obj_type='Stage', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Check if Stage has a post-exec that needs to be # executed if stage.post_exec: try: self._logger.info('Executing post-exec for stage %s' % stage.uid) self._prof.prof('Adap: executing post-exec', uid=self._uid) stage.post_exec() self._logger.info( 'Post-exec executed for stage %s' % stage.uid) self._prof.prof( 'Adap: post-exec executed', uid=self._uid) except Exception, ex: self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid) raise pipe._increment_stage() if pipe.completed: transition(obj=pipe, obj_type='Pipeline', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Found the task and processed it -- no more iterations needed break # Found the stage and processed it -- no more iterations neeeded break # Found the pipeline and processed it -- no more iterations neeeded break mq_channel.basic_ack( delivery_tag=method_frame.delivery_tag) # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now except Exception, ex: self._logger.exception( 'Unable to receive message from completed queue: %s' % ex) raise self._logger.info('Terminated dequeue thread') mq_connection.close() local_prof.prof('terminating dequeue-thread', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution 
interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit gracefully...') mq_connection.close() raise KeyboardInterrupt except Exception, ex: self._logger.exception('Error in dequeue-thread: %s' % ex) try: mq_connection.close() except: self._logger.warning('mq_connection not created') raise EnTKError(ex)
[ "def", "_dequeue", "(", "self", ",", "local_prof", ")", ":", "try", ":", "local_prof", ".", "prof", "(", "'dequeue-thread started'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Dequeue thread started'", ")", "mq_c...
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the completed queus and updates the copy of workflow that exists in the WFprocessor object. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process.
[ "**", "Purpose", "**", ":", "This", "is", "the", "function", "that", "is", "run", "in", "the", "dequeue", "thread", ".", "This", "function", "extracts", "Tasks", "from", "the", "completed", "queus", "and", "updates", "the", "copy", "of", "workflow", "that"...
python
train
52.032609
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2369-L2463
def get_ZXY_data_IFFT(Data, zf, xf, yf, zwidth=10000, xwidth=5000, ywidth=5000, timeStart=None, timeEnd=None, show_fig=True): """ Given a Data object and the frequencies of the z, x and y peaks (and some optional parameters for the created filters) this function extracts the individual z, x and y signals (in volts) by creating IIR filters and filtering the Data. Parameters ---------- Data : DataObject DataObject containing the data for which you want to extract the z, x and y signals. zf : float The frequency of the z peak in the PSD xf : float The frequency of the x peak in the PSD yf : float The frequency of the y peak in the PSD zwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Z. xwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter X. ywidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Y. timeStart : float, optional Starting time for filtering timeEnd : float, optional Ending time for filtering show_fig : bool, optional If True - plot unfiltered and filtered PSD for z, x and y. If False - don't plot anything Returns ------- zdata : ndarray Array containing the z signal in volts with time. xdata : ndarray Array containing the x signal in volts with time. ydata : ndarray Array containing the y signal in volts with time. timedata : ndarray Array containing the time data to go with the z, x, and y signal. 
""" if timeStart == None: timeStart = Data.timeStart if timeEnd == None: timeEnd = Data.timeEnd time = Data.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] SAMPLEFREQ = Data.SampleFreq input_signal = Data.voltage[StartIndex: EndIndex] zdata = IFFT_filter(input_signal, SAMPLEFREQ, zf - zwidth / 2, zf + zwidth / 2) xdata = IFFT_filter(input_signal, SAMPLEFREQ, xf - xwidth / 2, xf + xwidth / 2) ydata = IFFT_filter(input_signal, SAMPLEFREQ, yf - ywidth / 2, yf + ywidth / 2) if show_fig == True: NPerSegment = len(Data.time) if NPerSegment > 1e7: NPerSegment = int(1e7) f, PSD = scipy.signal.welch( input_signal, SAMPLEFREQ, nperseg=NPerSegment) f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegment) f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegment) f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegment) _plt.plot(f, PSD) _plt.plot(f_z, PSD_z, label="z") _plt.plot(f_x, PSD_x, label="x") _plt.plot(f_y, PSD_y, label="y") _plt.legend(loc="best") _plt.xlim([zf - zwidth, yf + ywidth]) _plt.xlabel('Frequency (Hz)') _plt.ylabel(r'$S_{xx}$ ($V^2/Hz$)') _plt.semilogy() _plt.title("filepath = %s" % (Data.filepath)) _plt.show() timedata = time[StartIndex: EndIndex] return zdata, xdata, ydata, timedata
[ "def", "get_ZXY_data_IFFT", "(", "Data", ",", "zf", ",", "xf", ",", "yf", ",", "zwidth", "=", "10000", ",", "xwidth", "=", "5000", ",", "ywidth", "=", "5000", ",", "timeStart", "=", "None", ",", "timeEnd", "=", "None", ",", "show_fig", "=", "True", ...
Given a Data object and the frequencies of the z, x and y peaks (and some optional parameters for the created filters) this function extracts the individual z, x and y signals (in volts) by creating IIR filters and filtering the Data. Parameters ---------- Data : DataObject DataObject containing the data for which you want to extract the z, x and y signals. zf : float The frequency of the z peak in the PSD xf : float The frequency of the x peak in the PSD yf : float The frequency of the y peak in the PSD zwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Z. xwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter X. ywidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Y. timeStart : float, optional Starting time for filtering timeEnd : float, optional Ending time for filtering show_fig : bool, optional If True - plot unfiltered and filtered PSD for z, x and y. If False - don't plot anything Returns ------- zdata : ndarray Array containing the z signal in volts with time. xdata : ndarray Array containing the x signal in volts with time. ydata : ndarray Array containing the y signal in volts with time. timedata : ndarray Array containing the time data to go with the z, x, and y signal.
[ "Given", "a", "Data", "object", "and", "the", "frequencies", "of", "the", "z", "x", "and", "y", "peaks", "(", "and", "some", "optional", "parameters", "for", "the", "created", "filters", ")", "this", "function", "extracts", "the", "individual", "z", "x", ...
python
train
35.231579
aarongarrett/inspyred
inspyred/benchmarks.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/benchmarks.py#L980-L984
def generator(self, random, args): """Return a candidate solution for an evolutionary computation.""" locations = [i for i in range(len(self.weights))] random.shuffle(locations) return locations
[ "def", "generator", "(", "self", ",", "random", ",", "args", ")", ":", "locations", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "self", ".", "weights", ")", ")", "]", "random", ".", "shuffle", "(", "locations", ")", "return", "locati...
Return a candidate solution for an evolutionary computation.
[ "Return", "a", "candidate", "solution", "for", "an", "evolutionary", "computation", "." ]
python
train
44.4
chop-dbhi/varify-data-warehouse
vdw/samples/migrations/0008_force_migrate_default_cohort_and_project.py
https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/samples/migrations/0008_force_migrate_default_cohort_and_project.py#L11-L33
def forwards(self, orm): "Write your forwards methods here." Project = orm['samples.Project'] Cohort = orm['samples.Cohort'] now = datetime.datetime.now() # Create default project try: project = Project.objects.get(name=DEFAULT_PROJECT_NAME) except Project.DoesNotExist: project = Project(name=DEFAULT_PROJECT_NAME, label=DEFAULT_PROJECT_NAME, created=now, modified=now) project.save() # Create default cohort try: cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME) except Cohort.DoesNotExist: cohort = Cohort(name=DEFAULT_COHORT_NAME, published=True, autocreated=True, created=now, modified=now) cohort.save()
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "Project", "=", "orm", "[", "'samples.Project'", "]", "Cohort", "=", "orm", "[", "'samples.Cohort'", "]", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Create default project", "try"...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
python
train
33.869565
pandeylab/pythomics
pythomics/parsers/fasta.py
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/parsers/fasta.py#L90-L125
def get_sequence(self, chrom, start, end, strand='+', indexing=(-1, 0)): """ chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11', start/end are coordinates, which default to python style [0,1) internally. So positions should be entered with (1,1) indexing. This can be changed with the indexing keyword. The default is for everything to be relative to the positive strand """ try: divisor = int(self.sequence_index[chrom][2]) except KeyError: self.open_fasta_index() try: divisor = int(self.sequence_index[chrom][2]) except KeyError: sys.stderr.write("%s cannot be found within the fasta index file.\n" % chrom) return "" start+=indexing[0] end+=indexing[1] #is it a valid position? if ( start < 0 or end > int(self.sequence_index[chrom][0] )): raise ValueError("The range %d-%d is invalid. Valid range for this feature is 1-%d." % (start-indexing[0], end-indexing[1], int(self.sequence_index[chrom][0]))) #go to start of chromosome seekpos = int(self.sequence_index[chrom][1]) #find how many newlines we have seekpos += start+start/divisor slen = end-start endpos = int(slen + (slen/divisor) + 1) #a hack of sorts but it works and is easy self.fasta_file.seek(seekpos, 0) output = self.fasta_file.read(endpos) output = output.replace('\n', '') out = output[:slen] if strand == '+' or strand == 1: return out if strand == '-' or strand == -1: return _reverse_complement(out)
[ "def", "get_sequence", "(", "self", ",", "chrom", ",", "start", ",", "end", ",", "strand", "=", "'+'", ",", "indexing", "=", "(", "-", "1", ",", "0", ")", ")", ":", "try", ":", "divisor", "=", "int", "(", "self", ".", "sequence_index", "[", "chro...
chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11', start/end are coordinates, which default to python style [0,1) internally. So positions should be entered with (1,1) indexing. This can be changed with the indexing keyword. The default is for everything to be relative to the positive strand
[ "chromosome", "is", "entered", "relative", "to", "the", "file", "it", "was", "built", "with", "so", "it", "can", "be", "chr11", "or", "11", "start", "/", "end", "are", "coordinates", "which", "default", "to", "python", "style", "[", "0", "1", ")", "int...
python
train
50.111111
python-diamond/Diamond
src/collectors/snmpinterface/snmpinterface.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/snmpinterface/snmpinterface.py#L118-L215
def collect_snmp(self, device, host, port, community): """ Collect SNMP interface data from device """ # Log self.log.info("Collecting SNMP interface statistics from: %s", device) # Define a list of interface indexes ifIndexes = [] # Get Interface Indexes ifIndexOid = '.'.join([self.IF_MIB_INDEX_OID]) ifIndexData = self.walk(ifIndexOid, host, port, community) ifIndexes = [v for v in ifIndexData.values()] for ifIndex in ifIndexes: # Get Interface Type ifTypeOid = '.'.join([self.IF_MIB_TYPE_OID, ifIndex]) ifTypeData = self.get(ifTypeOid, host, port, community) if ifTypeData[ifTypeOid] not in self.IF_TYPES: # Skip Interface continue # Get Interface Name ifNameOid = '.'.join([self.IF_MIB_NAME_OID, ifIndex]) ifNameData = self.get(ifNameOid, host, port, community) ifName = ifNameData[ifNameOid] # Remove quotes from string ifName = re.sub(r'(\"|\')', '', ifName) # Get Gauges for gaugeName, gaugeOid in self.IF_MIB_GAUGE_OID_TABLE.items(): ifGaugeOid = '.'.join([self.IF_MIB_GAUGE_OID_TABLE[gaugeName], ifIndex]) ifGaugeData = self.get(ifGaugeOid, host, port, community) ifGaugeValue = ifGaugeData[ifGaugeOid] if not ifGaugeValue: continue # Get Metric Name and Value metricIfDescr = re.sub(r'\W', '_', ifName) metricName = '.'.join([metricIfDescr, gaugeName]) metricValue = int(ifGaugeValue) # Get Metric Path metricPath = '.'.join(['devices', device, self.config['path'], metricName]) # Publish Metric self.publish_gauge(metricPath, metricValue) # Get counters (64bit) counterItems = self.IF_MIB_COUNTER_OID_TABLE.items() for counterName, counterOid in counterItems: ifCounterOid = '.'.join( [self.IF_MIB_COUNTER_OID_TABLE[counterName], ifIndex]) ifCounterData = self.get(ifCounterOid, host, port, community) ifCounterValue = ifCounterData[ifCounterOid] if not ifCounterValue: continue # Get Metric Name and Value metricIfDescr = re.sub(r'\W', '_', ifName) if counterName in ['ifHCInOctets', 'ifHCOutOctets']: for unit in self.config['byte_unit']: # Convert Metric metricName = 
'.'.join([metricIfDescr, counterName.replace('Octets', unit)]) metricValue = diamond.convertor.binary.convert( value=ifCounterValue, oldUnit='byte', newUnit=unit) # Get Metric Path metricPath = '.'.join(['devices', device, self.config['path'], metricName]) # Publish Metric self.publish_counter(metricPath, metricValue, max_value=18446744073709600000, ) else: metricName = '.'.join([metricIfDescr, counterName]) metricValue = int(ifCounterValue) # Get Metric Path metricPath = '.'.join(['devices', device, self.config['path'], metricName]) # Publish Metric self.publish_counter(metricPath, metricValue, max_value=18446744073709600000, )
[ "def", "collect_snmp", "(", "self", ",", "device", ",", "host", ",", "port", ",", "community", ")", ":", "# Log", "self", ".", "log", ".", "info", "(", "\"Collecting SNMP interface statistics from: %s\"", ",", "device", ")", "# Define a list of interface indexes", ...
Collect SNMP interface data from device
[ "Collect", "SNMP", "interface", "data", "from", "device" ]
python
train
45.020408
pypa/pipenv
pipenv/patched/notpip/_vendor/ipaddress.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/ipaddress.py#L286-L303
def _find_address_range(addresses): """Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence. """ it = iter(addresses) first = last = next(it) for ip in it: if ip._ip != last._ip + 1: yield first, last first = ip last = ip yield first, last
[ "def", "_find_address_range", "(", "addresses", ")", ":", "it", "=", "iter", "(", "addresses", ")", "first", "=", "last", "=", "next", "(", "it", ")", "for", "ip", "in", "it", ":", "if", "ip", ".", "_ip", "!=", "last", ".", "_ip", "+", "1", ":", ...
Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence.
[ "Find", "a", "sequence", "of", "sorted", "deduplicated", "IPv#Address", "." ]
python
train
24.166667
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/choi.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/choi.py#L134-L185
def compose(self, other, qargs=None, front=False): """Return the composition channel self∘other. Args: other (QuantumChannel): a quantum channel. qargs (list): a list of subsystem positions to compose other on. front (bool): If False compose in standard order other(self(input)) otherwise compose in reverse order self(other(input)) [default: False] Returns: Choi: The composition channel as a Choi object. Raises: QiskitError: if other cannot be converted to a channel or has incompatible dimensions. """ if qargs is not None: return Choi( SuperOp(self).compose(other, qargs=qargs, front=front)) # Convert to Choi matrix if not isinstance(other, Choi): other = Choi(other) # Check dimensions match up if front and self._input_dim != other._output_dim: raise QiskitError( 'input_dim of self must match output_dim of other') if not front and self._output_dim != other._input_dim: raise QiskitError( 'input_dim of other must match output_dim of self') if front: first = np.reshape(other._data, other._bipartite_shape) second = np.reshape(self._data, self._bipartite_shape) input_dim = other._input_dim input_dims = other.input_dims() output_dim = self._output_dim output_dims = self.output_dims() else: first = np.reshape(self._data, self._bipartite_shape) second = np.reshape(other._data, other._bipartite_shape) input_dim = self._input_dim input_dims = self.input_dims() output_dim = other._output_dim output_dims = other.output_dims() # Contract Choi matrices for composition data = np.reshape( np.einsum('iAjB,AkBl->ikjl', first, second), (input_dim * output_dim, input_dim * output_dim)) return Choi(data, input_dims, output_dims)
[ "def", "compose", "(", "self", ",", "other", ",", "qargs", "=", "None", ",", "front", "=", "False", ")", ":", "if", "qargs", "is", "not", "None", ":", "return", "Choi", "(", "SuperOp", "(", "self", ")", ".", "compose", "(", "other", ",", "qargs", ...
Return the composition channel self∘other. Args: other (QuantumChannel): a quantum channel. qargs (list): a list of subsystem positions to compose other on. front (bool): If False compose in standard order other(self(input)) otherwise compose in reverse order self(other(input)) [default: False] Returns: Choi: The composition channel as a Choi object. Raises: QiskitError: if other cannot be converted to a channel or has incompatible dimensions.
[ "Return", "the", "composition", "channel", "self∘other", "." ]
python
test
40.461538
grabbles/grabbit
grabbit/core.py
https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L845-L929
def get_nearest(self, path, return_type='file', strict=True, all_=False, ignore_strict_entities=None, full_search=False, **kwargs): ''' Walk up the file tree from the specified path and return the nearest matching file(s). Args: path (str): The file to search from. return_type (str): What to return; must be one of 'file' (default) or 'tuple'. strict (bool): When True, all entities present in both the input path and the target file(s) must match perfectly. When False, files will be ordered by the number of matching entities, and partial matches will be allowed. all_ (bool): When True, returns all matching files. When False (default), only returns the first match. ignore_strict_entities (list): Optional list of entities to exclude from strict matching when strict is True. This allows one to search, e.g., for files of a different type while matching all other entities perfectly by passing ignore_strict_entities=['type']. full_search (bool): If True, searches all indexed files, even if they don't share a common root with the provided path. If False, only files that share a common root will be scanned. kwargs: Optional keywords to pass on to .get(). 
''' entities = {} for ent in self.entities.values(): m = ent.regex.search(path) if m: entities[ent.name] = ent._astype(m.group(1)) # Remove any entities we want to ignore when strict matching is on if strict and ignore_strict_entities is not None: for k in ignore_strict_entities: entities.pop(k, None) results = self.get(return_type='file', **kwargs) folders = defaultdict(list) for filename in results: f = self.get_file(filename) folders[f.dirname].append(f) def count_matches(f): f_ents = f.entities keys = set(entities.keys()) & set(f_ents.keys()) shared = len(keys) return [shared, sum([entities[k] == f_ents[k] for k in keys])] matches = [] search_paths = [] while True: if path in folders and folders[path]: search_paths.append(path) parent = dirname(path) if parent == path: break path = parent if full_search: unchecked = set(folders.keys()) - set(search_paths) search_paths.extend(path for path in unchecked if folders[path]) for path in search_paths: # Sort by number of matching entities. Also store number of # common entities, for filtering when strict=True. num_ents = [[f] + count_matches(f) for f in folders[path]] # Filter out imperfect matches (i.e., where number of common # entities does not equal number of matching entities). if strict: num_ents = [f for f in num_ents if f[1] == f[2]] num_ents.sort(key=lambda x: x[2], reverse=True) if num_ents: matches.append(num_ents[0][0]) if not all_: break matches = [m.path if return_type == 'file' else m.as_named_tuple() for m in matches] return matches if all_ else matches[0] if matches else None
[ "def", "get_nearest", "(", "self", ",", "path", ",", "return_type", "=", "'file'", ",", "strict", "=", "True", ",", "all_", "=", "False", ",", "ignore_strict_entities", "=", "None", ",", "full_search", "=", "False", ",", "*", "*", "kwargs", ")", ":", "...
Walk up the file tree from the specified path and return the nearest matching file(s). Args: path (str): The file to search from. return_type (str): What to return; must be one of 'file' (default) or 'tuple'. strict (bool): When True, all entities present in both the input path and the target file(s) must match perfectly. When False, files will be ordered by the number of matching entities, and partial matches will be allowed. all_ (bool): When True, returns all matching files. When False (default), only returns the first match. ignore_strict_entities (list): Optional list of entities to exclude from strict matching when strict is True. This allows one to search, e.g., for files of a different type while matching all other entities perfectly by passing ignore_strict_entities=['type']. full_search (bool): If True, searches all indexed files, even if they don't share a common root with the provided path. If False, only files that share a common root will be scanned. kwargs: Optional keywords to pass on to .get().
[ "Walk", "up", "the", "file", "tree", "from", "the", "specified", "path", "and", "return", "the", "nearest", "matching", "file", "(", "s", ")", "." ]
python
train
41.282353
craigahobbs/chisel
src/chisel/app.py
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L200-L210
def response(self, status, content_type, content, headers=None): """ Send an HTTP response """ assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes' response_headers = [('Content-Type', content_type)] if headers: response_headers.extend(headers) self.start_response(status, response_headers) return content
[ "def", "response", "(", "self", ",", "status", ",", "content_type", ",", "content", ",", "headers", "=", "None", ")", ":", "assert", "not", "isinstance", "(", "content", ",", "(", "str", ",", "bytes", ")", ")", ",", "'response content cannot be of type str o...
Send an HTTP response
[ "Send", "an", "HTTP", "response" ]
python
train
37.727273
Jarn/jarn.mkrelease
jarn/mkrelease/mkrelease.py
https://github.com/Jarn/jarn.mkrelease/blob/844377f37a3cdc0a154148790a926f991019ec4a/jarn/mkrelease/mkrelease.py#L250-L277
def set_defaults(self, config_file): """Set defaults. """ self.defaults = Defaults(config_file) self.locations = Locations(self.defaults) self.python = Python() self.setuptools = Setuptools() self.scp = SCP() self.scms = SCMFactory() self.urlparser = URLParser() self.skipcommit = not self.defaults.commit self.skiptag = not self.defaults.tag self.skipregister = False # per server self.skipupload = False # special self.push = self.defaults.push self.develop = False # special self.quiet = self.defaults.quiet self.sign = False # per server self.list = False self.manifest = self.defaults.manifest self.identity = '' # per server self.branch = '' self.scmtype = '' self.infoflags = [] self.formats = [] self.distributions = [] self.directory = os.curdir self.scm = None
[ "def", "set_defaults", "(", "self", ",", "config_file", ")", ":", "self", ".", "defaults", "=", "Defaults", "(", "config_file", ")", "self", ".", "locations", "=", "Locations", "(", "self", ".", "defaults", ")", "self", ".", "python", "=", "Python", "(",...
Set defaults.
[ "Set", "defaults", "." ]
python
train
35.392857
dsoprea/PySecure
pysecure/sftp_mirror.py
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/sftp_mirror.py#L26-L42
def mirror(self, handler, path_from, path_to, log_files=False): """Recursively mirror the contents of "path_from" into "path_to". "handler" should be self.mirror_to_local_no_recursion or self.mirror_to_remote_no_recursion to represent which way the files are moving. """ q = deque(['']) while q: path = q.popleft() full_from = ('%s/%s' % (path_from, path)) if path else path_from full_to = ('%s/%s' % (path_to, path)) if path else path_to subdirs = handler(full_from, full_to, log_files) for subdir in subdirs: q.append(('%s/%s' % (path, subdir)) if path else subdir)
[ "def", "mirror", "(", "self", ",", "handler", ",", "path_from", ",", "path_to", ",", "log_files", "=", "False", ")", ":", "q", "=", "deque", "(", "[", "''", "]", ")", "while", "q", ":", "path", "=", "q", ".", "popleft", "(", ")", "full_from", "="...
Recursively mirror the contents of "path_from" into "path_to". "handler" should be self.mirror_to_local_no_recursion or self.mirror_to_remote_no_recursion to represent which way the files are moving.
[ "Recursively", "mirror", "the", "contents", "of", "path_from", "into", "path_to", ".", "handler", "should", "be", "self", ".", "mirror_to_local_no_recursion", "or", "self", ".", "mirror_to_remote_no_recursion", "to", "represent", "which", "way", "the", "files", "are...
python
train
41.411765
pazz/alot
alot/ui.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/ui.py#L448-L471
def buffer_focus(self, buf, redraw=True): """focus given :class:`~alot.buffers.Buffer`.""" # call pre_buffer_focus hook prehook = settings.get_hook('pre_buffer_focus') if prehook is not None: prehook(ui=self, dbm=self.dbman, buf=buf) success = False if buf not in self.buffers: logging.error('tried to focus unknown buffer') else: if self.current_buffer != buf: self.current_buffer = buf self.mode = buf.modename if isinstance(self.current_buffer, BufferlistBuffer): self.current_buffer.rebuild() self.update() success = True # call post_buffer_focus hook posthook = settings.get_hook('post_buffer_focus') if posthook is not None: posthook(ui=self, dbm=self.dbman, buf=buf, success=success)
[ "def", "buffer_focus", "(", "self", ",", "buf", ",", "redraw", "=", "True", ")", ":", "# call pre_buffer_focus hook", "prehook", "=", "settings", ".", "get_hook", "(", "'pre_buffer_focus'", ")", "if", "prehook", "is", "not", "None", ":", "prehook", "(", "ui"...
focus given :class:`~alot.buffers.Buffer`.
[ "focus", "given", ":", "class", ":", "~alot", ".", "buffers", ".", "Buffer", "." ]
python
train
36.583333
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1187-L1200
def prj_view_atype(self, *args, **kwargs): """View the, in the atype table view selected, assettype. :returns: None :rtype: None :raises: None """ if not self.cur_prj: return i = self.prj_atype_tablev.currentIndex() item = i.internalPointer() if item: atype = item.internal_data() self.view_atype(atype)
[ "def", "prj_view_atype", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_prj", ":", "return", "i", "=", "self", ".", "prj_atype_tablev", ".", "currentIndex", "(", ")", "item", "=", "i", ".", "internalP...
View the, in the atype table view selected, assettype. :returns: None :rtype: None :raises: None
[ "View", "the", "in", "the", "atype", "table", "view", "selected", "assettype", "." ]
python
train
28.5
erdc/RAPIDpy
RAPIDpy/postprocess/merge.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/postprocess/merge.py#L163-L198
def _validate_raw_nc(self): """Checks that raw netCDF file has the right dimensions and variables. Returns ------- int: Length of rivid dimension. int: Length of time dimension. Remarks: Raises exception if file doesn't validate. """ self.raw_nc_list = [] # add one for the first flow value RAPID # does not include total_time_len = 1 id_len_list = [] for rapid_output_file in self.rapid_output_file_list: qout_nc = RAPIDDataset(rapid_output_file) id_len_list.append(qout_nc.size_river_id) total_time_len += qout_nc.size_time self.raw_nc_list.append(qout_nc) # make sure river id lists are the same for id_len_undex in range(1, len(id_len_list)): if id_len_list[id_len_undex] != id_len_list[0]: raise Exception("River ID size is different in " "one of the files ...") for raw_nc_index in range(1, len(self.raw_nc_list)): if not (self.raw_nc_list[raw_nc_index].get_river_id_array() == self.raw_nc_list[0].get_river_id_array()).all(): raise Exception("River IDs are different in " "files ...") return id_len_list[0], total_time_len
[ "def", "_validate_raw_nc", "(", "self", ")", ":", "self", ".", "raw_nc_list", "=", "[", "]", "# add one for the first flow value RAPID\r", "# does not include\r", "total_time_len", "=", "1", "id_len_list", "=", "[", "]", "for", "rapid_output_file", "in", "self", "."...
Checks that raw netCDF file has the right dimensions and variables. Returns ------- int: Length of rivid dimension. int: Length of time dimension. Remarks: Raises exception if file doesn't validate.
[ "Checks", "that", "raw", "netCDF", "file", "has", "the", "right", "dimensions", "and", "variables", ".", "Returns", "-------", "int", ":", "Length", "of", "rivid", "dimension", ".", "int", ":", "Length", "of", "time", "dimension", ".", "Remarks", ":", "Rai...
python
train
38.388889
bspaans/python-mingus
mingus/extra/tunings.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tunings.py#L417-L449
def get_tunings(instrument=None, nr_of_strings=None, nr_of_courses=None): """Search tunings on instrument, strings, courses or a combination. The instrument is actually treated like a case-insensitive prefix. So asking for 'bass' yields the same tunings as 'Bass Guitar'; the string 'ba' yields all the instruments starting with 'ba'. Example: >>> tunings.get_tunings(nr_of_string = 4) >>> tunings.get_tunings('bass') """ search = '' if instrument is not None: search = str.upper(instrument) result = [] keys = _known.keys() inkeys = search in keys for x in keys: if (instrument is None or not inkeys and x.find(search) == 0 or inkeys and search == x): if nr_of_strings is None and nr_of_courses is None: result += _known[x][1].values() elif nr_of_strings is not None and nr_of_courses is None: result += [y for y in _known[x][1].itervalues() if y.count_strings() == nr_of_strings] elif nr_of_strings is None and nr_of_courses is not None: result += [y for y in _known[x][1].itervalues() if y.count_courses() == nr_of_courses] else: result += [y for y in _known[x][1].itervalues() if y.count_strings() == nr_of_strings and y.count_courses() == nr_of_courses] return result
[ "def", "get_tunings", "(", "instrument", "=", "None", ",", "nr_of_strings", "=", "None", ",", "nr_of_courses", "=", "None", ")", ":", "search", "=", "''", "if", "instrument", "is", "not", "None", ":", "search", "=", "str", ".", "upper", "(", "instrument"...
Search tunings on instrument, strings, courses or a combination. The instrument is actually treated like a case-insensitive prefix. So asking for 'bass' yields the same tunings as 'Bass Guitar'; the string 'ba' yields all the instruments starting with 'ba'. Example: >>> tunings.get_tunings(nr_of_string = 4) >>> tunings.get_tunings('bass')
[ "Search", "tunings", "on", "instrument", "strings", "courses", "or", "a", "combination", "." ]
python
train
44.090909
bxlab/bx-python
lib/bx_extras/stats.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1285-L1342
def lchisqprob(chisq,df): """ Returns the (1-tailed) probability value associated with the provided chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat. Usage: lchisqprob(chisq,df) """ BIG = 20.0 def ex(x): BIG = 20.0 if x < -BIG: return 0.0 else: return math.exp(x) if chisq <=0 or df < 1: return 1.0 a = 0.5 * chisq if df%2 == 0: even = 1 else: even = 0 if df > 1: y = ex(-a) if even: s = y else: s = 2.0 * zprob(-math.sqrt(chisq)) if (df > 2): chisq = 0.5 * (df - 1.0) if even: z = 1.0 else: z = 0.5 if a > BIG: if even: e = 0.0 else: e = math.log(math.sqrt(math.pi)) c = math.log(a) while (z <= chisq): e = math.log(z) + e s = s + ex(c*z-a-e) z = z + 1.0 return s else: if even: e = 1.0 else: e = 1.0 / math.sqrt(math.pi) / math.sqrt(a) c = 0.0 while (z <= chisq): e = e * (a/float(z)) c = c + e z = z + 1.0 return (c*y+s) else: return s
[ "def", "lchisqprob", "(", "chisq", ",", "df", ")", ":", "BIG", "=", "20.0", "def", "ex", "(", "x", ")", ":", "BIG", "=", "20.0", "if", "x", "<", "-", "BIG", ":", "return", "0.0", "else", ":", "return", "math", ".", "exp", "(", "x", ")", "if",...
Returns the (1-tailed) probability value associated with the provided chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat. Usage: lchisqprob(chisq,df)
[ "Returns", "the", "(", "1", "-", "tailed", ")", "probability", "value", "associated", "with", "the", "provided", "chi", "-", "square", "value", "and", "df", ".", "Adapted", "from", "chisq", ".", "c", "in", "Gary", "Perlman", "s", "|Stat", "." ]
python
train
22.465517
Neurita/boyle
boyle/utils/strings.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L209-L238
def where_is(strings, pattern, n=1, lookup_func=re.match): """Return index of the nth match found of pattern in strings Parameters ---------- strings: list of str List of strings pattern: str Pattern to be matched nth: int Number of times the match must happen to return the item index. lookup_func: callable Function to match each item in strings to the pattern, e.g., re.match or re.search. Returns ------- index: int Index of the nth item that matches the pattern. If there are no n matches will return -1 """ count = 0 for idx, item in enumerate(strings): if lookup_func(pattern, item): count += 1 if count == n: return idx return -1
[ "def", "where_is", "(", "strings", ",", "pattern", ",", "n", "=", "1", ",", "lookup_func", "=", "re", ".", "match", ")", ":", "count", "=", "0", "for", "idx", ",", "item", "in", "enumerate", "(", "strings", ")", ":", "if", "lookup_func", "(", "patt...
Return index of the nth match found of pattern in strings Parameters ---------- strings: list of str List of strings pattern: str Pattern to be matched nth: int Number of times the match must happen to return the item index. lookup_func: callable Function to match each item in strings to the pattern, e.g., re.match or re.search. Returns ------- index: int Index of the nth item that matches the pattern. If there are no n matches will return -1
[ "Return", "index", "of", "the", "nth", "match", "found", "of", "pattern", "in", "strings" ]
python
valid
25.5
quodlibet/mutagen
mutagen/musepack.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/musepack.py#L36-L57
def _parse_sv8_int(fileobj, limit=9): """Reads (max limit) bytes from fileobj until the MSB is zero. All 7 LSB will be merged to a big endian uint. Raises ValueError in case not MSB is zero, or EOFError in case the file ended before limit is reached. Returns (parsed number, number of bytes read) """ num = 0 for i in xrange(limit): c = fileobj.read(1) if len(c) != 1: raise EOFError c = bytearray(c) num = (num << 7) | (c[0] & 0x7F) if not c[0] & 0x80: return num, i + 1 if limit > 0: raise ValueError return 0, 0
[ "def", "_parse_sv8_int", "(", "fileobj", ",", "limit", "=", "9", ")", ":", "num", "=", "0", "for", "i", "in", "xrange", "(", "limit", ")", ":", "c", "=", "fileobj", ".", "read", "(", "1", ")", "if", "len", "(", "c", ")", "!=", "1", ":", "rais...
Reads (max limit) bytes from fileobj until the MSB is zero. All 7 LSB will be merged to a big endian uint. Raises ValueError in case not MSB is zero, or EOFError in case the file ended before limit is reached. Returns (parsed number, number of bytes read)
[ "Reads", "(", "max", "limit", ")", "bytes", "from", "fileobj", "until", "the", "MSB", "is", "zero", ".", "All", "7", "LSB", "will", "be", "merged", "to", "a", "big", "endian", "uint", "." ]
python
train
27.636364
obilaniu/Nauka
src/nauka/exp/experiment.py
https://github.com/obilaniu/Nauka/blob/1492a4f9d204a868c1a8a1d327bd108490b856b4/src/nauka/exp/experiment.py#L220-L231
def strategyKLogN(kls, n, k=4): """Return the directory names to preserve under the KLogN purge strategy.""" assert(k>1) s = set([n]) i = 0 while k**i <= n: s.update(range(n, n-k*k**i, -k**i)) i += 1 n -= n % k**i return set(map(str, filter(lambda x:x>=0, s)))
[ "def", "strategyKLogN", "(", "kls", ",", "n", ",", "k", "=", "4", ")", ":", "assert", "(", "k", ">", "1", ")", "s", "=", "set", "(", "[", "n", "]", ")", "i", "=", "0", "while", "k", "**", "i", "<=", "n", ":", "s", ".", "update", "(", "r...
Return the directory names to preserve under the KLogN purge strategy.
[ "Return", "the", "directory", "names", "to", "preserve", "under", "the", "KLogN", "purge", "strategy", "." ]
python
train
23
Nachtfeuer/pipeline
spline/components/tasks.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L187-L202
def process_shells_ordered(self, shells): """Processing a list of shells one after the other.""" output = [] for shell in shells: entry = shell['entry'] config = ShellConfig(script=entry['script'], title=entry['title'] if 'title' in entry else '', model=shell['model'], env=shell['env'], item=shell['item'], dry_run=shell['dry_run'], debug=shell['debug'], strict=shell['strict'], variables=shell['variables'], temporary_scripts_path=shell['temporary_scripts_path']) result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config)) output += result.output self.__handle_variable(entry, result.output) if not result.success: return {'success': False, 'output': output} return {'success': True, 'output': output}
[ "def", "process_shells_ordered", "(", "self", ",", "shells", ")", ":", "output", "=", "[", "]", "for", "shell", "in", "shells", ":", "entry", "=", "shell", "[", "'entry'", "]", "config", "=", "ShellConfig", "(", "script", "=", "entry", "[", "'script'", ...
Processing a list of shells one after the other.
[ "Processing", "a", "list", "of", "shells", "one", "after", "the", "other", "." ]
python
train
60.75
projectatomic/osbs-client
osbs/build/plugins_configuration.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L149-L173
def adjust_for_scratch(self): """ Remove certain plugins in order to handle the "scratch build" scenario. Scratch builds must not affect subsequent builds, and should not be imported into Koji. """ if self.user_params.scratch.value: remove_plugins = [ ("prebuild_plugins", "koji_parent"), ("postbuild_plugins", "compress"), # required only to make an archive for Koji ("postbuild_plugins", "pulp_pull"), # required only to make an archive for Koji ("postbuild_plugins", "compare_components"), ("postbuild_plugins", "import_image"), ("exit_plugins", "koji_promote"), ("exit_plugins", "koji_tag_build"), ("exit_plugins", "import_image"), ("prebuild_plugins", "check_and_set_rebuild"), ("prebuild_plugins", "stop_autorebuild_if_disabled") ] if not self.has_tag_suffixes_placeholder(): remove_plugins.append(("postbuild_plugins", "tag_from_config")) for when, which in remove_plugins: self.pt.remove_plugin(when, which, 'removed from scratch build request')
[ "def", "adjust_for_scratch", "(", "self", ")", ":", "if", "self", ".", "user_params", ".", "scratch", ".", "value", ":", "remove_plugins", "=", "[", "(", "\"prebuild_plugins\"", ",", "\"koji_parent\"", ")", ",", "(", "\"postbuild_plugins\"", ",", "\"compress\"",...
Remove certain plugins in order to handle the "scratch build" scenario. Scratch builds must not affect subsequent builds, and should not be imported into Koji.
[ "Remove", "certain", "plugins", "in", "order", "to", "handle", "the", "scratch", "build", "scenario", ".", "Scratch", "builds", "must", "not", "affect", "subsequent", "builds", "and", "should", "not", "be", "imported", "into", "Koji", "." ]
python
train
48.84
cognitect/transit-python
transit/writer.py
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L229-L236
def dispatch_map(self, rep, as_map_key, cache): """Used to determine and dipatch the writing of a map - a simple map with strings as keys, or a complex map, whose keys are also compound types. """ if self.are_stringable_keys(rep): return self.emit_map(rep, as_map_key, cache) return self.emit_cmap(rep, as_map_key, cache)
[ "def", "dispatch_map", "(", "self", ",", "rep", ",", "as_map_key", ",", "cache", ")", ":", "if", "self", ".", "are_stringable_keys", "(", "rep", ")", ":", "return", "self", ".", "emit_map", "(", "rep", ",", "as_map_key", ",", "cache", ")", "return", "s...
Used to determine and dipatch the writing of a map - a simple map with strings as keys, or a complex map, whose keys are also compound types.
[ "Used", "to", "determine", "and", "dipatch", "the", "writing", "of", "a", "map", "-", "a", "simple", "map", "with", "strings", "as", "keys", "or", "a", "complex", "map", "whose", "keys", "are", "also", "compound", "types", "." ]
python
train
46.75
ktdreyer/txkoji
txkoji/cache.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/cache.py#L63-L72
def filename(self, type_, id_): """ cache filename to read for this type/id. :param type_: str, "user" or "tag" :param id_: int, eg. 123456 :returns: str """ profile = self.connection.profile return os.path.join(self.directory, profile, type_, str(id_))
[ "def", "filename", "(", "self", ",", "type_", ",", "id_", ")", ":", "profile", "=", "self", ".", "connection", ".", "profile", "return", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "profile", ",", "type_", ",", "str", "(", "...
cache filename to read for this type/id. :param type_: str, "user" or "tag" :param id_: int, eg. 123456 :returns: str
[ "cache", "filename", "to", "read", "for", "this", "type", "/", "id", "." ]
python
train
30.9
horazont/aioxmpp
aioxmpp/muc/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/muc/service.py#L1470-L1498
def send_message(self, msg): """ Send a message to the MUC. :param msg: The message to send. :type msg: :class:`aioxmpp.Message` :return: The stanza token of the message. :rtype: :class:`~aioxmpp.stream.StanzaToken` There is no need to set the address attributes or the type of the message correctly; those will be overridden by this method to conform to the requirements of a message to the MUC. Other attributes are left untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and can be used as desired for the message. .. seealso:: :meth:`.AbstractConversation.send_message` for the full interface specification. """ msg.type_ = aioxmpp.MessageType.GROUPCHAT msg.to = self._mucjid # see https://mail.jabber.org/pipermail/standards/2017-January/032048.html # NOQA # for a full discussion on the rationale for this. # TL;DR: we want to help entities to discover that a message is related # to a MUC. msg.xep0045_muc_user = muc_xso.UserExt() result = self.service.client.enqueue(msg) return result
[ "def", "send_message", "(", "self", ",", "msg", ")", ":", "msg", ".", "type_", "=", "aioxmpp", ".", "MessageType", ".", "GROUPCHAT", "msg", ".", "to", "=", "self", ".", "_mucjid", "# see https://mail.jabber.org/pipermail/standards/2017-January/032048.html # NOQA", ...
Send a message to the MUC. :param msg: The message to send. :type msg: :class:`aioxmpp.Message` :return: The stanza token of the message. :rtype: :class:`~aioxmpp.stream.StanzaToken` There is no need to set the address attributes or the type of the message correctly; those will be overridden by this method to conform to the requirements of a message to the MUC. Other attributes are left untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and can be used as desired for the message. .. seealso:: :meth:`.AbstractConversation.send_message` for the full interface specification.
[ "Send", "a", "message", "to", "the", "MUC", "." ]
python
train
40.862069
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L79-L96
def create_derivative(self, word): ''' Creates derivative of (base) word by adding any affixes that apply ''' result = None if self.char_to_strip != '': if self.opt == "PFX": result = word[len(self.char_to_strip):len(word)] result = self.affix + result else: # SFX result = word[0:len(word) - len(self.char_to_strip)] result = result + self.affix else: # No characters to strip if self.opt == "PFX": result = self.affix + word else: # SFX result = word + self.affix # None means word does not meet the set condition return result
[ "def", "create_derivative", "(", "self", ",", "word", ")", ":", "result", "=", "None", "if", "self", ".", "char_to_strip", "!=", "''", ":", "if", "self", ".", "opt", "==", "\"PFX\"", ":", "result", "=", "word", "[", "len", "(", "self", ".", "char_to_...
Creates derivative of (base) word by adding any affixes that apply
[ "Creates", "derivative", "of", "(", "base", ")", "word", "by", "adding", "any", "affixes", "that", "apply" ]
python
train
39.222222
ph4r05/monero-serialize
monero_serialize/core/message_types.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/core/message_types.py#L153-L164
def container_elem_type(container_type, params): """ Returns container element type :param container_type: :param params: :return: """ elem_type = params[0] if params else None if elem_type is None: elem_type = container_type.ELEM_TYPE return elem_type
[ "def", "container_elem_type", "(", "container_type", ",", "params", ")", ":", "elem_type", "=", "params", "[", "0", "]", "if", "params", "else", "None", "if", "elem_type", "is", "None", ":", "elem_type", "=", "container_type", ".", "ELEM_TYPE", "return", "el...
Returns container element type :param container_type: :param params: :return:
[ "Returns", "container", "element", "type" ]
python
train
23.833333
coin-or/GiMPy
src/gimpy/tree.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/tree.py#L84-L104
def add_child(self, n, parent, **attrs): ''' API: add_child(self, n, parent, **attrs) Description: Adds child n to node parent and return Node n. Pre: Node with name parent should exist. Input: n: Child node name. parent: Parent node name. attrs: Attributes of node being added. Post: Updates Graph related graph data attributes. Return: Returns n Node instance. ''' attrs['level'] = self.get_node(parent).get_attr('level') + 1 attrs['parent'] = parent self.add_node(n, **attrs) self.add_edge(parent, n) return self.get_node(n)
[ "def", "add_child", "(", "self", ",", "n", ",", "parent", ",", "*", "*", "attrs", ")", ":", "attrs", "[", "'level'", "]", "=", "self", ".", "get_node", "(", "parent", ")", ".", "get_attr", "(", "'level'", ")", "+", "1", "attrs", "[", "'parent'", ...
API: add_child(self, n, parent, **attrs) Description: Adds child n to node parent and return Node n. Pre: Node with name parent should exist. Input: n: Child node name. parent: Parent node name. attrs: Attributes of node being added. Post: Updates Graph related graph data attributes. Return: Returns n Node instance.
[ "API", ":", "add_child", "(", "self", "n", "parent", "**", "attrs", ")", "Description", ":", "Adds", "child", "n", "to", "node", "parent", "and", "return", "Node", "n", ".", "Pre", ":", "Node", "with", "name", "parent", "should", "exist", ".", "Input",...
python
train
33.095238
pydata/xarray
xarray/core/common.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/common.py#L117-L134
def get_axis_num(self, dim: Union[Hashable, Iterable[Hashable]] ) -> Union[int, Tuple[int, ...]]: """Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions. """ if isinstance(dim, Iterable) and not isinstance(dim, str): return tuple(self._get_axis_num(d) for d in dim) else: return self._get_axis_num(dim)
[ "def", "get_axis_num", "(", "self", ",", "dim", ":", "Union", "[", "Hashable", ",", "Iterable", "[", "Hashable", "]", "]", ")", "->", "Union", "[", "int", ",", "Tuple", "[", "int", ",", "...", "]", "]", ":", "if", "isinstance", "(", "dim", ",", "...
Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions.
[ "Return", "axis", "number", "(", "s", ")", "corresponding", "to", "dimension", "(", "s", ")", "in", "this", "array", "." ]
python
train
35.722222
bskinn/opan
opan/utils/base.py
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L250-L424
def check_geom(c1, a1, c2, a2, tol=_DEF.XYZ_COORD_MATCH_TOL): """ Check for consistency of two geometries and atom symbol lists Cartesian coordinates are considered consistent with the input coords if each component matches to within `tol`. If coords or atoms vectors are passed that are of mismatched lengths, a |False| value is returned. Both coords vectors must be three times the length of the atoms vectors or a :exc:`~exceptions.ValueError` is raised. Parameters ---------- c1 length-3N |npfloat_| -- Vector of first set of stacked 'lab-frame' Cartesian coordinates a1 length-N |str| or |int| -- Vector of first set of atom symbols or atomic numbers c2 length-3N |npfloat_| -- Vector of second set of stacked 'lab-frame' Cartesian coordinates a2 length-N |str| or |int| -- Vector of second set of atom symbols or atomic numbers tol |float|, optional -- Tolerance for acceptable deviation of each geometry coordinate from that in the reference instance to still be considered matching. Default value is specified by :attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`) Returns ------- match |bool| -- Whether input coords and atoms match (|True|) or not (|False|) fail_type :class:`~opan.const.EnumCheckGeomMismatch` or |None| -- Type of check failure If `match` == |True|: Returns as |None| If `match` == |False|: An :class:`~opan.const.EnumCheckGeomMismatch` value indicating the reason for the failed match: :attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION` -- Mismatch in geometry size (number of atoms) :attr:`~opan.const.EnumCheckGeomMismatch.COORDS` -- Mismatch in one or more coordinates :attr:`~opan.const.EnumCheckGeomMismatch.ATOMS` -- Mismatch in one or more atoms fail_loc length-3N |bool| or length-N |bool| or |None| -- Mismatched elements If `match` == |True|: Returns as |None| If `match` == |False|: For "array-level" problems such as a dimension mismatch, a |None| value is returned. 
For "element-level" problems, a vector is returned indicating positions of mismatch in either `coords` or `atoms`, depending on the value of `fail_type`. |True| elements indicate **MATCHING** values |False| elements mark **MISMATCHES** Raises ------ ~exceptions.ValueError If a pair of coords & atoms array lengths is inconsistent: .. code-block:: python if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2): raise ValueError(...) """ # Import(s) from ..const import atom_num import numpy as np from ..const import EnumCheckGeomMismatch as ECGM # Initialize return value to success condition match = True #** Check coords for suitable shape. Assume 1-D np.arrays. if not len(c1.shape) == 1: # Cannot coerce to vector; complain. raise ValueError(("'c1' is not a vector.")) ## end if if not len(c2.shape) == 1: # Cannot coerce to vector; complain. raise ValueError(("'c2' is not a vector.")) ## end if #** Check atoms for suitable shape. Assume lists of strings, so # convert to np.array to check. if not len(a1.shape) == 1: # Not a vector; complain raise ValueError(("'a1' is not a simple list.")) ## end if if not len(a2.shape) == 1: # Not a vector; complain. raise ValueError(("'a2' is not a simple list.")) ## end if #** Confirm proper lengths of coords vs atoms if not c1.shape[0] == 3 * a1.shape[0]: raise ValueError("len(c1) != 3*len(a1)") ## end if if not c2.shape[0] == 3 * a2.shape[0]: raise ValueError("len(c2) != 3*len(a2)") ## end if #** Confirm matching lengths of coords and atoms w/corresponding # objects among the two geometries if not c1.shape[0] == c2.shape[0]: match = False fail_type = ECGM.DIMENSION return match, fail_type, None ## end if #** Element-wise check for geometry match to within 'tol' fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol) if sum(fail_loc) != c2.shape[0]: # Count of matching coordinates should equal the number of # coordinates. If not, complain with 'coord_mismatch' fail type. 
match = False fail_type = ECGM.COORDS return match, fail_type, fail_loc ## end if #** Element-wise check for atoms match. Quietly convert both input and # instance atom arrays to atom_nums to allow np.equals comparison. if np.issubdtype(a1.dtype, np.dtype('str')): # Presume atomic symbol data and attempt conversion a1 = np.array([atom_num[e] for e in a1]) ## end if if np.issubdtype(a2.dtype, np.dtype('str')): # Presume atomic symbol data and attempt conversion a2 = np.array([atom_num[e] for e in a2]) ## end if fail_loc = np.equal(a1, a2) #** Perform the test to ensure all atoms match. if sum(fail_loc) != a2.shape[0]: # Count of matching atoms should equal number of atoms. If not, # complain with the 'atom_mismatch' fail type. match = False fail_type = ECGM.ATOMS return match, fail_type, fail_loc #** If reached here, all tests passed; return success. return match, None, None
[ "def", "check_geom", "(", "c1", ",", "a1", ",", "c2", ",", "a2", ",", "tol", "=", "_DEF", ".", "XYZ_COORD_MATCH_TOL", ")", ":", "# Import(s)", "from", ".", ".", "const", "import", "atom_num", "import", "numpy", "as", "np", "from", ".", ".", "const", ...
Check for consistency of two geometries and atom symbol lists Cartesian coordinates are considered consistent with the input coords if each component matches to within `tol`. If coords or atoms vectors are passed that are of mismatched lengths, a |False| value is returned. Both coords vectors must be three times the length of the atoms vectors or a :exc:`~exceptions.ValueError` is raised. Parameters ---------- c1 length-3N |npfloat_| -- Vector of first set of stacked 'lab-frame' Cartesian coordinates a1 length-N |str| or |int| -- Vector of first set of atom symbols or atomic numbers c2 length-3N |npfloat_| -- Vector of second set of stacked 'lab-frame' Cartesian coordinates a2 length-N |str| or |int| -- Vector of second set of atom symbols or atomic numbers tol |float|, optional -- Tolerance for acceptable deviation of each geometry coordinate from that in the reference instance to still be considered matching. Default value is specified by :attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`) Returns ------- match |bool| -- Whether input coords and atoms match (|True|) or not (|False|) fail_type :class:`~opan.const.EnumCheckGeomMismatch` or |None| -- Type of check failure If `match` == |True|: Returns as |None| If `match` == |False|: An :class:`~opan.const.EnumCheckGeomMismatch` value indicating the reason for the failed match: :attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION` -- Mismatch in geometry size (number of atoms) :attr:`~opan.const.EnumCheckGeomMismatch.COORDS` -- Mismatch in one or more coordinates :attr:`~opan.const.EnumCheckGeomMismatch.ATOMS` -- Mismatch in one or more atoms fail_loc length-3N |bool| or length-N |bool| or |None| -- Mismatched elements If `match` == |True|: Returns as |None| If `match` == |False|: For "array-level" problems such as a dimension mismatch, a |None| value is returned. For "element-level" problems, a vector is returned indicating positions of mismatch in either `coords` or `atoms`, depending on the value of `fail_type`. 
|True| elements indicate **MATCHING** values |False| elements mark **MISMATCHES** Raises ------ ~exceptions.ValueError If a pair of coords & atoms array lengths is inconsistent: .. code-block:: python if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2): raise ValueError(...)
[ "Check", "for", "consistency", "of", "two", "geometries", "and", "atom", "symbol", "lists" ]
python
train
31.617143
reingart/gui2py
gui/menu.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/menu.py#L323-L328
def find(self, item_id=None): "Recursively find a menu item by its id (useful for event handlers)" for it in self: found = it.find(item_id) if found: return found
[ "def", "find", "(", "self", ",", "item_id", "=", "None", ")", ":", "for", "it", "in", "self", ":", "found", "=", "it", ".", "find", "(", "item_id", ")", "if", "found", ":", "return", "found" ]
Recursively find a menu item by its id (useful for event handlers)
[ "Recursively", "find", "a", "menu", "item", "by", "its", "id", "(", "useful", "for", "event", "handlers", ")" ]
python
test
36.333333
benley/butcher
butcher/targets/__init__.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/__init__.py#L28-L44
def new(ruletype, **kwargs): """Instantiate a new build rule based on kwargs. Appropriate args list varies with rule type. Minimum args required: [... fill this in ...] """ try: ruleclass = TYPE_MAP[ruletype] except KeyError: raise error.InvalidRule('Unrecognized rule type: %s' % ruletype) try: return ruleclass(**kwargs) except TypeError: log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs) raise
[ "def", "new", "(", "ruletype", ",", "*", "*", "kwargs", ")", ":", "try", ":", "ruleclass", "=", "TYPE_MAP", "[", "ruletype", "]", "except", "KeyError", ":", "raise", "error", ".", "InvalidRule", "(", "'Unrecognized rule type: %s'", "%", "ruletype", ")", "t...
Instantiate a new build rule based on kwargs. Appropriate args list varies with rule type. Minimum args required: [... fill this in ...]
[ "Instantiate", "a", "new", "build", "rule", "based", "on", "kwargs", "." ]
python
train
27.823529
xenadevel/PyXenaManager
xenamanager/xena_app.py
https://github.com/xenadevel/PyXenaManager/blob/384ca265f73044b8a8b471f5dd7a6103fc54f4df/xenamanager/xena_app.py#L106-L122
def reserve_ports(self, locations, force=False, reset=True): """ Reserve ports and reset factory defaults. XenaManager-2G -> Reserve/Relinquish Port. XenaManager-2G -> Reserve Port. :param locations: list of ports locations in the form <ip/slot/port> to reserve :param force: True - take forcefully. False - fail if port is reserved by other user :param reset: True - reset port, False - leave port configuration :return: ports dictionary (index: object) """ for location in locations: ip, module, port = location.split('/') self.chassis_list[ip].reserve_ports(['{}/{}'.format(module, port)], force, reset) return self.ports
[ "def", "reserve_ports", "(", "self", ",", "locations", ",", "force", "=", "False", ",", "reset", "=", "True", ")", ":", "for", "location", "in", "locations", ":", "ip", ",", "module", ",", "port", "=", "location", ".", "split", "(", "'/'", ")", "self...
Reserve ports and reset factory defaults. XenaManager-2G -> Reserve/Relinquish Port. XenaManager-2G -> Reserve Port. :param locations: list of ports locations in the form <ip/slot/port> to reserve :param force: True - take forcefully. False - fail if port is reserved by other user :param reset: True - reset port, False - leave port configuration :return: ports dictionary (index: object)
[ "Reserve", "ports", "and", "reset", "factory", "defaults", "." ]
python
train
42.117647
autokey/autokey
lib/autokey/service.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/service.py#L244-L254
def calculate_extra_keys(self, buffer): """ Determine extra keys pressed since the given buffer was built """ extraBs = len(self.inputStack) - len(buffer) if extraBs > 0: extraKeys = ''.join(self.inputStack[len(buffer)]) else: extraBs = 0 extraKeys = '' return extraBs, extraKeys
[ "def", "calculate_extra_keys", "(", "self", ",", "buffer", ")", ":", "extraBs", "=", "len", "(", "self", ".", "inputStack", ")", "-", "len", "(", "buffer", ")", "if", "extraBs", ">", "0", ":", "extraKeys", "=", "''", ".", "join", "(", "self", ".", ...
Determine extra keys pressed since the given buffer was built
[ "Determine", "extra", "keys", "pressed", "since", "the", "given", "buffer", "was", "built" ]
python
train
32.818182
thombashi/pytablewriter
pytablewriter/_factory.py
https://github.com/thombashi/pytablewriter/blob/52ea85ed8e89097afa64f137c6a1b3acdfefdbda/pytablewriter/_factory.py#L24-L88
def create_from_file_extension(cls, file_extension): """ Create a table writer class instance from a file extension. Supported file extensions are as follows: ================== =================================== Extension Writer Class ================== =================================== ``".csv"`` :py:class:`~.CsvTableWriter` ``".htm"`` :py:class:`~.HtmlTableWriter` ``".html"`` :py:class:`~.HtmlTableWriter` ``".js"`` :py:class:`~.JavaScriptTableWriter` ``".json"`` :py:class:`~.JsonTableWriter` ``".jsonl"`` :py:class:`~.JsonLinesTableWriter` ``".ltsv"`` :py:class:`~.LtsvTableWriter` ``".ldjson"`` :py:class:`~.JsonLinesTableWriter` ``".md"`` :py:class:`~.MarkdownTableWriter` ``".ndjson"`` :py:class:`~.JsonLinesTableWriter` ``".py"`` :py:class:`~.PythonCodeTableWriter` ``".rst"`` :py:class:`~.RstGridTableWriter` ``".tsv"`` :py:class:`~.TsvTableWriter` ``".xls"`` :py:class:`~.ExcelXlsTableWriter` ``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter` ``".sqlite"`` :py:class:`~.SqliteTableWriter` ``".sqlite3"`` :py:class:`~.SqliteTableWriter` ``".tsv"`` :py:class:`~.TsvTableWriter` ``".toml"`` :py:class:`~.TomlTableWriter` ================== =================================== :param str file_extension: File extension string (case insensitive). :return: Writer instance that coincides with the ``file_extension``. :rtype: :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface` :raises pytablewriter.WriterNotFoundError: |WriterNotFoundError_desc| the file extension. 
""" ext = os.path.splitext(file_extension)[1] if typepy.is_null_string(ext): file_extension = file_extension else: file_extension = ext file_extension = file_extension.lstrip(".").lower() for table_format in TableFormat: if file_extension not in table_format.file_extensions: continue if table_format.format_attribute & FormatAttr.SECONDARY_EXT: continue return table_format.writer_class() raise WriterNotFoundError( "\n".join( [ "{:s} (unknown file extension).".format(file_extension), "", "acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())), ] ) )
[ "def", "create_from_file_extension", "(", "cls", ",", "file_extension", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file_extension", ")", "[", "1", "]", "if", "typepy", ".", "is_null_string", "(", "ext", ")", ":", "file_extension", "=",...
Create a table writer class instance from a file extension. Supported file extensions are as follows: ================== =================================== Extension Writer Class ================== =================================== ``".csv"`` :py:class:`~.CsvTableWriter` ``".htm"`` :py:class:`~.HtmlTableWriter` ``".html"`` :py:class:`~.HtmlTableWriter` ``".js"`` :py:class:`~.JavaScriptTableWriter` ``".json"`` :py:class:`~.JsonTableWriter` ``".jsonl"`` :py:class:`~.JsonLinesTableWriter` ``".ltsv"`` :py:class:`~.LtsvTableWriter` ``".ldjson"`` :py:class:`~.JsonLinesTableWriter` ``".md"`` :py:class:`~.MarkdownTableWriter` ``".ndjson"`` :py:class:`~.JsonLinesTableWriter` ``".py"`` :py:class:`~.PythonCodeTableWriter` ``".rst"`` :py:class:`~.RstGridTableWriter` ``".tsv"`` :py:class:`~.TsvTableWriter` ``".xls"`` :py:class:`~.ExcelXlsTableWriter` ``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter` ``".sqlite"`` :py:class:`~.SqliteTableWriter` ``".sqlite3"`` :py:class:`~.SqliteTableWriter` ``".tsv"`` :py:class:`~.TsvTableWriter` ``".toml"`` :py:class:`~.TomlTableWriter` ================== =================================== :param str file_extension: File extension string (case insensitive). :return: Writer instance that coincides with the ``file_extension``. :rtype: :py:class:`~pytablewriter.writer._table_writer.TableWriterInterface` :raises pytablewriter.WriterNotFoundError: |WriterNotFoundError_desc| the file extension.
[ "Create", "a", "table", "writer", "class", "instance", "from", "a", "file", "extension", ".", "Supported", "file", "extensions", "are", "as", "follows", ":" ]
python
train
43.538462
mdeous/fatbotslim
fatbotslim/irc/bot.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/irc/bot.py#L124-L145
def parse(cls, prefix): """ Extracts informations from `prefix`. :param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``. :type prefix: unicode :return: extracted informations (nickname or host, mode, username, host). :rtype: tuple(str, str, str, str) """ try: nick, rest = prefix.split(u'!') except ValueError: return prefix, None, None, None try: mode, rest = rest.split(u'=') except ValueError: mode, rest = None, rest try: user, host = rest.split(u'@') except ValueError: return nick, mode, rest, None return nick, mode, user, host
[ "def", "parse", "(", "cls", ",", "prefix", ")", ":", "try", ":", "nick", ",", "rest", "=", "prefix", ".", "split", "(", "u'!'", ")", "except", "ValueError", ":", "return", "prefix", ",", "None", ",", "None", ",", "None", "try", ":", "mode", ",", ...
Extracts informations from `prefix`. :param prefix: prefix with format ``<servername>|<nick>['!'<user>]['@'<host>]``. :type prefix: unicode :return: extracted informations (nickname or host, mode, username, host). :rtype: tuple(str, str, str, str)
[ "Extracts", "informations", "from", "prefix", "." ]
python
train
32.863636
xtrementl/focus
focus/task.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/task.py#L96-L120
def _clean_prior(self): """ Cleans up from a previous task that didn't exit cleanly. Returns ``True`` if previous task was cleaned. """ if self._loaded: try: pid_file = daemon.get_daemon_pidfile(self) # check if it exists so we don't raise if os.path.isfile(pid_file): # read pid from file pid = int(common.readfile(pid_file)) # check if pid file is stale if pid and not daemon.pid_exists(pid): common.safe_remove_file(pid_file) raise ValueError except (ValueError, TypeError): self._clean() return True return False
[ "def", "_clean_prior", "(", "self", ")", ":", "if", "self", ".", "_loaded", ":", "try", ":", "pid_file", "=", "daemon", ".", "get_daemon_pidfile", "(", "self", ")", "# check if it exists so we don't raise", "if", "os", ".", "path", ".", "isfile", "(", "pid_f...
Cleans up from a previous task that didn't exit cleanly. Returns ``True`` if previous task was cleaned.
[ "Cleans", "up", "from", "a", "previous", "task", "that", "didn", "t", "exit", "cleanly", "." ]
python
train
31.12
hvac/hvac
hvac/api/auth_methods/ldap.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/auth_methods/ldap.py#L239-L287
def create_or_update_user(self, username, policies=None, groups=None, mount_point=DEFAULT_MOUNT_POINT): """ Create or update LDAP users policies and group associations. Supported methods: POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body) :param username: The username of the LDAP user :type username: str | unicode :param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited string before being passed to Vault. :type policies: str | unicode :param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited string before being passed to Vault. :type groups: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the create_or_update_user request. :rtype: requests.Response """ if policies is None: policies = [] if groups is None: groups = [] list_required_params = { 'policies': policies, 'groups': groups, } for param_name, param_arg in list_required_params.items(): if not isinstance(param_arg, list): error_msg = '"{param_name}" argument must be an instance of list or None, "{param_type}" provided.'.format( param_name=param_name, param_type=type(param_arg), ) raise exceptions.ParamValidationError(error_msg) params = { 'policies': ','.join(policies), 'groups': ','.join(groups), } api_path = '/v1/auth/{mount_point}/users/{username}'.format( mount_point=mount_point, username=username, ) return self._adapter.post( url=api_path, json=params, )
[ "def", "create_or_update_user", "(", "self", ",", "username", ",", "policies", "=", "None", ",", "groups", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "if", "policies", "is", "None", ":", "policies", "=", "[", "]", "if", "groups"...
Create or update LDAP users policies and group associations. Supported methods: POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body) :param username: The username of the LDAP user :type username: str | unicode :param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited string before being passed to Vault. :type policies: str | unicode :param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited string before being passed to Vault. :type groups: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the create_or_update_user request. :rtype: requests.Response
[ "Create", "or", "update", "LDAP", "users", "policies", "and", "group", "associations", "." ]
python
train
40