repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Shizmob/pydle
pydle/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/client.py#L115-L123
async def disconnect(self, expected=True): """ Disconnect from server. """ if self.connected: # Unschedule ping checker. if self._ping_checker_handle: self._ping_checker_handle.cancel() # Schedule disconnect. await self._disconnect(expected)
[ "async", "def", "disconnect", "(", "self", ",", "expected", "=", "True", ")", ":", "if", "self", ".", "connected", ":", "# Unschedule ping checker.", "if", "self", ".", "_ping_checker_handle", ":", "self", ".", "_ping_checker_handle", ".", "cancel", "(", ")", ...
Disconnect from server.
[ "Disconnect", "from", "server", "." ]
python
train
titusjan/argos
argos/repo/repotreemodel.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/repotreemodel.py#L126-L142
def fetchMore(self, parentIndex): # TODO: Make LazyLoadRepoTreeModel? """ Fetches any available data for the items with the parent specified by the parent index. """ parentItem = self.getItem(parentIndex) if not parentItem: return if not parentItem.canFetchChildren(): return # TODO: implement InsertItems to optimize? for childItem in parentItem.fetchChildren(): self.insertItem(childItem, parentIndex=parentIndex) # Check that Rti implementation correctly sets canFetchChildren assert not parentItem.canFetchChildren(), \ "not all children fetched: {}".format(parentItem)
[ "def", "fetchMore", "(", "self", ",", "parentIndex", ")", ":", "# TODO: Make LazyLoadRepoTreeModel?", "parentItem", "=", "self", ".", "getItem", "(", "parentIndex", ")", "if", "not", "parentItem", ":", "return", "if", "not", "parentItem", ".", "canFetchChildren", ...
Fetches any available data for the items with the parent specified by the parent index.
[ "Fetches", "any", "available", "data", "for", "the", "items", "with", "the", "parent", "specified", "by", "the", "parent", "index", "." ]
python
train
casouri/launchdman
launchdman/__init__.py
https://github.com/casouri/launchdman/blob/c83840e640cb075fab2534049f1e25fac6933c64/launchdman/__init__.py#L1062-L1111
def genInterval(self, month=(), day=(), week=(), weekday=(), hour=(), minute=()): '''Generate list of config dictionarie(s) that represent a interval of time. Used to be passed into add() or remove(). For example:: genInterval(month=(1,4), week(1,4)) # generate list contains from first to third week in from January to March Args: month (tuple): (start, end) month in a year, from 1 to 12 week (tuple): (start, end) week in a month, from 1 to 4 day (tuple): (start, end) day in a month, from 1 to 31 weekday (tuple): (start, end) weekday in a week, from 0 to 7. 0 and 7 both represent Sunday hour (tuple): (start, end) hour in a day, from 0 to 24 minute (tuple): (start, end) minute in an hour, from 0 to 59 Returns: list: a list of dictionarie(s) with form [{'Day':12, 'Month':3}, {}, etc] ''' dic = { 'Month': month, 'Day': day, 'Week': week, 'Weekday': weekday, 'Day': day, 'Hour': hour, 'Minute': minute } dic = {k: v for k, v in dic.items() if v != ()} # e.g. dic: {'month': (1,5), 'day': (2,4)} grandList = [] for k in dic: # e.g. k: 'month', dic[k]: (1,5) l = [] # rangeTuple = (dic[k][0], dic[k][1] + 1) # e.g. (1,6) rangeTuple = dic[k] for num in range(rangeTuple[0], rangeTuple[1]): # e.g. 1, 2, 3, 4, 5 l.append({k: num}) # e.g. [{'month': 1}, {'month': 2}] grandList.append(l) # e.g. [[list of month], [list of day]] print(grandList) # grandList: [[list of month], [list of day]] # l: [[a,a1,a2,...], [b,b1,b2,...]] # combineDict return: [{a,b}, {a,b1}, {a,b2}, {a1,b}, {a1,b1}, {a1, b2}, {a2,b}, {a2,b1}, {a2,b2}] return crossCombine(grandList)
[ "def", "genInterval", "(", "self", ",", "month", "=", "(", ")", ",", "day", "=", "(", ")", ",", "week", "=", "(", ")", ",", "weekday", "=", "(", ")", ",", "hour", "=", "(", ")", ",", "minute", "=", "(", ")", ")", ":", "dic", "=", "{", "'M...
Generate list of config dictionarie(s) that represent a interval of time. Used to be passed into add() or remove(). For example:: genInterval(month=(1,4), week(1,4)) # generate list contains from first to third week in from January to March Args: month (tuple): (start, end) month in a year, from 1 to 12 week (tuple): (start, end) week in a month, from 1 to 4 day (tuple): (start, end) day in a month, from 1 to 31 weekday (tuple): (start, end) weekday in a week, from 0 to 7. 0 and 7 both represent Sunday hour (tuple): (start, end) hour in a day, from 0 to 24 minute (tuple): (start, end) minute in an hour, from 0 to 59 Returns: list: a list of dictionarie(s) with form [{'Day':12, 'Month':3}, {}, etc]
[ "Generate", "list", "of", "config", "dictionarie", "(", "s", ")", "that", "represent", "a", "interval", "of", "time", ".", "Used", "to", "be", "passed", "into", "add", "()", "or", "remove", "()", ".", "For", "example", "::" ]
python
train
cobrateam/flask-mongoalchemy
flask_mongoalchemy/__init__.py
https://github.com/cobrateam/flask-mongoalchemy/blob/66ab6f857cae69e35d37035880c1dfaf1dc9bd15/flask_mongoalchemy/__init__.py#L222-L228
def get(self, mongo_id): """Returns a :class:`Document` instance from its ``mongo_id`` or ``None`` if not found""" try: return self.filter(self.type.mongo_id == mongo_id).first() except exceptions.BadValueException: return None
[ "def", "get", "(", "self", ",", "mongo_id", ")", ":", "try", ":", "return", "self", ".", "filter", "(", "self", ".", "type", ".", "mongo_id", "==", "mongo_id", ")", ".", "first", "(", ")", "except", "exceptions", ".", "BadValueException", ":", "return"...
Returns a :class:`Document` instance from its ``mongo_id`` or ``None`` if not found
[ "Returns", "a", ":", "class", ":", "Document", "instance", "from", "its", "mongo_id", "or", "None", "if", "not", "found" ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/show/show_firmware_dummy/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/show/show_firmware_dummy/__init__.py#L92-L113
def _set_show_firmware_option(self, v, load=False): """ Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container) If this variable is read-only (config: false) in the source YANG file, then _set_show_firmware_option is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_firmware_option() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_firmware_option must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__show_firmware_option = t if hasattr(self, '_set'): self._set()
[ "def", "_set_show_firmware_option", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", "...
Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container) If this variable is read-only (config: false) in the source YANG file, then _set_show_firmware_option is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_firmware_option() directly.
[ "Setter", "method", "for", "show_firmware_option", "mapped", "from", "YANG", "variable", "/", "show", "/", "show_firmware_dummy", "/", "show_firmware_option", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false"...
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/preprocessor.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/preprocessor.py#L12-L28
def remove_comments(code): """Remove C-style comment from GLSL code string.""" pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)" # first group captures quoted strings (double or single) # second group captures comments (//single-line or /* multi-line */) regex = re.compile(pattern, re.MULTILINE | re.DOTALL) def do_replace(match): # if the 2nd group (capturing comments) is not None, # it means we have captured a non-quoted (real) comment string. if match.group(2) is not None: return "" # so we will return empty to remove the comment else: # otherwise, we will return the 1st group return match.group(1) # captured quoted-string return regex.sub(do_replace, code)
[ "def", "remove_comments", "(", "code", ")", ":", "pattern", "=", "r\"(\\\".*?\\\"|\\'.*?\\')|(/\\*.*?\\*/|//[^\\r\\n]*\\n)\"", "# first group captures quoted strings (double or single)", "# second group captures comments (//single-line or /* multi-line */)", "regex", "=", "re", ".", "c...
Remove C-style comment from GLSL code string.
[ "Remove", "C", "-", "style", "comment", "from", "GLSL", "code", "string", "." ]
python
train
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L369-L383
def mapPartitionsWithSplit(self, f, preservesPartitioning=False): """ Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithSplit(f).sum() 6 """ warnings.warn("mapPartitionsWithSplit is deprecated; " "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2) return self.mapPartitionsWithIndex(f, preservesPartitioning)
[ "def", "mapPartitionsWithSplit", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"mapPartitionsWithSplit is deprecated; \"", "\"use mapPartitionsWithIndex instead\"", ",", "DeprecationWarning", ",", "stacklevel", ...
Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithSplit(f).sum() 6
[ "Deprecated", ":", "use", "mapPartitionsWithIndex", "instead", "." ]
python
train
ToFuProject/tofu
tofu/data/_core.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1341-L1349
def clear_ddata(self): """ Clear the working copy of data Harmless, as it preserves the reference copy and the treatment dict Use only to free some memory """ self._ddata = dict.fromkeys(self._get_keys_ddata()) self._ddata['uptodate'] = False
[ "def", "clear_ddata", "(", "self", ")", ":", "self", ".", "_ddata", "=", "dict", ".", "fromkeys", "(", "self", ".", "_get_keys_ddata", "(", ")", ")", "self", ".", "_ddata", "[", "'uptodate'", "]", "=", "False" ]
Clear the working copy of data Harmless, as it preserves the reference copy and the treatment dict Use only to free some memory
[ "Clear", "the", "working", "copy", "of", "data" ]
python
train
ewels/MultiQC
multiqc/modules/deeptools/plotEnrichment.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/deeptools/plotEnrichment.py#L16-L51
def parse_plotEnrichment(self): """Find plotEnrichment output.""" self.deeptools_plotEnrichment = dict() for f in self.find_log_files('deeptools/plotEnrichment'): parsed_data = self.parsePlotEnrichment(f) for k, v in parsed_data.items(): if k in self.deeptools_plotEnrichment: log.warning("Replacing duplicate sample {}.".format(k)) self.deeptools_plotEnrichment[k] = v if len(parsed_data) > 0: self.add_data_source(f, section='plotEnrichment') if len(self.deeptools_plotEnrichment) > 0: dCounts = OrderedDict() dPercents = OrderedDict() for sample, v in self.deeptools_plotEnrichment.items(): dCounts[sample] = OrderedDict() dPercents[sample] = OrderedDict() for category, v2 in v.items(): dCounts[sample][category] = v2['count'] dPercents[sample][category] = v2['percent'] config = {'data_labels': [ {'name': 'Counts in features', 'ylab': 'Counts in feature'}, {'name': 'Percents in features', 'ylab': 'Percent of reads in feature'}], 'id': 'deeptools_enrichment_plot', 'title': 'deepTools: Signal enrichment per feature', 'ylab': 'Counts in feature', 'categories': True, 'ymin': 0.0} self.add_section(name="Feature enrichment", description="Signal enrichment per feature according to plotEnrichment", anchor="deeptools_enrichment", plot=linegraph.plot([dCounts, dPercents], pconfig=config)) return len(self.deeptools_plotEnrichment)
[ "def", "parse_plotEnrichment", "(", "self", ")", ":", "self", ".", "deeptools_plotEnrichment", "=", "dict", "(", ")", "for", "f", "in", "self", ".", "find_log_files", "(", "'deeptools/plotEnrichment'", ")", ":", "parsed_data", "=", "self", ".", "parsePlotEnrichm...
Find plotEnrichment output.
[ "Find", "plotEnrichment", "output", "." ]
python
train
sorgerlab/indra
indra/sources/trips/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L695-L715
def get_modifications(self): """Extract all types of Modification INDRA Statements.""" # Get all the specific mod types mod_event_types = list(ont_to_mod_type.keys()) # Add ONT::PTMs as a special case mod_event_types += ['ONT::PTM'] mod_events = [] for mod_event_type in mod_event_types: events = self.tree.findall("EVENT/[type='%s']" % mod_event_type) mod_extracted = self.extracted_events.get(mod_event_type, []) for event in events: event_id = event.attrib.get('id') if event_id not in mod_extracted: mod_events.append(event) # Iterate over all modification events for event in mod_events: stmts = self._get_modification_event(event) if stmts: for stmt in stmts: self.statements.append(stmt)
[ "def", "get_modifications", "(", "self", ")", ":", "# Get all the specific mod types", "mod_event_types", "=", "list", "(", "ont_to_mod_type", ".", "keys", "(", ")", ")", "# Add ONT::PTMs as a special case", "mod_event_types", "+=", "[", "'ONT::PTM'", "]", "mod_events",...
Extract all types of Modification INDRA Statements.
[ "Extract", "all", "types", "of", "Modification", "INDRA", "Statements", "." ]
python
train
IdentityPython/SATOSA
src/satosa/backends/saml2.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/backends/saml2.py#L143-L195
def authn_request(self, context, entity_id): """ Do an authorization request on idp with given entity id. This is the start of the authorization. :type context: satosa.context.Context :type entity_id: str :rtype: satosa.response.Response :param context: The current context :param entity_id: Target IDP entity id :return: response to the user agent """ # If IDP blacklisting is enabled and the selected IDP is blacklisted, # stop here if self.idp_blacklist_file: with open(self.idp_blacklist_file) as blacklist_file: blacklist_array = json.load(blacklist_file)['blacklist'] if entity_id in blacklist_array: satosa_logging(logger, logging.DEBUG, "IdP with EntityID {} is blacklisted".format(entity_id), context.state, exc_info=False) raise SATOSAAuthenticationError(context.state, "Selected IdP is blacklisted for this backend") kwargs = {} authn_context = self.construct_requested_authn_context(entity_id) if authn_context: kwargs['requested_authn_context'] = authn_context try: binding, destination = self.sp.pick_binding( "single_sign_on_service", None, "idpsso", entity_id=entity_id) satosa_logging(logger, logging.DEBUG, "binding: %s, destination: %s" % (binding, destination), context.state) acs_endp, response_binding = self.sp.config.getattr("endpoints", "sp")["assertion_consumer_service"][0] req_id, req = self.sp.create_authn_request( destination, binding=response_binding, **kwargs) relay_state = util.rndstr() ht_args = self.sp.apply_binding(binding, "%s" % req, destination, relay_state=relay_state) satosa_logging(logger, logging.DEBUG, "ht_args: %s" % ht_args, context.state) except Exception as exc: satosa_logging(logger, logging.DEBUG, "Failed to construct the AuthnRequest for state", context.state, exc_info=True) raise SATOSAAuthenticationError(context.state, "Failed to construct the AuthnRequest") from exc if self.sp.config.getattr('allow_unsolicited', 'sp') is False: if req_id in self.outstanding_queries: errmsg = "Request with duplicate id {}".format(req_id) 
satosa_logging(logger, logging.DEBUG, errmsg, context.state) raise SATOSAAuthenticationError(context.state, errmsg) self.outstanding_queries[req_id] = req context.state[self.name] = {"relay_state": relay_state} return make_saml_response(binding, ht_args)
[ "def", "authn_request", "(", "self", ",", "context", ",", "entity_id", ")", ":", "# If IDP blacklisting is enabled and the selected IDP is blacklisted,", "# stop here", "if", "self", ".", "idp_blacklist_file", ":", "with", "open", "(", "self", ".", "idp_blacklist_file", ...
Do an authorization request on idp with given entity id. This is the start of the authorization. :type context: satosa.context.Context :type entity_id: str :rtype: satosa.response.Response :param context: The current context :param entity_id: Target IDP entity id :return: response to the user agent
[ "Do", "an", "authorization", "request", "on", "idp", "with", "given", "entity", "id", ".", "This", "is", "the", "start", "of", "the", "authorization", "." ]
python
train
Autodesk/aomi
aomi/model/resource.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/resource.py#L53-L66
def tunable(self, obj): """A tunable resource maps against a backend...""" self.tune = dict() if 'tune' in obj: for tunable in MOUNT_TUNABLES: tunable_key = tunable[0] map_val(self.tune, obj['tune'], tunable_key) if tunable_key in self.tune and \ is_vault_time(self.tune[tunable_key]): vault_time_s = vault_time_to_s(self.tune[tunable_key]) self.tune[tunable_key] = vault_time_s if 'description'in obj: self.tune['description'] = obj['description']
[ "def", "tunable", "(", "self", ",", "obj", ")", ":", "self", ".", "tune", "=", "dict", "(", ")", "if", "'tune'", "in", "obj", ":", "for", "tunable", "in", "MOUNT_TUNABLES", ":", "tunable_key", "=", "tunable", "[", "0", "]", "map_val", "(", "self", ...
A tunable resource maps against a backend...
[ "A", "tunable", "resource", "maps", "against", "a", "backend", "..." ]
python
train
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L873-L892
def padded_grid_from_shape_psf_shape_and_pixel_scale(cls, shape, psf_shape, pixel_scale): """Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale. The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \ which are beyond the input shape but will blurred light into the 2D array's shape due to the psf. Parameters ---------- shape : (int, int) The (y,x) shape of the masked-grid's 2D image in units of pixels. psf_shape : (int, int) The shape of the psf which defines the blurring region and therefore size of padding. pixel_scale : float The scale of each pixel in arc seconds """ padded_shape = (shape[0] + psf_shape[0] - 1, shape[1] + psf_shape[1] - 1) padded_regular_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin( mask=np.full(padded_shape, False), pixel_scales=(pixel_scale, pixel_scale)) padded_mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=padded_shape, pixel_scale=pixel_scale) return PaddedRegularGrid(arr=padded_regular_grid, mask=padded_mask, image_shape=shape)
[ "def", "padded_grid_from_shape_psf_shape_and_pixel_scale", "(", "cls", ",", "shape", ",", "psf_shape", ",", "pixel_scale", ")", ":", "padded_shape", "=", "(", "shape", "[", "0", "]", "+", "psf_shape", "[", "0", "]", "-", "1", ",", "shape", "[", "1", "]", ...
Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale. The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \ which are beyond the input shape but will blurred light into the 2D array's shape due to the psf. Parameters ---------- shape : (int, int) The (y,x) shape of the masked-grid's 2D image in units of pixels. psf_shape : (int, int) The shape of the psf which defines the blurring region and therefore size of padding. pixel_scale : float The scale of each pixel in arc seconds
[ "Setup", "a", "regular", "padded", "grid", "from", "a", "2D", "array", "shape", "psf", "-", "shape", "and", "pixel", "-", "scale", "." ]
python
valid
calmjs/calmjs
src/calmjs/interrogate.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/interrogate.py#L184-L199
def yield_module_imports(root, checks=string_imports()): """ Gather all require and define calls from unbundled JavaScript source files and yield all module names. The imports can either be of the CommonJS or AMD syntax. """ if not isinstance(root, asttypes.Node): raise TypeError('provided root must be a node') for child in yield_function(root, deep_filter): for f, condition in checks: if condition(child): for name in f(child): yield name continue
[ "def", "yield_module_imports", "(", "root", ",", "checks", "=", "string_imports", "(", ")", ")", ":", "if", "not", "isinstance", "(", "root", ",", "asttypes", ".", "Node", ")", ":", "raise", "TypeError", "(", "'provided root must be a node'", ")", "for", "ch...
Gather all require and define calls from unbundled JavaScript source files and yield all module names. The imports can either be of the CommonJS or AMD syntax.
[ "Gather", "all", "require", "and", "define", "calls", "from", "unbundled", "JavaScript", "source", "files", "and", "yield", "all", "module", "names", ".", "The", "imports", "can", "either", "be", "of", "the", "CommonJS", "or", "AMD", "syntax", "." ]
python
train
dmwm/DBS
Server/Python/src/dbs/dao/MySQL/SequenceManager.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/MySQL/SequenceManager.py#L15-L38
def increment(self, conn, seqName, transaction = False, incCount=1): """ increments the sequence `seqName` by default `Incremented by one` and returns its value """ try: seqTable = "%sS" %seqName tlock = "lock tables %s write" %seqTable self.dbi.processData(tlock, [], conn, transaction) sql = "select ID from %s" % seqTable result = self.dbi.processData(sql, [], conn, transaction) resultlist = self.formatDict(result) newSeq = resultlist[0]['id']+incCount sql = "UPDATE %s SET ID=:seq_count" % seqTable seqparms={"seq_count" : newSeq} self.dbi.processData(sql, seqparms, conn, transaction) tunlock = "unlock tables" self.dbi.processData(tunlock, [], conn, transaction) return newSeq except: #FIXME tunlock = "unlock tables" self.dbi.processData(tunlock, [], conn, transaction) raise
[ "def", "increment", "(", "self", ",", "conn", ",", "seqName", ",", "transaction", "=", "False", ",", "incCount", "=", "1", ")", ":", "try", ":", "seqTable", "=", "\"%sS\"", "%", "seqName", "tlock", "=", "\"lock tables %s write\"", "%", "seqTable", "self", ...
increments the sequence `seqName` by default `Incremented by one` and returns its value
[ "increments", "the", "sequence", "seqName", "by", "default", "Incremented", "by", "one", "and", "returns", "its", "value" ]
python
train
Valuehorizon/valuehorizon-companies
companies/models.py
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L291-L324
def save(self, *args, **kwargs): """ This method autogenerates the auto_generated_description field """ # Cache basic data self.cache_data() # Ensure slug doesn't change if self.id is not None: db_company = Company.objects.get(id=self.id) if self.slug_name != db_company.slug_name: raise ValueError("Cannot reset slug_name") if str(self.trade_name).strip() == "": self.trade_name = None # Short description check if len(str(self.short_description)) > 370: raise AssertionError("Short description must be no more than 370 characters") if self.sub_industry is not None: # Cache GICS self.industry = self.sub_industry.industry self.industry_group = self.sub_industry.industry.industry_group self.sector = self.sub_industry.industry.industry_group.sector # Cache GICS names self.sub_industry_name = self.sub_industry.name self.industry_name = self.industry.name self.industry_group_name = self.industry_group.name self.sector_name = self.sector.name # Call save method super(Company, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Cache basic data", "self", ".", "cache_data", "(", ")", "# Ensure slug doesn't change", "if", "self", ".", "id", "is", "not", "None", ":", "db_company", "=", "Company", "...
This method autogenerates the auto_generated_description field
[ "This", "method", "autogenerates", "the", "auto_generated_description", "field" ]
python
train
shmir/PyIxNetwork
ixnetwork/ixn_port.py
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_port.py#L21-L50
def reserve(self, location=None, force=False, wait_for_up=True, timeout=80): """ Reserve port and optionally wait for port to come up. :param location: port location as 'ip/module/port'. If None, the location will be taken from the configuration. :param force: whether to revoke existing reservation (True) or not (False). :param wait_for_up: True - wait for port to come up, False - return immediately. :param timeout: how long (seconds) to wait for port to come up. """ if not location or is_local_host(location): return hostname, card, port = location.split('/') chassis = self.root.hw.get_chassis(hostname) # todo - test if port owned by me. if force: chassis.get_card(int(card)).get_port(int(port)).release() try: phy_port = chassis.get_card(int(card)).get_port(int(port)) except KeyError as _: raise TgnError('Physical port {} unreachable'.format(location)) self.set_attributes(commit=True, connectedTo=phy_port.ref) while self.get_attribute('connectedTo') == '::ixNet::OBJ-null': time.sleep(1) if wait_for_up: self.wait_for_up(timeout)
[ "def", "reserve", "(", "self", ",", "location", "=", "None", ",", "force", "=", "False", ",", "wait_for_up", "=", "True", ",", "timeout", "=", "80", ")", ":", "if", "not", "location", "or", "is_local_host", "(", "location", ")", ":", "return", "hostnam...
Reserve port and optionally wait for port to come up. :param location: port location as 'ip/module/port'. If None, the location will be taken from the configuration. :param force: whether to revoke existing reservation (True) or not (False). :param wait_for_up: True - wait for port to come up, False - return immediately. :param timeout: how long (seconds) to wait for port to come up.
[ "Reserve", "port", "and", "optionally", "wait", "for", "port", "to", "come", "up", "." ]
python
train
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L96-L127
async def set_webhook(self, url: base.String, certificate: typing.Union[base.InputFile, None] = None, max_connections: typing.Union[base.Integer, None] = None, allowed_updates: typing.Union[typing.List[base.String], None] = None) -> base.Boolean: """ Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. Source: https://core.telegram.org/bots/api#setwebhook :param url: HTTPS url to send updates to. Use an empty string to remove webhook integration :type url: :obj:`base.String` :param certificate: Upload your public key certificate so that the root certificate in use can be checked :type certificate: :obj:`typing.Union[base.InputFile, None]` :param max_connections: Maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. :type max_connections: :obj:`typing.Union[base.Integer, None]` :param allowed_updates: List the types of updates you want your bot to receive :type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]` :return: Returns true :rtype: :obj:`base.Boolean` """ allowed_updates = prepare_arg(allowed_updates) payload = generate_payload(**locals(), exclude=['certificate']) files = {} prepare_file(payload, files, 'certificate', certificate) result = await self.request(api.Methods.SET_WEBHOOK, payload, files) return result
[ "async", "def", "set_webhook", "(", "self", ",", "url", ":", "base", ".", "String", ",", "certificate", ":", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "None", "]", "=", "None", ",", "max_connections", ":", "typing", ".", "Union", "[",...
Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. Source: https://core.telegram.org/bots/api#setwebhook :param url: HTTPS url to send updates to. Use an empty string to remove webhook integration :type url: :obj:`base.String` :param certificate: Upload your public key certificate so that the root certificate in use can be checked :type certificate: :obj:`typing.Union[base.InputFile, None]` :param max_connections: Maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. :type max_connections: :obj:`typing.Union[base.Integer, None]` :param allowed_updates: List the types of updates you want your bot to receive :type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]` :return: Returns true :rtype: :obj:`base.Boolean`
[ "Use", "this", "method", "to", "specify", "a", "url", "and", "receive", "incoming", "updates", "via", "an", "outgoing", "webhook", ".", "Whenever", "there", "is", "an", "update", "for", "the", "bot", "we", "will", "send", "an", "HTTPS", "POST", "request", ...
python
train
lucasmaystre/choix
choix/lsr.py
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L74-L109
def ilsr_pairwise( n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8): """Compute the ML estimate of model parameters using I-LSR. This function computes the maximum-likelihood (ML) estimate of model parameters given pairwise-comparison data (see :ref:`data-pairwise`), using the iterative Luce Spectral Ranking algorithm [MG15]_. The transition rates of the LSR Markov chain are initialized with ``alpha``. When ``alpha > 0``, this corresponds to a form of regularization (see :ref:`regularization` for details). Parameters ---------- n_items : int Number of distinct items. data : list of lists Pairwise-comparison data. alpha : float, optional Regularization parameter. initial_params : array_like, optional Parameters used to initialize the iterative procedure. max_iter : int, optional Maximum number of iterations allowed. tol : float, optional Maximum L1-norm of the difference between successive iterates to declare convergence. Returns ------- params : numpy.ndarray The ML estimate of model parameters. """ fun = functools.partial( lsr_pairwise, n_items=n_items, data=data, alpha=alpha) return _ilsr(fun, initial_params, max_iter, tol)
[ "def", "ilsr_pairwise", "(", "n_items", ",", "data", ",", "alpha", "=", "0.0", ",", "initial_params", "=", "None", ",", "max_iter", "=", "100", ",", "tol", "=", "1e-8", ")", ":", "fun", "=", "functools", ".", "partial", "(", "lsr_pairwise", ",", "n_ite...
Compute the ML estimate of model parameters using I-LSR. This function computes the maximum-likelihood (ML) estimate of model parameters given pairwise-comparison data (see :ref:`data-pairwise`), using the iterative Luce Spectral Ranking algorithm [MG15]_. The transition rates of the LSR Markov chain are initialized with ``alpha``. When ``alpha > 0``, this corresponds to a form of regularization (see :ref:`regularization` for details). Parameters ---------- n_items : int Number of distinct items. data : list of lists Pairwise-comparison data. alpha : float, optional Regularization parameter. initial_params : array_like, optional Parameters used to initialize the iterative procedure. max_iter : int, optional Maximum number of iterations allowed. tol : float, optional Maximum L1-norm of the difference between successive iterates to declare convergence. Returns ------- params : numpy.ndarray The ML estimate of model parameters.
[ "Compute", "the", "ML", "estimate", "of", "model", "parameters", "using", "I", "-", "LSR", "." ]
python
train
saltstack/salt
salt/modules/inspectlib/fsdb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/fsdb.py#L142-L154
def open(self, dbname=None): ''' Open database from the path with the name or latest. If there are no yet databases, create a new implicitly. :return: ''' databases = self.list() if self.is_closed(): self.db_path = os.path.join(self.path, dbname or (databases and databases[0] or self.new())) if not self._opened: self.list_tables() self._opened = True
[ "def", "open", "(", "self", ",", "dbname", "=", "None", ")", ":", "databases", "=", "self", ".", "list", "(", ")", "if", "self", ".", "is_closed", "(", ")", ":", "self", ".", "db_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "pa...
Open database from the path with the name or latest. If there are no yet databases, create a new implicitly. :return:
[ "Open", "database", "from", "the", "path", "with", "the", "name", "or", "latest", ".", "If", "there", "are", "no", "yet", "databases", "create", "a", "new", "implicitly", "." ]
python
train
biocore/burrito-fillings
bfillings/sortmerna_v2.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L421-L544
def sortmerna_map(seq_path, output_dir, refseqs_fp, sortmerna_db, e_value=1, threads=1, best=None, num_alignments=None, HALT_EXEC=False, output_sam=False, sam_SQ_tags=False, blast_format=3, print_all_reads=True, ): """Launch sortmerna mapper Parameters ---------- seq_path : str filepath to reads. output_dir : str dirpath to sortmerna output. refseqs_fp : str filepath of reference sequences. sortmerna_db : str indexed reference database. e_value : float, optional E-value threshold [default: 1]. threads : int, optional number of threads to use (OpenMP) [default: 1]. best : int, optional number of best alignments to output per read [default: None]. num_alignments : int, optional number of first alignments passing E-value threshold to output per read [default: None]. HALT_EXEC : bool, debugging parameter If passed, will exit just before the sortmerna command is issued and will print out the command that would have been called to stdout [default: False]. output_sam : bool, optional flag to set SAM output format [default: False]. sam_SQ_tags : bool, optional add SQ field to SAM output (if output_SAM is True) [default: False]. blast_format : int, optional Output Blast m8 tabular + 2 extra columns for CIGAR string and query coverge [default: 3]. print_all_reads : bool, optional output NULL alignments for non-aligned reads [default: True]. 
Returns ------- dict of result paths set in _get_result_paths() """ if not (blast_format or output_sam): raise ValueError("Either Blast or SAM output alignment " "format must be chosen.") if (best and num_alignments): raise ValueError("Only one of --best or --num_alignments " "options must be chosen.") # Instantiate the object smr = Sortmerna(HALT_EXEC=HALT_EXEC) # Set the input reference sequence + indexed database path smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db)) # Set input query sequences path smr.Parameters['--reads'].on(seq_path) # Set Blast tabular output # The option --blast 3 represents an # m8 blast tabular output + two extra # columns containing the CIGAR string # and the query coverage if blast_format: smr.Parameters['--blast'].on(blast_format) # Output alignments in SAM format if output_sam: smr.Parameters['--sam'].on() if sam_SQ_tags: smr.Parameters['--SQ'].on() # Turn on NULL string alignment output if print_all_reads: smr.Parameters['--print_all_reads'].on() # Set output results path (for Blast alignments and log file) output_file = join(output_dir, "sortmerna_map") smr.Parameters['--aligned'].on(output_file) # Set E-value threshold if e_value is not None: smr.Parameters['-e'].on(e_value) # Set number of best alignments to output per read if best is not None: smr.Parameters['--best'].on(best) # Set number of first alignments passing E-value threshold # to output per read if num_alignments is not None: smr.Parameters['--num_alignments'].on(num_alignments) # Set number of threads if threads is not None: smr.Parameters['-a'].on(threads) # Turn off parameters related to OTU-picking smr.Parameters['--fastx'].off() smr.Parameters['--otu_map'].off() smr.Parameters['--de_novo_otu'].off() smr.Parameters['--id'].off() smr.Parameters['--coverage'].off() # Run sortmerna app_result = smr() return app_result
[ "def", "sortmerna_map", "(", "seq_path", ",", "output_dir", ",", "refseqs_fp", ",", "sortmerna_db", ",", "e_value", "=", "1", ",", "threads", "=", "1", ",", "best", "=", "None", ",", "num_alignments", "=", "None", ",", "HALT_EXEC", "=", "False", ",", "ou...
Launch sortmerna mapper Parameters ---------- seq_path : str filepath to reads. output_dir : str dirpath to sortmerna output. refseqs_fp : str filepath of reference sequences. sortmerna_db : str indexed reference database. e_value : float, optional E-value threshold [default: 1]. threads : int, optional number of threads to use (OpenMP) [default: 1]. best : int, optional number of best alignments to output per read [default: None]. num_alignments : int, optional number of first alignments passing E-value threshold to output per read [default: None]. HALT_EXEC : bool, debugging parameter If passed, will exit just before the sortmerna command is issued and will print out the command that would have been called to stdout [default: False]. output_sam : bool, optional flag to set SAM output format [default: False]. sam_SQ_tags : bool, optional add SQ field to SAM output (if output_SAM is True) [default: False]. blast_format : int, optional Output Blast m8 tabular + 2 extra columns for CIGAR string and query coverge [default: 3]. print_all_reads : bool, optional output NULL alignments for non-aligned reads [default: True]. Returns ------- dict of result paths set in _get_result_paths()
[ "Launch", "sortmerna", "mapper" ]
python
train
marcotcr/lime
lime/lime_tabular.py
https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_tabular.py#L45-L57
def map_exp_ids(self, exp): """Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight) """ names = self.exp_feature_names if self.discretized_feature_names is not None: names = self.discretized_feature_names return [(names[x[0]], x[1]) for x in exp]
[ "def", "map_exp_ids", "(", "self", ",", "exp", ")", ":", "names", "=", "self", ".", "exp_feature_names", "if", "self", ".", "discretized_feature_names", "is", "not", "None", ":", "names", "=", "self", ".", "discretized_feature_names", "return", "[", "(", "na...
Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight)
[ "Maps", "ids", "to", "feature", "names", "." ]
python
train
ranaroussi/pywallet
pywallet/utils/ethereum.py
https://github.com/ranaroussi/pywallet/blob/206ff224389c490d8798f660c9e79fe97ebb64cf/pywallet/utils/ethereum.py#L969-L981
def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der
[ "def", "to_der", "(", "self", ")", ":", "# Output should be:", "# 0x30 <length> 0x02 <length r> r 0x02 <length s> s", "r", ",", "s", "=", "self", ".", "_canonicalize", "(", ")", "total_length", "=", "6", "+", "len", "(", "r", ")", "+", "len", "(", "s", ")", ...
Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s).
[ "Encodes", "this", "signature", "using", "DER" ]
python
train
nrcharles/caelum
caelum/tmy3.py
https://github.com/nrcharles/caelum/blob/9a8e65806385978556d7bb2e6870f003ff82023e/caelum/tmy3.py#L128-L134
def next(self): """iterate.""" record = self.tmy_data.next() _sd = record['Date (MM/DD/YYYY)'] + ' ' + record['Time (HH:MM)'] record['utc_datetime'] = strptime(_sd, self.tz) record['datetime'] = strptime(_sd) return record
[ "def", "next", "(", "self", ")", ":", "record", "=", "self", ".", "tmy_data", ".", "next", "(", ")", "_sd", "=", "record", "[", "'Date (MM/DD/YYYY)'", "]", "+", "' '", "+", "record", "[", "'Time (HH:MM)'", "]", "record", "[", "'utc_datetime'", "]", "="...
iterate.
[ "iterate", "." ]
python
train
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L457-L545
def get_workflow_info(brain_or_object, endpoint=None): """Generate workflow information of the assigned workflows :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: Workflows info :rtype: dict """ # ensure we have a full content object obj = get_object(brain_or_object) # get the portal workflow tool wf_tool = get_tool("portal_workflow") # the assigned workflows of this object workflows = wf_tool.getWorkflowsFor(obj) # no worfkflows assigned -> return if not workflows: return [] def to_transition_info(transition): """ return the transition information """ return { "title": transition["title"], "value": transition["id"], "display": transition["description"], "url": transition["url"], } def to_review_history_info(review_history): """ return the transition information """ converted = DT2dt(review_history.get('time')).\ strftime("%Y-%m-%d %H:%M:%S") review_history['time'] = converted return review_history out = [] for workflow in workflows: # get the status info of the current state (dictionary) info = wf_tool.getStatusOf(workflow.getId(), obj) if info is None: continue # get the current review_status review_state = info.get("review_state", None) inactive_state = info.get("inactive_state", None) cancellation_state = info.get("cancellation_state", None) worksheetanalysis_review_state = info.get("worksheetanalysis_review_state", None) state = review_state or \ inactive_state or \ cancellation_state or \ worksheetanalysis_review_state if state is None: logger.warn("No state variable found for {} -> {}".format( repr(obj), info)) continue # get the wf status object status_info = workflow.states[state] # get the title of the current status status = status_info.title # get the transition informations transitions = map(to_transition_info, wf_tool.getTransitionsFor(obj)) # get the review 
history rh = map(to_review_history_info, workflow.getInfoFor(obj, 'review_history', '')) out.append({ "workflow": workflow.getId(), "status": status, "review_state": state, "transitions": transitions, "review_history": rh, }) return {"workflow_info": out}
[ "def", "get_workflow_info", "(", "brain_or_object", ",", "endpoint", "=", "None", ")", ":", "# ensure we have a full content object", "obj", "=", "get_object", "(", "brain_or_object", ")", "# get the portal workflow tool", "wf_tool", "=", "get_tool", "(", "\"portal_workfl...
Generate workflow information of the assigned workflows :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: Workflows info :rtype: dict
[ "Generate", "workflow", "information", "of", "the", "assigned", "workflows" ]
python
train
saltstack/salt
salt/states/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L737-L1052
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group, destination_address_prefix=None, destination_port_range=None, source_address_prefix=None, source_port_range=None, description=None, destination_address_prefixes=None, destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None, connection_auth=None, **kwargs): ''' .. versionadded:: 2019.2.0 Ensure a security rule exists. :param name: Name of the security rule. :param access: 'allow' or 'deny' :param direction: 'inbound' or 'outbound' :param priority: Integer between 100 and 4096 used for ordering rule application. :param protocol: 'tcp', 'udp', or '*' :param security_group: The name of the existing network security group to contain the security rule. :param resource_group: The resource group assigned to the network security group. :param description: Optional description of the security rule. :param destination_address_prefix: The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param destination_address_prefixes: A list of destination_address_prefix values. This parameter overrides destination_address_prefix and will cause any value entered there to be ignored. 
:param destination_port_ranges: A list of destination_port_range values. This parameter overrides destination_port_range and will cause any value entered there to be ignored. :param source_address_prefixes: A list of source_address_prefix values. This parameter overrides source_address_prefix and will cause any value entered there to be ignored. :param source_port_ranges: A list of source_port_range values. This parameter overrides source_port_range and will cause any value entered there to be ignored. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. Example usage: .. code-block:: yaml Ensure security rule exists: azurearm_network.security_rule_present: - name: nsg1_rule2 - security_group: nsg1 - resource_group: group1 - priority: 101 - protocol: tcp - access: allow - direction: inbound - source_address_prefix: internet - destination_address_prefix: virtualnetwork - source_port_range: '*' - destination_port_ranges: - '80' - '443' - connection_auth: {{ profile }} - require: - azurearm_network: Ensure network security group exists ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' 
return ret exclusive_params = [ ('source_port_ranges', 'source_port_range'), ('source_address_prefixes', 'source_address_prefix'), ('destination_port_ranges', 'destination_port_range'), ('destination_address_prefixes', 'destination_address_prefix'), ] for params in exclusive_params: # pylint: disable=eval-used if not eval(params[0]) and not eval(params[1]): ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(params[0], params[1]) return ret # pylint: disable=eval-used if eval(params[0]): # pylint: disable=eval-used if not isinstance(eval(params[0]), list): ret['comment'] = 'The {0} parameter must be a list!'.format(params[0]) return ret # pylint: disable=exec-used exec('{0} = None'.format(params[1])) rule = __salt__['azurearm_network.security_rule_get']( name, security_group, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' not in rule: # access changes if access.capitalize() != rule.get('access'): ret['changes']['access'] = { 'old': rule.get('access'), 'new': access } # description changes if description != rule.get('description'): ret['changes']['description'] = { 'old': rule.get('description'), 'new': description } # direction changes if direction.capitalize() != rule.get('direction'): ret['changes']['direction'] = { 'old': rule.get('direction'), 'new': direction } # priority changes if int(priority) != rule.get('priority'): ret['changes']['priority'] = { 'old': rule.get('priority'), 'new': priority } # protocol changes if protocol.lower() != rule.get('protocol', '').lower(): ret['changes']['protocol'] = { 'old': rule.get('protocol'), 'new': protocol } # destination_port_range changes if destination_port_range != rule.get('destination_port_range'): ret['changes']['destination_port_range'] = { 'old': rule.get('destination_port_range'), 'new': destination_port_range } # source_port_range changes if source_port_range != rule.get('source_port_range'): ret['changes']['source_port_range'] = { 'old': 
rule.get('source_port_range'), 'new': source_port_range } # destination_port_ranges changes if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])): ret['changes']['destination_port_ranges'] = { 'old': rule.get('destination_port_ranges'), 'new': destination_port_ranges } # source_port_ranges changes if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])): ret['changes']['source_port_ranges'] = { 'old': rule.get('source_port_ranges'), 'new': source_port_ranges } # destination_address_prefix changes if (destination_address_prefix or '').lower() != rule.get('destination_address_prefix', '').lower(): ret['changes']['destination_address_prefix'] = { 'old': rule.get('destination_address_prefix'), 'new': destination_address_prefix } # source_address_prefix changes if (source_address_prefix or '').lower() != rule.get('source_address_prefix', '').lower(): ret['changes']['source_address_prefix'] = { 'old': rule.get('source_address_prefix'), 'new': source_address_prefix } # destination_address_prefixes changes if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])): if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])): ret['changes']['destination_address_prefixes'] = { 'old': rule.get('destination_address_prefixes'), 'new': destination_address_prefixes } else: local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes), sorted(rule.get('destination_address_prefixes'))) for idx in six_range(0, len(local_dst_addrs)): if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower(): ret['changes']['destination_address_prefixes'] = { 'old': rule.get('destination_address_prefixes'), 'new': destination_address_prefixes } break # source_address_prefixes changes if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])): if len(source_address_prefixes or []) != 
len(rule.get('source_address_prefixes', [])): ret['changes']['source_address_prefixes'] = { 'old': rule.get('source_address_prefixes'), 'new': source_address_prefixes } else: local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes), sorted(rule.get('source_address_prefixes'))) for idx in six_range(0, len(local_src_addrs)): if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower(): ret['changes']['source_address_prefixes'] = { 'old': rule.get('source_address_prefixes'), 'new': source_address_prefixes } break if not ret['changes']: ret['result'] = True ret['comment'] = 'Security rule {0} is already present.'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Security rule {0} would be updated.'.format(name) return ret else: ret['changes'] = { 'old': {}, 'new': { 'name': name, 'access': access, 'description': description, 'direction': direction, 'priority': priority, 'protocol': protocol, 'destination_address_prefix': destination_address_prefix, 'destination_address_prefixes': destination_address_prefixes, 'destination_port_range': destination_port_range, 'destination_port_ranges': destination_port_ranges, 'source_address_prefix': source_address_prefix, 'source_address_prefixes': source_address_prefixes, 'source_port_range': source_port_range, 'source_port_ranges': source_port_ranges, } } if __opts__['test']: ret['comment'] = 'Security rule {0} would be created.'.format(name) ret['result'] = None return ret rule_kwargs = kwargs.copy() rule_kwargs.update(connection_auth) rule = __salt__['azurearm_network.security_rule_create_or_update']( name=name, access=access, description=description, direction=direction, priority=priority, protocol=protocol, security_group=security_group, resource_group=resource_group, destination_address_prefix=destination_address_prefix, destination_address_prefixes=destination_address_prefixes, destination_port_range=destination_port_range, destination_port_ranges=destination_port_ranges, 
source_address_prefix=source_address_prefix, source_address_prefixes=source_address_prefixes, source_port_range=source_port_range, source_port_ranges=source_port_ranges, **rule_kwargs ) if 'error' not in rule: ret['result'] = True ret['comment'] = 'Security rule {0} has been created.'.format(name) return ret ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error')) return ret
[ "def", "security_rule_present", "(", "name", ",", "access", ",", "direction", ",", "priority", ",", "protocol", ",", "security_group", ",", "resource_group", ",", "destination_address_prefix", "=", "None", ",", "destination_port_range", "=", "None", ",", "source_add...
.. versionadded:: 2019.2.0 Ensure a security rule exists. :param name: Name of the security rule. :param access: 'allow' or 'deny' :param direction: 'inbound' or 'outbound' :param priority: Integer between 100 and 4096 used for ordering rule application. :param protocol: 'tcp', 'udp', or '*' :param security_group: The name of the existing network security group to contain the security rule. :param resource_group: The resource group assigned to the network security group. :param description: Optional description of the security rule. :param destination_address_prefix: The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param destination_address_prefixes: A list of destination_address_prefix values. This parameter overrides destination_address_prefix and will cause any value entered there to be ignored. :param destination_port_ranges: A list of destination_port_range values. This parameter overrides destination_port_range and will cause any value entered there to be ignored. :param source_address_prefixes: A list of source_address_prefix values. This parameter overrides source_address_prefix and will cause any value entered there to be ignored. 
:param source_port_ranges: A list of source_port_range values. This parameter overrides source_port_range and will cause any value entered there to be ignored. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. Example usage: .. code-block:: yaml Ensure security rule exists: azurearm_network.security_rule_present: - name: nsg1_rule2 - security_group: nsg1 - resource_group: group1 - priority: 101 - protocol: tcp - access: allow - direction: inbound - source_address_prefix: internet - destination_address_prefix: virtualnetwork - source_port_range: '*' - destination_port_ranges: - '80' - '443' - connection_auth: {{ profile }} - require: - azurearm_network: Ensure network security group exists
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
billy-yoyo/RainbowSixSiege-Python-API
r6sapi/r6sapi.py
https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L518-L539
def get_player(self, name=None, platform=None, uid=None): """|coro| Calls get_players and returns the first element, exactly one of uid and name must be given, platform must be given Parameters ---------- name : str the name of the player you're searching for platform : str the name of the platform you're searching on (See :class:`Platforms`) uid : str the uid of the player you're searching for Returns ------- :class:`Player` player found""" results = yield from self.get_players(name=name, platform=platform, uid=uid) return results[0]
[ "def", "get_player", "(", "self", ",", "name", "=", "None", ",", "platform", "=", "None", ",", "uid", "=", "None", ")", ":", "results", "=", "yield", "from", "self", ".", "get_players", "(", "name", "=", "name", ",", "platform", "=", "platform", ",",...
|coro| Calls get_players and returns the first element, exactly one of uid and name must be given, platform must be given Parameters ---------- name : str the name of the player you're searching for platform : str the name of the platform you're searching on (See :class:`Platforms`) uid : str the uid of the player you're searching for Returns ------- :class:`Player` player found
[ "|coro|" ]
python
train
gaqzi/gocd-cli
gocd_cli/utils.py
https://github.com/gaqzi/gocd-cli/blob/ca8df8ec2274fdc69bce0619aa3794463c4f5a6f/gocd_cli/utils.py#L126-L151
def get_settings(section='gocd', settings_paths=('~/.gocd/gocd-cli.cfg', '/etc/go/gocd-cli.cfg')): """Returns a `gocd_cli.settings.Settings` configured for settings file The settings will be read from environment variables first, then it'll be read from the first config file found (if any). Environment variables are expected to be in UPPERCASE and to be prefixed with `GOCD_`. Args: section: The prefix to use for reading environment variables and the name of the section in the config file. Default: gocd settings_path: Possible paths for the configuration file. Default: `('~/.gocd/gocd-cli.cfg', '/etc/go/gocd-cli.cfg')` Returns: `gocd_cli.settings.Settings` instance """ if isinstance(settings_paths, basestring): settings_paths = (settings_paths,) config_file = next((path for path in settings_paths if is_file_readable(path)), None) if config_file: config_file = expand_user(config_file) return Settings(prefix=section, section=section, filename=config_file)
[ "def", "get_settings", "(", "section", "=", "'gocd'", ",", "settings_paths", "=", "(", "'~/.gocd/gocd-cli.cfg'", ",", "'/etc/go/gocd-cli.cfg'", ")", ")", ":", "if", "isinstance", "(", "settings_paths", ",", "basestring", ")", ":", "settings_paths", "=", "(", "se...
Returns a `gocd_cli.settings.Settings` configured for settings file The settings will be read from environment variables first, then it'll be read from the first config file found (if any). Environment variables are expected to be in UPPERCASE and to be prefixed with `GOCD_`. Args: section: The prefix to use for reading environment variables and the name of the section in the config file. Default: gocd settings_path: Possible paths for the configuration file. Default: `('~/.gocd/gocd-cli.cfg', '/etc/go/gocd-cli.cfg')` Returns: `gocd_cli.settings.Settings` instance
[ "Returns", "a", "gocd_cli", ".", "settings", ".", "Settings", "configured", "for", "settings", "file" ]
python
train
gccxml/pygccxml
pygccxml/declarations/type_traits.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/type_traits.py#L288-L294
def array_size(type_): """returns array size""" nake_type = remove_alias(type_) nake_type = remove_reference(nake_type) nake_type = remove_cv(nake_type) assert isinstance(nake_type, cpptypes.array_t) return nake_type.size
[ "def", "array_size", "(", "type_", ")", ":", "nake_type", "=", "remove_alias", "(", "type_", ")", "nake_type", "=", "remove_reference", "(", "nake_type", ")", "nake_type", "=", "remove_cv", "(", "nake_type", ")", "assert", "isinstance", "(", "nake_type", ",", ...
returns array size
[ "returns", "array", "size" ]
python
train
astorfi/speechpy
speechpy/processing.py
https://github.com/astorfi/speechpy/blob/9e99ae81398e7584e6234db371d6d7b5e8736192/speechpy/processing.py#L239-L271
def cmvn(vec, variance_normalization=False):
    """Perform global cepstral mean and variance normalization (CMVN).

    The input is assumed to hold one observation (frame) per row.

    Args:
        vec (array): input feature matrix
            (size:(num_observation,num_features))
        variance_normalization (bool): If the variance
            normilization should be performed or not.

    Return:
          array: The mean(or mean+variance) normalized feature vector.
    """
    # Guards against division by zero for features with (near-)zero variance.
    eps = 2**-30

    # Subtract the per-feature (column-wise) mean. NumPy broadcasting
    # replaces the original np.tile copies (and the unused `cols` unpack)
    # without changing the numerical result.
    mean_subtracted = vec - np.mean(vec, axis=0)

    if variance_normalization:
        # Scale each feature by its post-subtraction standard deviation.
        stdev = np.std(mean_subtracted, axis=0)
        return mean_subtracted / (stdev + eps)
    return mean_subtracted
[ "def", "cmvn", "(", "vec", ",", "variance_normalization", "=", "False", ")", ":", "eps", "=", "2", "**", "-", "30", "rows", ",", "cols", "=", "vec", ".", "shape", "# Mean calculation", "norm", "=", "np", ".", "mean", "(", "vec", ",", "axis", "=", "...
This function is aimed to perform global cepstral mean and variance normalization (CMVN) on input feature vector "vec". The code assumes that there is one observation per row. Args: vec (array): input feature matrix (size:(num_observation,num_features)) variance_normalization (bool): If the variance normilization should be performed or not. Return: array: The mean(or mean+variance) normalized feature vector.
[ "This", "function", "is", "aimed", "to", "perform", "global", "cepstral", "mean", "and", "variance", "normalization", "(", "CMVN", ")", "on", "input", "feature", "vector", "vec", ".", "The", "code", "assumes", "that", "there", "is", "one", "observation", "pe...
python
train
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L719-L727
def resnet_cifar_15():
    """Set of hyperparameters."""
    # Start from the shared ResNet defaults and specialize for CIFAR.
    hparams = resnet_base()
    hparams.block_fn = "residual"
    hparams.is_cifar = True
    hparams.layer_sizes = [2, 2, 2]
    hparams.filter_sizes = [16, 32, 64, 128]
    return hparams
[ "def", "resnet_cifar_15", "(", ")", ":", "hp", "=", "resnet_base", "(", ")", "hp", ".", "block_fn", "=", "\"residual\"", "hp", ".", "is_cifar", "=", "True", "hp", ".", "layer_sizes", "=", "[", "2", ",", "2", ",", "2", "]", "hp", ".", "filter_sizes", ...
Set of hyperparameters.
[ "Set", "of", "hyperparameters", "." ]
python
train
fitnr/convertdate
convertdate/islamic.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/islamic.py#L28-L30
def to_jd(year, month, day):
    '''Determine Julian day count from Islamic date'''
    # Days contributed by the elapsed months of the current year.
    month_days = ceil(29.5 * (month - 1))
    # Days contributed by the elapsed whole years (354 days each).
    year_days = (year - 1) * 354
    # Intercalary days from the 30-year leap cycle.
    leap_days = trunc((3 + (11 * year)) / 30)
    return day + month_days + year_days + leap_days + EPOCH - 1
[ "def", "to_jd", "(", "year", ",", "month", ",", "day", ")", ":", "return", "(", "day", "+", "ceil", "(", "29.5", "*", "(", "month", "-", "1", ")", ")", "+", "(", "year", "-", "1", ")", "*", "354", "+", "trunc", "(", "(", "3", "+", "(", "1...
Determine Julian day count from Islamic date
[ "Determine", "Julian", "day", "count", "from", "Islamic", "date" ]
python
train
thumbor/thumbor
thumbor/filters/frame.py
https://github.com/thumbor/thumbor/blob/558ccdd6e3bc29e1c9ee3687372c4b3eb05ac607/thumbor/filters/frame.py#L50-L76
def handle_padding(self, padding):
    '''Pads the image with transparent pixels if necessary.'''
    pad_left, pad_top, pad_right, pad_bottom = padding

    # Only positive paddings shift the paste origin or grow the canvas.
    offset_x = pad_left if pad_left > 0 else 0
    offset_y = pad_top if pad_top > 0 else 0

    new_width = self.engine.size[0]
    new_height = self.engine.size[1]
    if pad_left > 0:
        new_width += pad_left
    if pad_right > 0:
        new_width += pad_right
    if pad_top > 0:
        new_height += pad_top
    if pad_bottom > 0:
        new_height += pad_bottom

    # Build a fresh engine with an enlarged transparent canvas and paste
    # the current image into it at the computed offset.
    canvas = self.context.modules.engine.__class__(self.context)
    canvas.image = canvas.gen_image((new_width, new_height), '#fff')
    canvas.enable_alpha()
    canvas.paste(self.engine, (offset_x, offset_y))
    self.engine.image = canvas.image
[ "def", "handle_padding", "(", "self", ",", "padding", ")", ":", "left", "=", "padding", "[", "0", "]", "top", "=", "padding", "[", "1", "]", "right", "=", "padding", "[", "2", "]", "bottom", "=", "padding", "[", "3", "]", "offset_x", "=", "0", "o...
Pads the image with transparent pixels if necessary.
[ "Pads", "the", "image", "with", "transparent", "pixels", "if", "necessary", "." ]
python
train
PmagPy/PmagPy
programs/conversion_scripts2/sio_magic2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/sio_magic2.py#L8-L693
def main(command_line=True, **kwargs):
    """
    NAME
        sio_magic.py

    DESCRIPTION
        converts SIO .mag format files to magic_measurements format files

    SYNTAX
        sio_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -usr USER: identify user, default is ""
        -f FILE: specify .mag format input file, required
        -fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations
            names, default is none -- values in SAMPFILE will override selections for
            -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention)
        -F FILE: specify output file, default is magic_measurements.txt
        -Fsy: specify er_synthetics file, default is er_sythetics.txt
        -LP [colon delimited list of protocols, include all that apply]
            AF: af demag
            T: thermal including thellier but not trm acquisition
            S: Shaw method
            I: IRM (acquisition)
            I3d: 3D IRM experiment
            N: NRM only
            TRM: trm acquisition
            ANI: anisotropy experiment
            D: double AF demag
            G: triple AF demag (GRM protocol)
            CR: cooling rate experiment.
                The treatment coding of the measurement file should be:
                XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
                where XXX in the temperature and .10,.20... are running numbers of the
                cooling rates steps. XXX.00 is optional zerofield baseline.
                XXX.70 is alteration check.
                syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
                where xxx, yyy, zzz...xxx are cooling time in [K/minutes], seperated by
                comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
                if you use a zerofield step then no need to specify the cooling rate for
                the zerofield.
                It is important to add to the command line the -A option so the
                measurements will not be averaged. But users need to make sure that
                there are no duplicate measurements in the file
        -V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
        -spc NUM : specify number of characters to designate a specimen, default = 0
        -loc LOCNAME : specify location/study name, must have either LOCNAME or
            SAMPFILE or be a synthetic
        -syn INST TYPE: sets these specimens as synthetics created at institution INST
            and of type TYPE
        -ins INST : specify which demag instrument was used
            (e.g, SIO-Suzy or SIO-Odette), default is ""
        -dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
            NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy
            experiment
        -ac B : peak AF field (in mT) for ARM acquisition, default is none
        -ncn NCON: specify naming convention: default is #1 below
        -A: don't average replicate measurements

        Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the
            single character sample designation. e.g., TG001a is the first sample
            from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name
            XXXXYYYY
        NB: all others you will have to customize your self or e-mail
            ltauxe@ucsd.edu for help.
        [8] synthetic - has no site name
        [9] ODP naming convention

    INPUT
        Best to put separate experiments (all AF, thermal, thellier, trm aquisition,
        Shaw, etc.) in seperate .mag files (eg. af.mag, thermal.mag, etc.)

        Format of SIO .mag files:
        Spec Treat CSD Intensity Declination Inclination [optional metadata string]

        Spec: specimen name
        Treat: treatment step
            XXX T in Centigrade
            XXX AF in mT
            for special experiments:
              Thellier:
                XXX.0 first zero field step
                XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order]
                XXX.2 second in-field step at lower temperature (pTRM check)
                XXX.3 second zero-field step after infield (pTRM check step)
                      XXX.3 MUST be done in this order
                      [XXX.0, XXX.1 [optional XXX.2] XXX.3]
              AARM:
                X.00 baseline step (AF in zero bias field - high peak field)
                X.1  ARM step (in field step) where X is the step number in the
                     15 position scheme
                     (see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2)
              ATRM:
                X.00 optional baseline
                X.1 ATRM step (+X)
                X.2 ATRM step (+Y)
                X.3 ATRM step (+Z)
                X.4 ATRM step (-X)
                X.5 ATRM step (-Y)
                X.6 ATRM step (-Z)
                X.7 optional alteration check (+X)
              TRM:
                XXX.YYY XXX is temperature step of total TRM
                        YYY is dc field in microtesla

        Intensity assumed to be total moment in 10^3 Am^2 (emu)
        Declination: Declination in specimen coordinate system
        Inclination: Declination in specimen coordinate system

        Optional metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
            hh in 24 hours. dC or mT units of treatment XXX (see Treat above) for
            thermal or AF respectively
            xx.xxx DC field
            UNITS of DC field (microT, mT)
            INST: instrument code, number of axes, number of positions
                  (e.g., G34 is 2G, three axes, measured in four positions)
            NMEAS: number of measurements in a single position (1,3,200...)
    """
    # ------------------------------------------------------------------
    # Initialize defaults for all experiment/state variables.
    # ------------------------------------------------------------------
    # initialize some stuff
    mag_file = None
    codelist = None
    infile_type="mag"
    noave=0
    methcode,inst="LP-NO",""
    phi,theta,peakfield,labfield=0,0,0,0
    pTRM,MD,samp_con,Z=0,0,'1',1
    # AARM 15-position scheme field directions (dec/inc per position).
    dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
    inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
    # ATRM six-position scheme field directions (+x,+y,+z,-x,-y,-z).
    tdec=[0,90,0,180,270,0,0,90,0]
    tinc=[0,0,90,0,0,-90,0,0,90]
    missing=1
    demag="N"
    er_location_name=""
    citation='This study'
    args=sys.argv
    fmt='old'
    syn=0
    synfile='er_synthetics.txt'
    samp_infile,Samps='',[]
    trm=0
    irm=0
    specnum=0
    coil=""
    mag_file=""
    #
    # get command line arguments
    #
    meas_file="magic_measurements.txt"
    user=""
    if not command_line:
        # Called programmatically (e.g. from a GUI): read options from kwargs.
        user = kwargs.get('user', '')
        meas_file = kwargs.get('meas_file', '')
        syn_file = kwargs.get('syn_file', '')
        mag_file = kwargs.get('mag_file', '')
        labfield = kwargs.get('labfield', '')
        if labfield:
            labfield = float(labfield) *1e-6
        else:
            labfield = 0
        phi = kwargs.get('phi', 0)
        if phi:
            phi = float(phi)
        else:
            phi = 0
        theta = kwargs.get('theta', 0)
        if theta:
            theta=float(theta)
        else:
            theta = 0
        peakfield = kwargs.get('peakfield', 0)
        if peakfield:
            peakfield=float(peakfield) *1e-3
        else:
            peakfield = 0
        specnum = kwargs.get('specnum', 0)
        samp_con = kwargs.get('samp_con', '1')
        er_location_name = kwargs.get('er_location_name', '')
        samp_infile = kwargs.get('samp_infile', '')
        syn = kwargs.get('syn', 0)
        institution = kwargs.get('institution', '')
        syntype = kwargs.get('syntype', '')
        inst = kwargs.get('inst', '')
        noave = kwargs.get('noave', 0)
        codelist = kwargs.get('codelist', '')
        coil = kwargs.get('coil', '')
        cooling_rates = kwargs.get('cooling_rates', '')
    if command_line:
        # Called as a script: parse sys.argv flag by flag.
        if "-h" in args:
            print(main.__doc__)
            return False
        if "-usr" in args:
            ind=args.index("-usr")
            user=args[ind+1]
        if '-F' in args:
            ind=args.index("-F")
            meas_file=args[ind+1]
        if '-Fsy' in args:
            ind=args.index("-Fsy")
            synfile=args[ind+1]
        if '-f' in args:
            ind=args.index("-f")
            mag_file=args[ind+1]
        if "-dc" in args:
            ind=args.index("-dc")
            labfield=float(args[ind+1])*1e-6
            phi=float(args[ind+2])
            theta=float(args[ind+3])
        if "-ac" in args:
            ind=args.index("-ac")
            peakfield=float(args[ind+1])*1e-3
        if "-spc" in args:
            ind=args.index("-spc")
            specnum=int(args[ind+1])
        if "-loc" in args:
            ind=args.index("-loc")
            er_location_name=args[ind+1]
        if "-fsa" in args:
            ind=args.index("-fsa")
            samp_infile = args[ind+1]
        if '-syn' in args:
            syn=1
            ind=args.index("-syn")
            institution=args[ind+1]
            syntype=args[ind+2]
            if '-fsy' in args:
                ind=args.index("-fsy")
                synfile=args[ind+1]
        if "-ins" in args:
            ind=args.index("-ins")
            inst=args[ind+1]
        if "-A" in args: noave=1
        if "-ncn" in args:
            ind=args.index("-ncn")
            samp_con=sys.argv[ind+1]
        if '-LP' in args:
            ind=args.index("-LP")
            codelist=args[ind+1]
        if "-V" in args:
            ind=args.index("-V")
            coil=args[ind+1]
    # ------------------------------------------------------------------
    # Validate/normalize inputs (from either the command line or a GUI).
    # ------------------------------------------------------------------
    # make sure all initial values are correctly set up (whether they come from the command line or a GUI)
    if samp_infile:
        Samps, file_type = pmag.magic_read(samp_infile)
    if coil:
        coil = str(coil)
        methcode="LP-IRM"
        irmunits = "V"
        if coil not in ["1","2","3"]:
            print(main.__doc__)
            print('not a valid coil specification')
            return False, '{} is not a valid coil specification'.format(coil)
    if mag_file:
        try:
            #with open(mag_file,'r') as finput:
            #    lines = finput.readlines()
            lines=pmag.open_file(mag_file)
        except:
            print("bad mag file name")
            return False, "bad mag file name"
    if not mag_file:
        print(main.__doc__)
        print("mag_file field is required option")
        return False, "mag_file field is required option"
    if specnum!=0:
        # Negative so rec[0][:specnum] strips the trailing specimen characters.
        specnum=-specnum
    #print 'samp_con:', samp_con
    if samp_con:
        # Conventions 4 and 7 carry a character count: "4-Z" / "7-Z".
        if "4" == samp_con[0]:
            if "-" not in samp_con:
                print("naming convention option [4] must be in form 4-Z where Z is an integer")
                print('---------------')
                return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
            else:
                Z=samp_con.split("-")[1]
                samp_con="4"
        if "7" == samp_con[0]:
            if "-" not in samp_con:
                print("option [7] must be in form 7-Z where Z is an integer")
                return False, "option [7] must be in form 7-Z where Z is an integer"
            else:
                Z=samp_con.split("-")[1]
                samp_con="7"
    # ------------------------------------------------------------------
    # Translate the -LP protocol list into demag mode + MagIC method code.
    # ------------------------------------------------------------------
    if codelist:
        codes=codelist.split(':')
        if "AF" in codes:
            demag='AF'
            if'-dc' not in args: methcode="LT-AF-Z"
            if'-dc' in args: methcode="LT-AF-I"
        if "T" in codes:
            demag="T"
            if '-dc' not in args: methcode="LT-T-Z"
            if '-dc' in args: methcode="LT-T-I"
        if "I" in codes:
            methcode="LP-IRM"
            irmunits="mT"
        if "I3d" in codes:
            methcode="LT-T-Z:LP-IRM-3D"
        if "S" in codes:
            # Shaw method: interactively ask for ARM field and total-TRM temp.
            demag="S"
            methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
            trm_labfield=labfield
            ans=input("DC lab field for ARM step: [50uT] ")
            if ans=="":
                arm_labfield=50e-6
            else:
                arm_labfield=float(ans)*1e-6
            ans=input("temperature for total trm step: [600 C] ")
            if ans=="":
                trm_peakT=600+273 # convert to kelvin
            else:
                trm_peakT=float(ans)+273 # convert to kelvin
        if "G" in codes: methcode="LT-AF-G"
        if "D" in codes: methcode="LT-AF-D"
        if "TRM" in codes:
            demag="T"
            trm=1
        if "CR" in codes:
            # Cooling-rate experiment: cooling times follow "CR" on the command line.
            demag="T"
            cooling_rate_experiment=1
            if command_line:
                ind=args.index("CR")
                cooling_rates=args[ind+1]
                cooling_rates_list=cooling_rates.split(',')
            else:
                cooling_rates_list=str(cooling_rates).split(',')
    if demag=="T" and "ANI" in codes:
        methcode="LP-AN-TRM"
    if demag=="T" and "CR" in codes:
        methcode="LP-CR-TRM"
    if demag=="AF" and "ANI" in codes:
        methcode="LP-AN-ARM"
        if labfield==0: labfield=50e-6
        if peakfield==0: peakfield=.180
    SynRecs,MagRecs=[],[]
    version_num=pmag.get_version()
    ##################################
    # Main conversion loop: one MagRec per measurement line in the .mag file.
    ##################################
    if 1: # if infile_type=="SIO format":
        for line in lines:
            instcode=""
            if len(line)>2:
                SynRec={}
                MagRec={}
                MagRec['er_location_name']=er_location_name
                MagRec['magic_software_packages']=version_num
                MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
                MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
                MagRec["treatment_ac_field"]='0'
                MagRec["treatment_dc_field"]='0'
                MagRec["treatment_dc_field_phi"]='0'
                MagRec["treatment_dc_field_theta"]='0'
                meas_type="LT-NO"
                rec=line.split()
                if rec[1]==".00":rec[1]="0.00"
                # treat[0] is the main step (temp/AF), treat[1] the sub-step code.
                treat=rec[1].split('.')
                if methcode=="LP-IRM":
                    if irmunits=='mT':
                        labfield=float(treat[0])*1e-3
                    else:
                        labfield=pmag.getfield(irmunits,coil,treat[0])
                    # Negative treatment step means back-field IRM (field along -z).
                    if rec[1][0]!="-":
                        phi,theta=0.,90.
                    else:
                        phi,theta=0.,-90.
                    meas_type="LT-IRM"
                    MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                    MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                    MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                if len(rec)>6:
                    # Optional metadata string is present; parse date/time/instrument.
                    code1=rec[6].split(';') # break e.g., 10/15/02;7:45 indo date and time
                    if len(code1)==2: # old format with AM/PM
                        missing=0
                        code2=code1[0].split('/') # break date into mon/day/year
                        code3=rec[7].split(';') # break e.g., AM;C34;200 into time;instr/axes/measuring pos;number of measurements
                        yy=int(code2[2])
                        # Two-digit years: <90 -> 20xx, else 19xx.
                        if yy <90:
                            yyyy=str(2000+yy)
                        else: yyyy=str(1900+yy)
                        mm=int(code2[0])
                        if mm<10:
                            mm="0"+str(mm)
                        else: mm=str(mm)
                        dd=int(code2[1])
                        if dd<10:
                            dd="0"+str(dd)
                        else: dd=str(dd)
                        time=code1[1].split(':')
                        hh=int(time[0])
                        if code3[0]=="PM":hh=hh+12
                        if hh<10:
                            hh="0"+str(hh)
                        else: hh=str(hh)
                        min=int(time[1])
                        if min<10:
                            min= "0"+str(min)
                        else: min=str(min)
                        MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
                        MagRec["measurement_time_zone"]='SAN'
                        if inst=="":
                            if code3[1][0]=='C':instcode='SIO-bubba'
                            if code3[1][0]=='G':instcode='SIO-flo'
                        else:
                            instcode=''
                        MagRec["measurement_positions"]=code3[1][2]
                    elif len(code1)>2: # newest format (cryo7 or later)
                        if "LP-AN-ARM" not in methcode:labfield=0
                        fmt='new'
                        date=code1[0].split('/') # break date into mon/day/year
                        yy=int(date[2])
                        if yy <90:
                            yyyy=str(2000+yy)
                        else: yyyy=str(1900+yy)
                        mm=int(date[0])
                        if mm<10:
                            mm="0"+str(mm)
                        else: mm=str(mm)
                        dd=int(date[1])
                        if dd<10:
                            dd="0"+str(dd)
                        else: dd=str(dd)
                        time=code1[1].split(':')
                        hh=int(time[0])
                        if hh<10:
                            hh="0"+str(hh)
                        else: hh=str(hh)
                        min=int(time[1])
                        if min<10:
                            min= "0"+str(min)
                        else:
                            min=str(min)
                        MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
                        MagRec["measurement_time_zone"]='SAN'
                        if inst=="":
                            if code1[6][0]=='C':
                                instcode='SIO-bubba'
                            if code1[6][0]=='G':
                                instcode='SIO-flo'
                        else:
                            instcode=''
                        if len(code1)>1:
                            MagRec["measurement_positions"]=code1[6][2]
                        else:
                            MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
                        if user=="":user=code1[5]
                        # Treatment units in metadata override the protocol guess.
                        if code1[2][-1]=='C':
                            demag="T"
                            if code1[4]=='microT' and float(code1[3])!=0. and "LP-AN-ARM" not in methcode:
                                labfield=float(code1[3])*1e-6
                        if code1[2]=='mT' and methcode!="LP-IRM":
                            demag="AF"
                            if code1[4]=='microT' and float(code1[3])!=0.:
                                labfield=float(code1[3])*1e-6
                        if code1[4]=='microT' and labfield!=0. and meas_type!="LT-IRM":
                            phi,theta=0.,-90.
                            if demag=="T": meas_type="LT-T-I"
                            if demag=="AF": meas_type="LT-AF-I"
                            MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                        if code1[4]=='' or labfield==0. and meas_type!="LT-IRM":
                            if demag=='T':meas_type="LT-T-Z"
                            if demag=="AF":meas_type="LT-AF-Z"
                            MagRec["treatment_dc_field"]='0'
                # Fill in specimen/sample/site names (real samples vs synthetics).
                if syn==0:
                    MagRec["er_specimen_name"]=rec[0]
                    MagRec["er_synthetic_name"]=""
                    MagRec["er_site_name"]=""
                    if specnum!=0:
                        MagRec["er_sample_name"]=rec[0][:specnum]
                    else:
                        MagRec["er_sample_name"]=rec[0]
                    if samp_infile and Samps: # if samp_infile was provided AND yielded sample data
                        samp=pmag.get_dictitem(Samps,'er_sample_name',MagRec['er_sample_name'],'T')
                        if len(samp)>0:
                            MagRec["er_location_name"]=samp[0]["er_location_name"]
                            MagRec["er_site_name"]=samp[0]["er_site_name"]
                        else:
                            MagRec['er_location_name']=''
                            MagRec["er_site_name"]=''
                    elif int(samp_con)!=6:
                        site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
                        MagRec["er_site_name"]=site
                    if MagRec['er_site_name']=="":
                        print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
                    if MagRec["er_location_name"]=="":
                        print('no location name for: ',MagRec["er_specimen_name"])
                else:
                    MagRec["er_specimen_name"]=rec[0]
                    if specnum!=0:
                        MagRec["er_sample_name"]=rec[0][:specnum]
                    else:
                        MagRec["er_sample_name"]=rec[0]
                    MagRec["er_site_name"]=""
                    MagRec["er_synthetic_name"]=MagRec["er_specimen_name"]
                    SynRec["er_synthetic_name"]=MagRec["er_specimen_name"]
                    site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
                    SynRec["synthetic_parent_sample"]=site
                    SynRec["er_citation_names"]="This study"
                    SynRec["synthetic_institution"]=institution
                    SynRec["synthetic_type"]=syntype
                    SynRecs.append(SynRec)
                # Decode the treatment step per experiment type.
                if float(rec[1])==0:
                    pass
                elif demag=="AF":
                    if methcode != "LP-AN-ARM":
                        MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
                        if meas_type=="LT-AF-Z":
                            MagRec["treatment_dc_field"]='0'
                    else: # AARM experiment
                        if treat[1][0]=='0':
                            meas_type="LT-AF-Z:LP-AN-ARM:"
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(0)
                            if labfield!=0 and methcode!="LP-AN-ARM":
                                print("Warning - inconsistency in mag file with lab field - overriding file with 0")
                        else:
                            meas_type="LT-AF-I:LP-AN-ARM"
                            ipos=int(treat[0])-1
                            MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
                            MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
                            MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                elif demag=="T" and methcode == "LP-AN-TRM":
                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if treat[1][0]=='0':
                        meas_type="LT-T-Z:LP-AN-TRM"
                        MagRec["treatment_dc_field"]='%8.3e'%(0)
                        MagRec["treatment_dc_field_phi"]='0'
                        MagRec["treatment_dc_field_theta"]='0'
                    else:
                        MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                        if treat[1][0]=='7': # alteration check as final measurement
                            meas_type="LT-PTRM-I:LP-AN-TRM"
                        else:
                            meas_type="LT-T-I:LP-AN-TRM"
                        # find the direction of the lab field in two ways:
                        # (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
                        ipos_code=int(treat[1][0])-1
                        # (2) using the magnetization
                        DEC=float(rec[4])
                        INC=float(rec[5])
                        if INC < 45 and INC > -45:
                            if DEC>315 or DEC<45: ipos_guess=0
                            if DEC>45 and DEC<135: ipos_guess=1
                            if DEC>135 and DEC<225: ipos_guess=3
                            if DEC>225 and DEC<315: ipos_guess=4
                        else:
                            if INC >45: ipos_guess=2
                            if INC <-45: ipos_guess=5
                        # prefer the guess over the code
                        ipos=ipos_guess
                        MagRec["treatment_dc_field_phi"]='%7.1f' %(tdec[ipos])
                        MagRec["treatment_dc_field_theta"]='%7.1f'% (tinc[ipos])
                        # check it
                        if ipos_guess!=ipos_code and treat[1][0]!='7':
                            print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!"%(rec[0],".".join(list(treat))))
                elif demag=="S": # Shaw experiment
                    if treat[1][1]=='0':
                        if int(treat[0])!=0:
                            MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z" # first AF
                        else:
                            meas_type="LT-NO"
                            MagRec["treatment_ac_field"]='0'
                            MagRec["treatment_dc_field"]='0'
                    elif treat[1][1]=='1':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            meas_type="LT-AF-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"
                    elif treat[1][1]=='2':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='0'
                            MagRec["treatment_dc_field"]='%8.3e'%(trm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            MagRec["treatment_temp"]='%8.3e' % (trm_peakT)
                            meas_type="LT-T-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"
                    elif treat[1][1]=='3':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            meas_type="LT-AF-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"
                # Cooling rate experient
                # added by rshaar
                elif demag=="T" and methcode == "LP-CR-TRM":
                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if treat[1][0]=='0':
                        meas_type="LT-T-Z:LP-CR-TRM"
                        MagRec["treatment_dc_field"]='%8.3e'%(0)
                        MagRec["treatment_dc_field_phi"]='0'
                        MagRec["treatment_dc_field_theta"]='0'
                    else:
                        MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                        if treat[1][0]=='7': # alteration check as final measurement
                            meas_type="LT-PTRM-I:LP-CR-TRM"
                        else:
                            meas_type="LT-T-I:LP-CR-TRM"
                        MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                        MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
                        indx=int(treat[1][0])-1
                        # alteration check matjed as 0.7 in the measurement file
                        if indx==6:
                            cooling_time= cooling_rates_list[-1]
                        else:
                            cooling_time=cooling_rates_list[indx]
                        MagRec["measurement_description"]="cooling_rate"+":"+cooling_time+":"+"K/min"
                elif demag!='N':
                    if len(treat)==1:treat.append('0')
                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if trm==0:  # demag=T and not trmaq
                        if treat[1][0]=='0':
                            meas_type="LT-T-Z"
                        else:
                            MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
                            MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                            MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
                            if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
                            if treat[1][0]=='2':
                                meas_type="LT-PTRM-I" # pTRM check
                                pTRM=1
                            if treat[1][0]=='3':
                                MagRec["treatment_dc_field"]='0'  # this is a zero field step
                                meas_type="LT-PTRM-MD" # pTRM tail check
                    else:
                        labfield=float(treat[1])*1e-6
                        MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
                        MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                        MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
                        meas_type="LT-T-I:LP-TRM" # trm acquisition experiment
                # Common measurement fields for every record.
                MagRec["measurement_csd"]=rec[2]
                MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
                MagRec["measurement_dec"]=rec[4]
                MagRec["measurement_inc"]=rec[5]
                MagRec["magic_instrument_codes"]=instcode
                MagRec["er_analyst_mail_names"]=user
                MagRec["er_citation_names"]=citation
                if "LP-IRM-3D" in methcode : meas_type=methcode
                #MagRec["magic_method_codes"]=methcode.strip(':')
                MagRec["magic_method_codes"]=meas_type
                MagRec["measurement_flag"]='g'
                MagRec["er_specimen_name"]=rec[0]
                if 'std' in rec[0]:
                    MagRec["measurement_standard"]='s'
                else:
                    MagRec["measurement_standard"]='u'
                MagRec["measurement_number"]='1'
                #print MagRec['treatment_temp']
                MagRecs.append(MagRec)
    # Average (unless -A) and write the MagIC output files.
    MagOuts=pmag.measurements_methods(MagRecs,noave)
    pmag.magic_write(meas_file,MagOuts,'magic_measurements')
    print("results put in ",meas_file)
    if len(SynRecs)>0:
        pmag.magic_write(synfile,SynRecs,'er_synthetics')
        print("synthetics put in ",synfile)
    return True, meas_file
[ "def", "main", "(", "command_line", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# initialize some stuff", "mag_file", "=", "None", "codelist", "=", "None", "infile_type", "=", "\"mag\"", "noave", "=", "0", "methcode", ",", "inst", "=", "\"LP-NO\"", ",...
NAME sio_magic.py DESCRIPTION converts SIO .mag format files to magic_measurements format files SYNTAX sio_magic.py [command line options] OPTIONS -h: prints the help message and quits. -usr USER: identify user, default is "" -f FILE: specify .mag format input file, required -fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none -- values in SAMPFILE will override selections for -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention) -F FILE: specify output file, default is magic_measurements.txt -Fsy: specify er_synthetics file, default is er_sythetics.txt -LP [colon delimited list of protocols, include all that apply] AF: af demag T: thermal including thellier but not trm acquisition S: Shaw method I: IRM (acquisition) I3d: 3D IRM experiment N: NRM only TRM: trm acquisition ANI: anisotropy experiment D: double AF demag G: triple AF demag (GRM protocol) CR: cooling rate experiment. The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional) where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps. XXX.00 is optional zerofield baseline. XXX.70 is alteration check. syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A where xxx, yyy, zzz...xxx are cooling time in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70 if you use a zerofield step then no need to specify the cooling rate for the zerofield It is important to add to the command line the -A option so the measurements will not be averaged. 
But users need to make sure that there are no duplicate measurements in the file -V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3 -spc NUM : specify number of characters to designate a specimen, default = 0 -loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic -syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE -ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is "" -dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment -ac B : peak AF field (in mT) for ARM acquisition, default is none -ncn NCON: specify naming convention: default is #1 below -A: don't average replicate measurements Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail ltauxe@ucsd.edu for help. [8] synthetic - has no site name [9] ODP naming convention INPUT Best to put separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in seperate .mag files (eg. af.mag, thermal.mag, etc.) 
Format of SIO .mag files: Spec Treat CSD Intensity Declination Inclination [optional metadata string] Spec: specimen name Treat: treatment step XXX T in Centigrade XXX AF in mT for special experiments: Thellier: XXX.0 first zero field step XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order] XXX.2 second in-field step at lower temperature (pTRM check) XXX.3 second zero-field step after infield (pTRM check step) XXX.3 MUST be done in this order [XXX.0, XXX.1 [optional XXX.2] XXX.3] AARM: X.00 baseline step (AF in zero bias field - high peak field) X.1 ARM step (in field step) where X is the step number in the 15 position scheme (see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2) ATRM: X.00 optional baseline X.1 ATRM step (+X) X.2 ATRM step (+Y) X.3 ATRM step (+Z) X.4 ATRM step (-X) X.5 ATRM step (-Y) X.6 ATRM step (-Z) X.7 optional alteration check (+X) TRM: XXX.YYY XXX is temperature step of total TRM YYY is dc field in microtesla Intensity assumed to be total moment in 10^3 Am^2 (emu) Declination: Declination in specimen coordinate system Inclination: Declination in specimen coordinate system Optional metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS hh in 24 hours. dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively xx.xxx DC field UNITS of DC field (microT, mT) INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes, measured in four positions) NMEAS: number of measurements in a single position (1,3,200...)
[ "NAME", "sio_magic", ".", "py" ]
python
train
lokhman/pydbal
pydbal/threading.py
https://github.com/lokhman/pydbal/blob/53f396a2a18826e9fff178cd2c0636c1656cbaea/pydbal/threading.py#L161-L169
def insert(self, table, values): """Inserts a table row with specified data. :param table: the expression of the table to insert data into, quoted or unquoted :param values: a dictionary containing column-value pairs :return: last inserted ID """ with self.locked() as conn: return conn.insert(table, values)
[ "def", "insert", "(", "self", ",", "table", ",", "values", ")", ":", "with", "self", ".", "locked", "(", ")", "as", "conn", ":", "return", "conn", ".", "insert", "(", "table", ",", "values", ")" ]
Inserts a table row with specified data. :param table: the expression of the table to insert data into, quoted or unquoted :param values: a dictionary containing column-value pairs :return: last inserted ID
[ "Inserts", "a", "table", "row", "with", "specified", "data", "." ]
python
train
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L514-L521
def get_album_by_mbid(self, mbid): """Looks up an album by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "album.getInfo", params).execute(True) return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
[ "def", "get_album_by_mbid", "(", "self", ",", "mbid", ")", ":", "params", "=", "{", "\"mbid\"", ":", "mbid", "}", "doc", "=", "_Request", "(", "self", ",", "\"album.getInfo\"", ",", "params", ")", ".", "execute", "(", "True", ")", "return", "Album", "(...
Looks up an album by its MusicBrainz ID
[ "Looks", "up", "an", "album", "by", "its", "MusicBrainz", "ID" ]
python
train
mk-fg/feedjack
feedjack/fjlib.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/fjlib.py#L131-L169
def get_page(site, page=1, **criterias): 'Returns a paginator object and a requested page from it.' global _since_formats_vary if 'since' in criterias: since = criterias['since'] if since in _since_offsets: since = datetime.today() - timedelta(_since_offsets[since]) else: if _since_formats_vary: for fmt, substs in it.product( list(_since_formats), it.chain.from_iterable( it.combinations(_since_formats_vary, n) for n in xrange(1, len(_since_formats_vary)) ) ): for src, dst in substs: fmt = fmt.replace(src, dst) _since_formats.add(fmt) _since_formats_vary = None # to avoid doing it again for fmt in _since_formats: try: since = datetime.strptime(since, fmt) except ValueError: pass else: break else: raise Http404 # invalid format try: criterias['since'] = timezone.make_aware( since, timezone.get_current_timezone() ) except ( timezone.pytz.exceptions.AmbiguousTimeError if timezone.pytz else RuntimeError ): # Since there's no "right" way here anyway... criterias['since'] = since.replace(tzinfo=timezone) order_force = criterias.pop('asc', None) posts = models.Post.objects.filtered(site, **criterias)\ .sorted(site.order_posts_by, force=order_force)\ .select_related('feed') paginator = Paginator(posts, site.posts_per_page) try: return paginator.page(page) except InvalidPage: raise Http404
[ "def", "get_page", "(", "site", ",", "page", "=", "1", ",", "*", "*", "criterias", ")", ":", "global", "_since_formats_vary", "if", "'since'", "in", "criterias", ":", "since", "=", "criterias", "[", "'since'", "]", "if", "since", "in", "_since_offsets", ...
Returns a paginator object and a requested page from it.
[ "Returns", "a", "paginator", "object", "and", "a", "requested", "page", "from", "it", "." ]
python
train
KyleWpppd/css-audit
cssaudit/parser.py
https://github.com/KyleWpppd/css-audit/blob/cab4d4204cf30d54bc1881deee6ad92ae6aacc56/cssaudit/parser.py#L154-L200
def parse_inline_styles(self, data=None, import_type ='string'): """ Function for parsing styles defined in the body of the document. This only includes data inside of HTML <style> tags, a URL, or file to open. """ if data is None: raise parser = cssutils.CSSParser() if import_type == 'string': #print "importing string with url=%s" % self.url_root sheet = parser.parseString(data,href=self.url_root) elif import_type == 'url': if data[:5].lower() == 'http:' or data[:6].lower() == 'https:': print "YES because it was: %s " % data[:5].lower() try: sheet = parser.parseUrl(data) except: sys.stderr.write("WARNING: Failed attempting to parse %s" % data) return elif import_type == 'file': sheet = parser.parseFile(data) else: raise hrefs = [] for i in range(len(sheet.cssRules)): if sheet.cssRules[i].type == cssutils.css.CSSStyleRule.STYLE_RULE: selector = sheet.cssRules[i].selectorText #print "cssparser found selector: %s" % selector selectors = selector.split(',') self.defined_classes.extend(selectors) elif ( self.follow_css_links == True and sheet.cssRules[i].type == cssutils.css.CSSStyleRule.IMPORT_RULE ): href = sheet.cssRules[i].href sys.stderr.write("Added %s to the stylesheets to crawl" % href) if href[:5].lower() == 'http:' or href[:6].lower() == 'https:': self.linked_sheets.append(href) else: # We'll have to try to add in a url root here, if these are relative # links. self.linked_sheets.append(self.url_root+href) self.parse_inline_styles(data=self.url_root+href, import_type='url') else: # We won't worry about the other rule types. pass
[ "def", "parse_inline_styles", "(", "self", ",", "data", "=", "None", ",", "import_type", "=", "'string'", ")", ":", "if", "data", "is", "None", ":", "raise", "parser", "=", "cssutils", ".", "CSSParser", "(", ")", "if", "import_type", "==", "'string'", ":...
Function for parsing styles defined in the body of the document. This only includes data inside of HTML <style> tags, a URL, or file to open.
[ "Function", "for", "parsing", "styles", "defined", "in", "the", "body", "of", "the", "document", ".", "This", "only", "includes", "data", "inside", "of", "HTML", "<style", ">", "tags", "a", "URL", "or", "file", "to", "open", "." ]
python
train
wooyek/django-powerbank
setup.py
https://github.com/wooyek/django-powerbank/blob/df91189f2ac18bacc545ccf3c81c4465fb993949/setup.py#L42-L50
def get_version(*file_paths): """Retrieves the version from path""" filename = os.path.join(os.path.dirname(__file__), *file_paths) print("Looking for version in: {}".format(filename)) version_file = open(filename).read() version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError('Unable to find version string.')
[ "def", "get_version", "(", "*", "file_paths", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "*", "file_paths", ")", "print", "(", "\"Looking for version in: {}\"", ".", "form...
Retrieves the version from path
[ "Retrieves", "the", "version", "from", "path" ]
python
train
bollwyvl/nosebook
nosebook.py
https://github.com/bollwyvl/nosebook/blob/6a79104b9be4b5acf1ff06cbf745f220a54a4613/nosebook.py#L136-L165
def configure(self, options, conf): """ apply configured options """ super(Nosebook, self).configure(options, conf) self.testMatch = re.compile(options.nosebookTestMatch).match self.testMatchCell = re.compile(options.nosebookTestMatchCell).match scrubs = [] if options.nosebookScrub: try: scrubs = json.loads(options.nosebookScrub) except Exception: scrubs = [options.nosebookScrub] if isstr(scrubs): scrubs = { scrubs: "<...>" } elif not isinstance(scrubs, dict): scrubs = dict([ (scrub, "<...%s>" % i) for i, scrub in enumerate(scrubs) ]) self.scrubMatch = { re.compile(scrub): sub for scrub, sub in scrubs.items() }
[ "def", "configure", "(", "self", ",", "options", ",", "conf", ")", ":", "super", "(", "Nosebook", ",", "self", ")", ".", "configure", "(", "options", ",", "conf", ")", "self", ".", "testMatch", "=", "re", ".", "compile", "(", "options", ".", "noseboo...
apply configured options
[ "apply", "configured", "options" ]
python
train
jaraco/irc
irc/message.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/message.py#L6-L36
def parse(item): r""" >>> Tag.parse('x') == {'key': 'x', 'value': None} True >>> Tag.parse('x=yes') == {'key': 'x', 'value': 'yes'} True >>> Tag.parse('x=3')['value'] '3' >>> Tag.parse('x=red fox\\:green eggs')['value'] 'red fox;green eggs' >>> Tag.parse('x=red fox:green eggs')['value'] 'red fox:green eggs' >>> Tag.parse('x=a\\nb\\nc')['value'] 'a\nb\nc' """ key, sep, value = item.partition('=') value = value.replace('\\:', ';') value = value.replace('\\s', ' ') value = value.replace('\\n', '\n') value = value.replace('\\r', '\r') value = value.replace('\\\\', '\\') value = value or None return { 'key': key, 'value': value, }
[ "def", "parse", "(", "item", ")", ":", "key", ",", "sep", ",", "value", "=", "item", ".", "partition", "(", "'='", ")", "value", "=", "value", ".", "replace", "(", "'\\\\:'", ",", "';'", ")", "value", "=", "value", ".", "replace", "(", "'\\\\s'", ...
r""" >>> Tag.parse('x') == {'key': 'x', 'value': None} True >>> Tag.parse('x=yes') == {'key': 'x', 'value': 'yes'} True >>> Tag.parse('x=3')['value'] '3' >>> Tag.parse('x=red fox\\:green eggs')['value'] 'red fox;green eggs' >>> Tag.parse('x=red fox:green eggs')['value'] 'red fox:green eggs' >>> Tag.parse('x=a\\nb\\nc')['value'] 'a\nb\nc'
[ "r", ">>>", "Tag", ".", "parse", "(", "x", ")", "==", "{", "key", ":", "x", "value", ":", "None", "}", "True" ]
python
train
keans/lmnotify
lmnotify/ssdp.py
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/ssdp.py#L117-L180
def get_filtered_devices( self, model_name, device_types="upnp:rootdevice", timeout=2 ): """ returns a dict of devices that contain the given model name """ # get list of all UPNP devices in the network upnp_devices = self.discover_upnp_devices(st=device_types) # go through all UPNP devices and filter wanted devices filtered_devices = collections.defaultdict(dict) for dev in upnp_devices.values(): try: # download XML file with information about the device # from the device's location r = requests.get(dev.location, timeout=timeout) if r.status_code == requests.codes.ok: # parse returned XML root = ET.fromstring(r.text) # add shortcut for XML namespace to access sub nodes ns = {"upnp": "urn:schemas-upnp-org:device-1-0"} # get device element device = root.find("upnp:device", ns) if model_name in device.find( "upnp:modelName", ns ).text: # model name is wanted => add to list # get unique UDN of the device that is used as key udn = device.find("upnp:UDN", ns).text # add url base url_base = root.find("upnp:URLBase", ns) if url_base is not None: filtered_devices[udn][ "URLBase" ] = url_base.text # add interesting device attributes and # use unique UDN as key for attr in ( "deviceType", "friendlyName", "manufacturer", "manufacturerURL", "modelDescription", "modelName", "modelNumber" ): el = device.find("upnp:%s" % attr, ns) if el is not None: filtered_devices[udn][ attr ] = el.text.strip() except ET.ParseError: # just skip devices that are invalid xml pass except requests.exceptions.ConnectTimeout: # just skip devices that are not replying in time print("Timeout for '%s'. Skipping." % dev.location) return filtered_devices
[ "def", "get_filtered_devices", "(", "self", ",", "model_name", ",", "device_types", "=", "\"upnp:rootdevice\"", ",", "timeout", "=", "2", ")", ":", "# get list of all UPNP devices in the network", "upnp_devices", "=", "self", ".", "discover_upnp_devices", "(", "st", "...
returns a dict of devices that contain the given model name
[ "returns", "a", "dict", "of", "devices", "that", "contain", "the", "given", "model", "name" ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/__init__.py#L140-L161
def _set_stp(self, v, load=False): """ Setter method for stp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/stp (container) If this variable is read-only (config: false) in the source YANG file, then _set_stp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_stp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=stp.stp, is_container='container', presence=False, yang_name="stp", rest_name="stp", parent=self, choice=(u'spanning-tree-mode', u'stp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """stp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=stp.stp, is_container='container', presence=False, yang_name="stp", rest_name="stp", parent=self, choice=(u'spanning-tree-mode', u'stp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True)""", }) self.__stp = t if hasattr(self, '_set'): self._set()
[ "def", "_set_stp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "...
Setter method for stp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/stp (container) If this variable is read-only (config: false) in the source YANG file, then _set_stp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_stp() directly.
[ "Setter", "method", "for", "stp", "mapped", "from", "YANG", "variable", "/", "brocade_xstp_ext_rpc", "/", "get_stp_brief_info", "/", "output", "/", "spanning_tree_info", "/", "stp", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(...
python
train
pywbem/pywbem
pywbem/_subscription_manager.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_subscription_manager.py#L442-L455
def remove_all_servers(self): """ Remove all registered WBEM servers from the subscription manager. This also unregisters listeners from these servers and removes all owned indication subscriptions, owned indication filters, and owned listener destinations. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. """ for server_id in list(self._servers.keys()): self.remove_server(server_id)
[ "def", "remove_all_servers", "(", "self", ")", ":", "for", "server_id", "in", "list", "(", "self", ".", "_servers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_server", "(", "server_id", ")" ]
Remove all registered WBEM servers from the subscription manager. This also unregisters listeners from these servers and removes all owned indication subscriptions, owned indication filters, and owned listener destinations. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`.
[ "Remove", "all", "registered", "WBEM", "servers", "from", "the", "subscription", "manager", ".", "This", "also", "unregisters", "listeners", "from", "these", "servers", "and", "removes", "all", "owned", "indication", "subscriptions", "owned", "indication", "filters"...
python
train
henocdz/workon
workon/script.py
https://github.com/henocdz/workon/blob/46f1f6dc4ea95d8efd10adf93a06737237a6874d/workon/script.py#L49-L66
def _path_is_valid(self, path): """validates if a given path is: - absolute, - exists on current machine - is a directory """ VALIDATORS = [ (os.path.isabs, self._ERROR_PATH_NOT_ABSOLUTE), (os.path.exists, self._ERROR_PATH_DOESNT_EXISTS), (os.path.isdir, self._ERROR_PATH_NOT_A_DIR), ] for validator in VALIDATORS: func, str_err = validator if not func(path): self._print(str_err.format(path), 'red') return return True
[ "def", "_path_is_valid", "(", "self", ",", "path", ")", ":", "VALIDATORS", "=", "[", "(", "os", ".", "path", ".", "isabs", ",", "self", ".", "_ERROR_PATH_NOT_ABSOLUTE", ")", ",", "(", "os", ".", "path", ".", "exists", ",", "self", ".", "_ERROR_PATH_DOE...
validates if a given path is: - absolute, - exists on current machine - is a directory
[ "validates", "if", "a", "given", "path", "is", ":", "-", "absolute", "-", "exists", "on", "current", "machine", "-", "is", "a", "directory" ]
python
train
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L4602-L4613
def _register_update(self, method='isel', replot=False, dims={}, fmt={}, force=False, todefault=False): """ Register new dimensions and formatoptions for updating Parameters ---------- %(InteractiveArray._register_update.parameters)s""" ArrayList._register_update(self, method=method, dims=dims) InteractiveBase._register_update(self, fmt=fmt, todefault=todefault, replot=bool(dims) or replot, force=force)
[ "def", "_register_update", "(", "self", ",", "method", "=", "'isel'", ",", "replot", "=", "False", ",", "dims", "=", "{", "}", ",", "fmt", "=", "{", "}", ",", "force", "=", "False", ",", "todefault", "=", "False", ")", ":", "ArrayList", ".", "_regi...
Register new dimensions and formatoptions for updating Parameters ---------- %(InteractiveArray._register_update.parameters)s
[ "Register", "new", "dimensions", "and", "formatoptions", "for", "updating" ]
python
train
mental32/spotify.py
spotify/http.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/http.py#L286-L295
def artist_related_artists(self, spotify_id): """Get related artists for an artist by their ID. Parameters ---------- spotify_id : str The spotify_id to search by. """ route = Route('GET', '/artists/{spotify_id}/related-artists', spotify_id=spotify_id) return self.request(route)
[ "def", "artist_related_artists", "(", "self", ",", "spotify_id", ")", ":", "route", "=", "Route", "(", "'GET'", ",", "'/artists/{spotify_id}/related-artists'", ",", "spotify_id", "=", "spotify_id", ")", "return", "self", ".", "request", "(", "route", ")" ]
Get related artists for an artist by their ID. Parameters ---------- spotify_id : str The spotify_id to search by.
[ "Get", "related", "artists", "for", "an", "artist", "by", "their", "ID", "." ]
python
test
tkf/rash
rash/watchrecord.py
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/watchrecord.py#L48-L74
def watch_record(indexer, use_polling=False): """ Start watching `cfstore.record_path`. :type indexer: rash.indexer.Indexer """ if use_polling: from watchdog.observers.polling import PollingObserver as Observer Observer # fool pyflakes else: from watchdog.observers import Observer event_handler = RecordHandler(indexer) observer = Observer() observer.schedule(event_handler, path=indexer.record_path, recursive=True) indexer.logger.debug('Start observer.') observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.') observer.stop() indexer.logger.debug('Joining observer.') observer.join() indexer.logger.debug('Finish watching record.')
[ "def", "watch_record", "(", "indexer", ",", "use_polling", "=", "False", ")", ":", "if", "use_polling", ":", "from", "watchdog", ".", "observers", ".", "polling", "import", "PollingObserver", "as", "Observer", "Observer", "# fool pyflakes", "else", ":", "from", ...
Start watching `cfstore.record_path`. :type indexer: rash.indexer.Indexer
[ "Start", "watching", "cfstore", ".", "record_path", "." ]
python
train
developersociety/django-glitter
glitter/page.py
https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/page.py#L176-L184
def add_block_widget(self, top=False): """ Return a select widget for blocks which can be added to this column. """ widget = AddBlockSelect(attrs={ 'class': 'glitter-add-block-select', }, choices=self.add_block_options(top=top)) return widget.render(name='', value=None)
[ "def", "add_block_widget", "(", "self", ",", "top", "=", "False", ")", ":", "widget", "=", "AddBlockSelect", "(", "attrs", "=", "{", "'class'", ":", "'glitter-add-block-select'", ",", "}", ",", "choices", "=", "self", ".", "add_block_options", "(", "top", ...
Return a select widget for blocks which can be added to this column.
[ "Return", "a", "select", "widget", "for", "blocks", "which", "can", "be", "added", "to", "this", "column", "." ]
python
train
philklei/tahoma-api
tahoma_api/tahoma_api.py
https://github.com/philklei/tahoma-api/blob/fc84f6ba3b673d0cd0e9e618777834a74a3c7b64/tahoma_api/tahoma_api.py#L70-L114
def get_user(self): """Get the user informations from the server. :return: a dict with all the informations :rtype: dict raises ValueError in case of protocol issues :Example: >>> "creationTime": <time>, >>> "lastUpdateTime": <time>, >>> "userId": "<email for login>", >>> "title": 0, >>> "firstName": "<First>", >>> "lastName": "<Last>", >>> "email": "<contact email>", >>> "phoneNumber": "<phone>", >>> "mobilePhone": "<mobile>", >>> "locale": "<two char country code>" :Warning: The type and amount of values in the dictionary can change any time. """ header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get(BASE_URL + 'getEndUser', headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.get_user() return try: result = request.json() except ValueError: raise Exception( "Not a valid result for getEndUser, protocol error!") return result['endUser']
[ "def", "get_user", "(", "self", ")", ":", "header", "=", "BASE_HEADERS", ".", "copy", "(", ")", "header", "[", "'Cookie'", "]", "=", "self", ".", "__cookie", "request", "=", "requests", ".", "get", "(", "BASE_URL", "+", "'getEndUser'", ",", "headers", ...
Get the user informations from the server. :return: a dict with all the informations :rtype: dict raises ValueError in case of protocol issues :Example: >>> "creationTime": <time>, >>> "lastUpdateTime": <time>, >>> "userId": "<email for login>", >>> "title": 0, >>> "firstName": "<First>", >>> "lastName": "<Last>", >>> "email": "<contact email>", >>> "phoneNumber": "<phone>", >>> "mobilePhone": "<mobile>", >>> "locale": "<two char country code>" :Warning: The type and amount of values in the dictionary can change any time.
[ "Get", "the", "user", "informations", "from", "the", "server", "." ]
python
train
rsmuc/health_monitoring_plugins
health_monitoring_plugins/__init__.py
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/__init__.py#L87-L103
def walk_snmp_values(sess, helper, oid, check): """ return a snmp value or exits the plugin with unknown""" try: snmp_walk = sess.walk_oid(oid) result_list = [] for x in range(len(snmp_walk)): result_list.append(snmp_walk[x].val) if result_list != []: return result_list else: raise SnmpException("No content") except SnmpException: helper.exit(summary="No response from device for {} ({})".format(check, oid), exit_code=unknown, perfdata='')
[ "def", "walk_snmp_values", "(", "sess", ",", "helper", ",", "oid", ",", "check", ")", ":", "try", ":", "snmp_walk", "=", "sess", ".", "walk_oid", "(", "oid", ")", "result_list", "=", "[", "]", "for", "x", "in", "range", "(", "len", "(", "snmp_walk", ...
return a snmp value or exits the plugin with unknown
[ "return", "a", "snmp", "value", "or", "exits", "the", "plugin", "with", "unknown" ]
python
train
openspending/os-package-registry
os_package_registry/package_registry.py
https://github.com/openspending/os-package-registry/blob/02f3628340417ed7d943a6cc6c25ea0469de22cd/os_package_registry/package_registry.py#L256-L290
def get_stats(self): """ Get some stats on the packages in the registry """ try: query = { # We only care about the aggregations, so don't return the hits 'size': 0, 'aggs': { 'num_packages': { 'value_count': { 'field': 'id', }, }, 'num_records': { 'sum': { 'field': 'package.count_of_rows', }, }, 'num_countries': { 'cardinality': { 'field': 'package.countryCode.keyword', }, }, }, } aggregations = self.es.search(index=self.index_name, body=query)['aggregations'] return { key: int(value['value']) for key, value in aggregations.items() } except NotFoundError: return {}
[ "def", "get_stats", "(", "self", ")", ":", "try", ":", "query", "=", "{", "# We only care about the aggregations, so don't return the hits", "'size'", ":", "0", ",", "'aggs'", ":", "{", "'num_packages'", ":", "{", "'value_count'", ":", "{", "'field'", ":", "'id'...
Get some stats on the packages in the registry
[ "Get", "some", "stats", "on", "the", "packages", "in", "the", "registry" ]
python
train
psd-tools/psd-tools
src/psd_tools/api/smart_object.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/smart_object.py#L94-L98
def filesize(self): """File size of the object.""" if self.kind == 'data': return len(self._data.data) return self._data.filesize
[ "def", "filesize", "(", "self", ")", ":", "if", "self", ".", "kind", "==", "'data'", ":", "return", "len", "(", "self", ".", "_data", ".", "data", ")", "return", "self", ".", "_data", ".", "filesize" ]
File size of the object.
[ "File", "size", "of", "the", "object", "." ]
python
train
tensorflow/tensorboard
tensorboard/summary/writer/event_file_writer.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/summary/writer/event_file_writer.py#L157-L165
def close(self): '''Closes the underlying writer, flushing any pending writes first.''' if not self._closed: with self._lock: if not self._closed: self._closed = True self._worker.stop() self._writer.flush() self._writer.close()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_closed", ":", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_closed", ":", "self", ".", "_closed", "=", "True", "self", ".", "_worker", ".", "stop", "(", ")", "sel...
Closes the underlying writer, flushing any pending writes first.
[ "Closes", "the", "underlying", "writer", "flushing", "any", "pending", "writes", "first", "." ]
python
train
PmagPy/PmagPy
programs/thellier_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/thellier_magic.py#L18-L100
def main(): """ NAME thellier_magic.py DESCRIPTION plots Thellier-Thellier data in version 3.0 format Reads saved interpretations from a specimen formatted table, default: specimens.txt SYNTAX thellier_magic.py [command line options] OPTIONS -h prints help message and quits -f MEAS, set measurements input file, default is 'measurements.txt' -WD: directory to output files to (default : current directory) Note: if using Windows, all figures will output to current directory -ID: directory to read files from (default : same as -WD) -fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt' -fmt [svg,png,jpg], format for images - default is svg -sav, saves plots without review (in format specified by -fmt key or default) -spc SPEC, plots single specimen SPEC, saves plot with specified format with optional -b bounds and quits -n SPECIMENS, number of specimens to plot OUTPUT figures: ALL: numbers refer to temperature steps in command line window 1) Arai plot: closed circles are zero-field first/infield open circles are infield first/zero-field triangles are pTRM checks squares are pTRM tail checks VDS is vector difference sum diamonds are bounds for interpretation 2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes X rotated to NRM direction 3) (De/Re)Magnetization diagram: circles are NRM remaining squares are pTRM gained 4) equal area projections: green triangles are pTRM gained direction red (purple) circles are lower(upper) hemisphere of ZI step directions blue (cyan) squares are lower(upper) hemisphere IZ step directions 5) Optional: TRM acquisition 6) Optional: TDS normalization command line window: list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements) list of possible commands: type letter followed by return to select option saving of plots creates image files with specimen, plot type as name """ # # parse command line options # if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = 
pmag.get_named_arg("-WD", default_val=".") input_dir_path = pmag.get_named_arg('-ID', "") input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path) meas_file = pmag.get_named_arg( "-f", default_val="measurements.txt") #spec_file = pmag.get_named_arg( # "-fsp", default_val="specimens.txt") #crit_file = pmag.get_named_arg("-fcr", default_val="criteria.txt") #spec_file = os.path.join(dir_path, spec_file) #crit_file = os.path.join(dir_path, crit_file) meas_file = pmag.resolve_file_name(meas_file, input_dir_path) fmt = pmag.get_named_arg("-fmt", "svg") save_plots = False interactive = True if '-sav' in sys.argv: save_plots = True interactive=False spec = pmag.get_named_arg("-spc", default_val="") n_specs = pmag.get_named_arg("-n", default_val="all") try: n_specs = int(n_specs) except ValueError: pass ipmag.thellier_magic(meas_file, dir_path, input_dir_path, spec, n_specs, save_plots, fmt, interactive)
[ "def", "main", "(", ")", ":", "#", "# parse command line options", "#", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "dir_path", "=", "pmag", ".", "get_named_arg", "(", "\"-WD\"",...
NAME thellier_magic.py DESCRIPTION plots Thellier-Thellier data in version 3.0 format Reads saved interpretations from a specimen formatted table, default: specimens.txt SYNTAX thellier_magic.py [command line options] OPTIONS -h prints help message and quits -f MEAS, set measurements input file, default is 'measurements.txt' -WD: directory to output files to (default : current directory) Note: if using Windows, all figures will output to current directory -ID: directory to read files from (default : same as -WD) -fsp PRIOR, set specimens.txt prior interpretations file, default is 'specimens.txt' -fmt [svg,png,jpg], format for images - default is svg -sav, saves plots without review (in format specified by -fmt key or default) -spc SPEC, plots single specimen SPEC, saves plot with specified format with optional -b bounds and quits -n SPECIMENS, number of specimens to plot OUTPUT figures: ALL: numbers refer to temperature steps in command line window 1) Arai plot: closed circles are zero-field first/infield open circles are infield first/zero-field triangles are pTRM checks squares are pTRM tail checks VDS is vector difference sum diamonds are bounds for interpretation 2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes X rotated to NRM direction 3) (De/Re)Magnetization diagram: circles are NRM remaining squares are pTRM gained 4) equal area projections: green triangles are pTRM gained direction red (purple) circles are lower(upper) hemisphere of ZI step directions blue (cyan) squares are lower(upper) hemisphere IZ step directions 5) Optional: TRM acquisition 6) Optional: TDS normalization command line window: list is: temperature step numbers, temperatures (C), Dec, Inc, Int (units of measuements) list of possible commands: type letter followed by return to select option saving of plots creates image files with specimen, plot type as name
[ "NAME", "thellier_magic", ".", "py" ]
python
train
SHTOOLS/SHTOOLS
pyshtools/make_docs.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/make_docs.py#L143-L188
def process_f2pydoc(f2pydoc):
    """
    this function replace all optional _d0 arguments with their default
    values in the function signature. These arguments are not intended
    to be used and signify merely the array dimensions of the associated
    argument.
    """

    # ---- split f2py document in its parts
    # 0=Call Signature
    # 1=Parameters
    # 2=Other (optional) Parameters (only if present)
    # 3=Returns
    docparts = re.split('\n--', f2pydoc)

    if len(docparts) == 4:
        doc_has_optionals = True
    elif len(docparts) == 3:
        doc_has_optionals = False
    else:
        print('-- uninterpretable f2py documentation --')
        return f2pydoc

    # ---- replace arguments with _d suffix with empty string in
    # ---- function signature (remove them).
    # Raw strings are used for every regex below: '\w', '\d' and '\['
    # are invalid escape sequences in plain string literals and raise
    # DeprecationWarning/SyntaxWarning on modern Python.
    docparts[0] = re.sub(r'[\[(,]\w+_d\d', '', docparts[0])

    # ---- replace _d arguments of the return arrays with their default value:
    if doc_has_optionals:
        returnarray_dims = re.findall(r'[\[(,](\w+_d\d)', docparts[3])
        for arg in returnarray_dims:
            searchpattern = arg + r' : input.*\n.*Default: (.*)\n'
            match = re.search(searchpattern, docparts[2])
            if match:
                default = match.group(1)
                docparts[3] = re.sub(arg, default, docparts[3])
                docparts[2] = re.sub(searchpattern, '', docparts[2])

    # ---- remove all optional _d# from optional argument list:
    if doc_has_optionals:
        searchpattern = r'\w+_d\d : input.*\n.*Default: (.*)\n'
        docparts[2] = re.sub(searchpattern, '', docparts[2])

    # ---- combine doc parts to a single string
    processed_signature = '\n--'.join(docparts)

    return processed_signature
[ "def", "process_f2pydoc", "(", "f2pydoc", ")", ":", "# ---- split f2py document in its parts", "# 0=Call Signature", "# 1=Parameters", "# 2=Other (optional) Parameters (only if present)", "# 3=Returns", "docparts", "=", "re", ".", "split", "(", "'\\n--'", ",", "f2pydoc", ")",...
this function replace all optional _d0 arguments with their default values in the function signature. These arguments are not intended to be used and signify merely the array dimensions of the associated argument.
[ "this", "function", "replace", "all", "optional", "_d0", "arguments", "with", "their", "default", "values", "in", "the", "function", "signature", ".", "These", "arguments", "are", "not", "intended", "to", "be", "used", "and", "signify", "merely", "the", "array...
python
train
knipknap/exscript
Exscript/protocols/protocol.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/protocol.py#L989-L1030
def waitfor(self, prompt): """ Monitors the data received from the remote host and waits until the response matches the given prompt. Once a match has been found, the buffer containing incoming data is NOT changed. In other words, consecutive calls to this function will always work, e.g.:: conn.waitfor('myprompt>') conn.waitfor('myprompt>') conn.waitfor('myprompt>') will always work. Hence in most cases, you probably want to use expect() instead. This method also stores the received data in the response attribute (self.response). Returns the index of the regular expression that matched. :type prompt: str|re.RegexObject|list(str|re.RegexObject) :param prompt: One or more regular expressions. :rtype: int, re.MatchObject :return: The index of the regular expression that matched, and the match object. @raise TimeoutException: raised if the timeout was reached. @raise ExpectCancelledException: raised when cancel_expect() was called in a callback. @raise ProtocolException: on other internal errors. @raise Exception: May raise other exceptions that are caused within the underlying protocol implementations. """ while True: try: result = self._waitfor(prompt) except DriverReplacedException: continue # retry return result
[ "def", "waitfor", "(", "self", ",", "prompt", ")", ":", "while", "True", ":", "try", ":", "result", "=", "self", ".", "_waitfor", "(", "prompt", ")", "except", "DriverReplacedException", ":", "continue", "# retry", "return", "result" ]
Monitors the data received from the remote host and waits until the response matches the given prompt. Once a match has been found, the buffer containing incoming data is NOT changed. In other words, consecutive calls to this function will always work, e.g.:: conn.waitfor('myprompt>') conn.waitfor('myprompt>') conn.waitfor('myprompt>') will always work. Hence in most cases, you probably want to use expect() instead. This method also stores the received data in the response attribute (self.response). Returns the index of the regular expression that matched. :type prompt: str|re.RegexObject|list(str|re.RegexObject) :param prompt: One or more regular expressions. :rtype: int, re.MatchObject :return: The index of the regular expression that matched, and the match object. @raise TimeoutException: raised if the timeout was reached. @raise ExpectCancelledException: raised when cancel_expect() was called in a callback. @raise ProtocolException: on other internal errors. @raise Exception: May raise other exceptions that are caused within the underlying protocol implementations.
[ "Monitors", "the", "data", "received", "from", "the", "remote", "host", "and", "waits", "until", "the", "response", "matches", "the", "given", "prompt", ".", "Once", "a", "match", "has", "been", "found", "the", "buffer", "containing", "incoming", "data", "is...
python
train
PyPSA/PyPSA
pypsa/components.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L472-L503
def remove(self, class_name, name):
    """
    Removes a single component from the network.

    Removes it from component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name
    name : string
        Component name

    Examples
    --------
    >>> network.remove("Line","my_line 12345")
    """
    if class_name not in self.components:
        logger.error("Component class {} not found".format(class_name))
        return None

    # Drop the component's row from the static component DataFrame.
    static_df = self.df(class_name)
    static_df.drop(name, inplace=True)

    # Drop the matching column from every time-varying DataFrame that
    # actually carries a series for this component.
    varying = self.pnl(class_name)
    for series_df in itervalues(varying):
        if name in series_df:
            series_df.drop(name, axis=1, inplace=True)
[ "def", "remove", "(", "self", ",", "class_name", ",", "name", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", ")", "return", ...
Removes a single component from the network. Removes it from component DataFrames. Parameters ---------- class_name : string Component class name name : string Component name Examples -------- >>> network.remove("Line","my_line 12345")
[ "Removes", "a", "single", "component", "from", "the", "network", "." ]
python
train
satellogic/telluric
telluric/georaster.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1364-L1421
def reproject(self, dst_crs=None, resolution=None, dimensions=None,
              src_bounds=None, dst_bounds=None, target_aligned_pixels=False,
              resampling=Resampling.cubic, creation_options=None, **kwargs):
    """Return re-projected raster to new raster.

    Parameters
    ------------
    dst_crs: rasterio.crs.CRS, optional
        Target coordinate reference system.
    resolution: tuple (x resolution, y resolution) or float, optional
        Target resolution, in units of target coordinate reference system.
    dimensions: tuple (width, height), optional
        Output size in pixels and lines.
    src_bounds: tuple (xmin, ymin, xmax, ymax), optional
        Georeferenced extent of output (in source georeferenced units).
    dst_bounds: tuple (xmin, ymin, xmax, ymax), optional
        Georeferenced extent of output (in destination georeferenced units).
    target_aligned_pixels: bool, optional
        Align the output bounds based on the resolution.
        Default is `False`.
    resampling: rasterio.enums.Resampling
        Reprojection resampling method. Default is `cubic`.
    creation_options: dict, optional
        Custom creation options.
    kwargs: optional
        Additional arguments passed to transformation function.

    Returns
    ---------
    out: GeoRaster2
    """
    if self._image is None and self._filename is not None:
        # image is not loaded yet: let warp() operate on the file
        # directly instead of pulling the raster into memory first.
        # delete=False because the resulting GeoRaster2 keeps reading
        # from this temp file; temporary=True hands it ownership so the
        # file can be cleaned up later.
        with tempfile.NamedTemporaryFile(suffix='.tif', delete=False) as tf:
            warp(self._filename, tf.name,
                 dst_crs=dst_crs, resolution=resolution,
                 dimensions=dimensions,
                 creation_options=creation_options,
                 src_bounds=src_bounds, dst_bounds=dst_bounds,
                 target_aligned_pixels=target_aligned_pixels,
                 resampling=resampling, **kwargs)
        new_raster = self.__class__(filename=tf.name, temporary=True,
                                    band_names=self.band_names)
    else:
        # image is loaded already
        # SimpleNamespace is handy to hold the properties that
        # calc_transform expects, see
        # https://docs.python.org/3/library/types.html#types.SimpleNamespace
        src = SimpleNamespace(width=self.width, height=self.height,
                              transform=self.transform, crs=self.crs,
                              bounds=BoundingBox(*self.footprint().get_bounds(self.crs)),
                              gcps=None)
        # NOTE(review): **kwargs reach warp() only in the file-based
        # branch above; this in-memory branch ignores them — confirm
        # that asymmetry is intended.
        dst_crs, dst_transform, dst_width, dst_height = calc_transform(
            src, dst_crs=dst_crs, resolution=resolution,
            dimensions=dimensions,
            target_aligned_pixels=target_aligned_pixels,
            src_bounds=src_bounds, dst_bounds=dst_bounds)
        new_raster = self._reproject(dst_width, dst_height, dst_transform,
                                     dst_crs=dst_crs, resampling=resampling)

    return new_raster
[ "def", "reproject", "(", "self", ",", "dst_crs", "=", "None", ",", "resolution", "=", "None", ",", "dimensions", "=", "None", ",", "src_bounds", "=", "None", ",", "dst_bounds", "=", "None", ",", "target_aligned_pixels", "=", "False", ",", "resampling", "="...
Return re-projected raster to new raster. Parameters ------------ dst_crs: rasterio.crs.CRS, optional Target coordinate reference system. resolution: tuple (x resolution, y resolution) or float, optional Target resolution, in units of target coordinate reference system. dimensions: tuple (width, height), optional Output size in pixels and lines. src_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output (in source georeferenced units). dst_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output (in destination georeferenced units). target_aligned_pixels: bool, optional Align the output bounds based on the resolution. Default is `False`. resampling: rasterio.enums.Resampling Reprojection resampling method. Default is `cubic`. creation_options: dict, optional Custom creation options. kwargs: optional Additional arguments passed to transformation function. Returns --------- out: GeoRaster2
[ "Return", "re", "-", "projected", "raster", "to", "new", "raster", "." ]
python
train
nerdvegas/rez
src/rez/solver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2137-L2157
def dump(self):
    """Print a formatted summary of the current solve state to stdout."""
    from rez.utils.formatting import columnise

    rows = []
    for i, phase in enumerate(self.phase_stack):
        rows.append((self._depth_label(i), phase.status, str(phase)))

    # Every print below takes a single expression, so parenthesizing it
    # is a no-op under Python 2 (grouping parens) while making the
    # module importable under Python 3 as well.
    print("status: %s (%s)" % (self.status.name, self.status.description))
    print("initial request: %s" % str(self.request_list))
    print("")
    print("solve stack:")
    print('\n'.join(columnise(rows)))

    if self.failed_phase_list:
        rows = []
        for i, phase in enumerate(self.failed_phase_list):
            rows.append(("#%d" % i, phase.status, str(phase)))
        print("")
        print("previous failures:")
        print('\n'.join(columnise(rows)))
[ "def", "dump", "(", "self", ")", ":", "from", "rez", ".", "utils", ".", "formatting", "import", "columnise", "rows", "=", "[", "]", "for", "i", ",", "phase", "in", "enumerate", "(", "self", ".", "phase_stack", ")", ":", "rows", ".", "append", "(", ...
Print a formatted summary of the current solve state.
[ "Print", "a", "formatted", "summary", "of", "the", "current", "solve", "state", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/image/image_folder.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/image_folder.py#L156-L164
def _generate_examples(self, label_images): """Generate example for each image in the dict.""" for label, image_paths in label_images.items(): for image_path in image_paths: yield { "image": image_path, "label": label, }
[ "def", "_generate_examples", "(", "self", ",", "label_images", ")", ":", "for", "label", ",", "image_paths", "in", "label_images", ".", "items", "(", ")", ":", "for", "image_path", "in", "image_paths", ":", "yield", "{", "\"image\"", ":", "image_path", ",", ...
Generate example for each image in the dict.
[ "Generate", "example", "for", "each", "image", "in", "the", "dict", "." ]
python
train
gccxml/pygccxml
pygccxml/declarations/type_traits_classes.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/type_traits_classes.py#L788-L818
def is_unary_operator(oper):
    """returns True, if operator is unary operator, otherwise False"""
    # Unary operators appear in two forms:
    #   member of a class:  ret-type operator symbol()
    #                       ret-type operator [++ --](int)
    #   free function:      ret-type operator symbol( arg )
    #                       ret-type operator [++ --](X&, int)
    unary_symbols = ('!', '&', '~', '*', '+', '++', '-', '--')
    if not isinstance(oper, calldef_members.operator_t):
        return False
    if oper.symbol not in unary_symbols:
        return False
    args = oper.arguments
    if isinstance(oper, calldef_members.member_operator_t):
        # Member form: no argument at all, or the dummy int that marks
        # the postfix ++/-- overload.
        if not args:
            return True
        return (oper.symbol in ('++', '--') and
                isinstance(args[0].decl_type, cpptypes.int_t))
    # Free-function form: exactly one real argument, or two for postfix
    # ++/-- where the second is the dummy int.
    # may be I need to add additional check whether first argument is
    # reference or not?
    if len(args) == 1:
        return True
    return (oper.symbol in ('++', '--') and
            len(args) == 2 and
            isinstance(args[1].decl_type, cpptypes.int_t))
[ "def", "is_unary_operator", "(", "oper", ")", ":", "# definition:", "# member in class", "# ret-type operator symbol()", "# ret-type operator [++ --](int)", "# globally", "# ret-type operator symbol( arg )", "# ret-type operator [++ --](X&, int)", "symbols", "=", "[", "'!'", ",", ...
returns True, if operator is unary operator, otherwise False
[ "returns", "True", "if", "operator", "is", "unary", "operator", "otherwise", "False" ]
python
train
bukun/TorCMS
torcms/model/user_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/user_model.py#L66-L76
def check_user(user_id, u_pass):
    '''
    Checking the password by user's ID.
    '''
    # Return codes: -1 = no such user, 1 = password matches,
    # 0 = password mismatch.
    if TabMember.select().where(TabMember.uid == user_id).count() == 0:
        return -1
    member = TabMember.get(uid=user_id)
    return 1 if member.user_pass == tools.md5(u_pass) else 0
[ "def", "check_user", "(", "user_id", ",", "u_pass", ")", ":", "user_count", "=", "TabMember", ".", "select", "(", ")", ".", "where", "(", "TabMember", ".", "uid", "==", "user_id", ")", ".", "count", "(", ")", "if", "user_count", "==", "0", ":", "retu...
Checking the password by user's ID.
[ "Checking", "the", "password", "by", "user", "s", "ID", "." ]
python
train
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L378-L441
def _refresh(self, session, stopping=False):
    '''Get this task's current state.

    This must be called under the registry's lock.  It updates the
    :attr:`finished` and :attr:`failed` flags and the :attr:`data`
    dictionary based on the current state in the registry.  In the
    normal case, nothing will change and this function will return
    normally.  If it turns out that the work unit is already finished,
    the state of this object will change before
    :exc:`rejester.exceptions.LostLease` is raised.

    :param session: locked registry session
    :param stopping: don't raise if the work unit is finished
    :raises rejester.exceptions.LostLease: if this worker is no longer
      doing this work unit

    '''
    # Check the "finished" registry dictionary first; record the state
    # locally before (possibly) raising.
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _FINISHED, self.key)
    if data is not None:
        self.finished = True
        self.data = data
        if not stopping:
            raise LostLease('work unit is already finished')
        return
    self.finished = False
    # Same pattern for the "failed" dictionary.
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _FAILED, self.key)
    if data is not None:
        self.failed = True
        self.data = data
        if not stopping:
            raise LostLease('work unit has already failed')
        return
    self.failed = False
    # (You need a pretty specific sequence of events to get here)
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _BLOCKED, self.key)
    if data is not None:
        self.data = data
        raise LostLease('work unit now blocked by others')
    # Ownership check: another worker may have claimed the unit.
    worker_id = session.get(
        WORK_UNITS_ + self.work_spec_name + '_locks', self.key)
    if worker_id != self.worker_id:
        # NOTE(review): the '%r' below is never interpolated —
        # LostLease receives the format string and worker_id as two
        # separate exception args; probably meant
        # 'work unit claimed by %r' % worker_id.  Confirm callers don't
        # rely on e.args before changing.
        raise LostLease('work unit claimed by %r', worker_id)
    # NB: We could check the priority here, but don't.
    # If at this point we're technically overtime but nobody
    # else has started doing work yet, since we're under the
    # global lock, we can get away with finishing whatever
    # transition we were going to try to do.
    data = session.get(
        WORK_UNITS_ + self.work_spec_name, self.key)
    if data is None:
        raise NoSuchWorkUnitError('work unit is gone')
    # Since we should still own the work unit, any changes
    # in data should be on our end; do not touch it
    return
[ "def", "_refresh", "(", "self", ",", "session", ",", "stopping", "=", "False", ")", ":", "data", "=", "session", ".", "get", "(", "WORK_UNITS_", "+", "self", ".", "work_spec_name", "+", "_FINISHED", ",", "self", ".", "key", ")", "if", "data", "is", "...
Get this task's current state. This must be called under the registry's lock. It updates the :attr:`finished` and :attr:`failed` flags and the :attr:`data` dictionary based on the current state in the registry. In the normal case, nothing will change and this function will return normally. If it turns out that the work unit is already finished, the state of this object will change before :exc:`rejester.exceptions.LostLease` is raised. :param session: locked registry session :param stopping: don't raise if the work unit is finished :raises rejester.exceptions.LostLease: if this worker is no longer doing this work unit
[ "Get", "this", "task", "s", "current", "state", "." ]
python
train
ralphbean/taskw
taskw/warrior.py
https://github.com/ralphbean/taskw/blob/11e2f9132eaedd157f514538de9b5f3b69c30a52/taskw/warrior.py#L827-L833
def task_start(self, **kw):
    """ Marks a task as started. """
    # Resolve the task first, issue the 'start' command for it, then
    # re-fetch so the caller sees the post-start state.
    task_id, task = self.get_task(**kw)
    self._execute(task_id, 'start')
    return self.get_task(uuid=task['uuid'])[1]
[ "def", "task_start", "(", "self", ",", "*", "*", "kw", ")", ":", "id", ",", "task", "=", "self", ".", "get_task", "(", "*", "*", "kw", ")", "self", ".", "_execute", "(", "id", ",", "'start'", ")", "return", "self", ".", "get_task", "(", "uuid", ...
Marks a task as started.
[ "Marks", "a", "task", "as", "started", "." ]
python
train
uber/rides-python-sdk
uber_rides/client.py
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/client.py#L109-L127
def get_products(self, latitude, longitude):
    """Get information about the Uber products offered at a given location.

    Parameters
        latitude (float)
            The latitude component of a location.
        longitude (float)
            The longitude component of a location.

    Returns
        (Response)
            A Response object containing available products information.
    """
    # Insertion order matters for the request signature, hence the
    # OrderedDict built key by key.
    query = OrderedDict()
    query['latitude'] = latitude
    query['longitude'] = longitude
    return self._api_call('GET', 'v1.2/products', args=query)
[ "def", "get_products", "(", "self", ",", "latitude", ",", "longitude", ")", ":", "args", "=", "OrderedDict", "(", "[", "(", "'latitude'", ",", "latitude", ")", ",", "(", "'longitude'", ",", "longitude", ")", ",", "]", ")", "return", "self", ".", "_api_...
Get information about the Uber products offered at a given location. Parameters latitude (float) The latitude component of a location. longitude (float) The longitude component of a location. Returns (Response) A Response object containing available products information.
[ "Get", "information", "about", "the", "Uber", "products", "offered", "at", "a", "given", "location", "." ]
python
train
CityOfZion/neo-boa
boa/code/module.py
https://github.com/CityOfZion/neo-boa/blob/5ec0f0acb2e2e3e4bbd3530252e6eae61b23d59b/boa/code/module.py#L234-L280
def link_methods(self): """ Perform linkage of addresses between methods. """ from ..compiler import Compiler for method in self.methods: method.prepare() self.all_vm_tokens = OrderedDict() address = 0 for method in self.orderered_methods: if not method.is_interop: # print("ADDING METHOD %s " % method.full_name) method.address = address for key, vmtoken in method.vm_tokens.items(): self.all_vm_tokens[address] = vmtoken address += 1 if vmtoken.data is not None and vmtoken.vm_op != VMOp.NOP: address += len(vmtoken.data) vmtoken.addr = vmtoken.addr + method.address for key, vmtoken in self.all_vm_tokens.items(): if vmtoken.src_method is not None: target_method = self.method_by_name(vmtoken.target_method) if target_method: jump_len = target_method.address - vmtoken.addr param_ret_counts = bytearray() if Compiler.instance().nep8: param_ret_counts = vmtoken.data[0:2] jump_len -= 2 if jump_len > -32767 and jump_len < 32767: vmtoken.data = param_ret_counts + jump_len.to_bytes(2, 'little', signed=True) else: vmtoken.data = param_ret_counts + jump_len.to_bytes(4, 'little', signed=True) else: raise Exception("Target method %s not found" % vmtoken.target_method)
[ "def", "link_methods", "(", "self", ")", ":", "from", ".", ".", "compiler", "import", "Compiler", "for", "method", "in", "self", ".", "methods", ":", "method", ".", "prepare", "(", ")", "self", ".", "all_vm_tokens", "=", "OrderedDict", "(", ")", "address...
Perform linkage of addresses between methods.
[ "Perform", "linkage", "of", "addresses", "between", "methods", "." ]
python
train
spencerahill/aospy
aospy/data_loader.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/data_loader.py#L267-L272
def _setattr_default(obj, attr, value, default): """Set an attribute of an object to a value or default value.""" if value is None: setattr(obj, attr, default) else: setattr(obj, attr, value)
[ "def", "_setattr_default", "(", "obj", ",", "attr", ",", "value", ",", "default", ")", ":", "if", "value", "is", "None", ":", "setattr", "(", "obj", ",", "attr", ",", "default", ")", "else", ":", "setattr", "(", "obj", ",", "attr", ",", "value", ")...
Set an attribute of an object to a value or default value.
[ "Set", "an", "attribute", "of", "an", "object", "to", "a", "value", "or", "default", "value", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3740-L3761
def double_discriminator(x, filters1=128, filters2=None, kernel_size=8,
                         strides=4, pure_mean=False):
    """A convolutional discriminator with 2 layers and concatenated output."""
    if filters2 is None:
        filters2 = 4 * filters1
    with tf.variable_scope("discriminator"):
        batch_size = shape_list(x)[0]
        # First tap: conv1 over the input, pooled to a per-example vector
        # either by plain spatial mean or by attention-weighted mean.
        net = layers().Conv2D(
            filters1, kernel_size, strides=strides, padding="SAME",
            name="conv1")(x)
        if pure_mean:
            net1 = tf.reduce_mean(net, [1, 2])
        else:
            net1 = mean_with_attention(net, "mean_with_attention1")
            # NOTE(review): the result of this reshape is discarded, so
            # the call has no effect — looks like dead code; confirm
            # before removing.
            tf.reshape(net, [batch_size, -1])
        # NOTE(review): net is relu'd here but immediately overwritten
        # below, and conv2 is applied to the original input x rather
        # than net.  That makes net1/net2 two independent taps on x —
        # verify this is intentional and not a typo for `(net)`.
        net = tf.nn.relu(net)
        net = layers().Conv2D(
            filters2, kernel_size, strides=strides, padding="SAME",
            name="conv2")(x)
        if pure_mean:
            net2 = tf.reduce_mean(net, [1, 2])
        else:
            net2 = mean_with_attention(net, "mean_with_attention2")
        return tf.concat([net1, net2], axis=-1)
[ "def", "double_discriminator", "(", "x", ",", "filters1", "=", "128", ",", "filters2", "=", "None", ",", "kernel_size", "=", "8", ",", "strides", "=", "4", ",", "pure_mean", "=", "False", ")", ":", "if", "filters2", "is", "None", ":", "filters2", "=", ...
A convolutional discriminator with 2 layers and concatenated output.
[ "A", "convolutional", "discriminator", "with", "2", "layers", "and", "concatenated", "output", "." ]
python
train
zblz/naima
naima/extern/minimize.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/extern/minimize.py#L70-L243
def _minimize_neldermead(
    func,
    x0,
    args=(),
    callback=None,
    xtol=1e-4,
    ftol=1e-4,
    maxiter=None,
    maxfev=None,
    disp=False,
    return_all=False,
):  # pragma: no cover
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options for the Nelder-Mead algorithm are:
        disp : bool
            Set to True to print convergence messages.
        xtol : float
            Relative error in solution `xopt` acceptable for
            convergence.
        ftol : float
            Relative error in ``fun(xopt)`` acceptable for convergence.
        maxiter : int
            Maximum number of iterations to perform.
        maxfev : int
            Maximum number of function evaluations to make.

    """
    maxfun = maxfev
    retall = return_all

    # wrap_function wraps func so fcalls[0] counts evaluations.
    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    # NOTE(review): x0 was just flattened, so rank is always 1 here and
    # the rank-0 branch below is effectively dead code; kept as-is.
    rank = len(x0.shape)
    if not -1 < rank < 2:
        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200

    # Standard Nelder-Mead coefficients: reflection (rho), expansion
    # (chi), contraction (psi) and shrink (sigma).
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    one2np1 = list(range(1, N + 1))

    if rank == 0:
        sim = numpy.zeros((N + 1,), dtype=x0.dtype)
    else:
        sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
    fsim = numpy.zeros((N + 1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    # Build the initial simplex by perturbing each coordinate of x0
    # (relative step for nonzero entries, small absolute step for zeros).
    nonzdelt = 0.05
    zdelt = 0.00025
    for k in range(0, N):
        y = numpy.array(x0, copy=True)
        if y[k] != 0:
            y[k] = (1 + nonzdelt) * y[k]
        else:
            y[k] = zdelt

        sim[k + 1] = y
        f = func(y)
        fsim[k + 1] = f

    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)

    iterations = 1

    while fcalls[0] < maxfun and iterations < maxiter:
        # Relative convergence test on vertex spread and function values.
        # NOTE(review): divides by sim[0] / fsim[0], so this can warn or
        # misbehave when the best vertex (or its value) contains zeros.
        if (
            numpy.max(numpy.ravel(numpy.abs((sim[1:] - sim[0]) / sim[0])))
            <= xtol
            and numpy.max(numpy.abs((fsim[0] - fsim[1:]) / fsim[0])) <= ftol
        ):
            break

        # Reflect the worst vertex through the centroid of the others.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            # Reflection beat the current best: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)

            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)

                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)

                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1

                if doshrink:
                    # Both contractions failed: shrink every vertex
                    # toward the best one.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])

        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])

    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0

    # Map the stopping condition onto scipy-style status codes:
    # 1 = ran out of function evaluations, 2 = ran out of iterations,
    # 0 = converged.
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message["maxfev"]
        if disp:
            print("Warning: " + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message["maxiter"]
        if disp:
            print("Warning: " + msg)
    else:
        msg = _status_message["success"]
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iterations)
            print(" Function evaluations: %d" % fcalls[0])

    result = OptimizeResult(
        fun=fval,
        nit=iterations,
        nfev=fcalls[0],
        status=warnflag,
        success=(warnflag == 0),
        message=msg,
        x=x,
    )
    if retall:
        result["allvecs"] = allvecs
    return result
[ "def", "_minimize_neldermead", "(", "func", ",", "x0", ",", "args", "=", "(", ")", ",", "callback", "=", "None", ",", "xtol", "=", "1e-4", ",", "ftol", "=", "1e-4", ",", "maxiter", "=", "None", ",", "maxfev", "=", "None", ",", "disp", "=", "False",...
Minimization of scalar function of one or more variables using the Nelder-Mead algorithm. Options for the Nelder-Mead algorithm are: disp : bool Set to True to print convergence messages. xtol : float Relative error in solution `xopt` acceptable for convergence. ftol : float Relative error in ``fun(xopt)`` acceptable for convergence. maxiter : int Maximum number of iterations to perform. maxfev : int Maximum number of function evaluations to make.
[ "Minimization", "of", "scalar", "function", "of", "one", "or", "more", "variables", "using", "the", "Nelder", "-", "Mead", "algorithm", "." ]
python
train
jtwhite79/pyemu
pyemu/utils/gw_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/gw_utils.py#L24-L57
def modflow_pval_to_template_file(pval_file, tpl_file=None):
    """write a template file for a modflow parameter value file.
    Uses names in the first column in the pval file as par names.

    Parameters
    ----------
    pval_file : str
        parameter value file
    tpl_file : str, optional
        template file to write.  If None, use <pval_file>.tpl.
        Default is None

    Returns
    -------
    df : pandas.DataFrame
        pandas DataFrame with control file parameter information
    """
    tpl_file = pval_file + ".tpl" if tpl_file is None else tpl_file

    # The pval file is whitespace-delimited with a two-line header:
    # first column is the parameter name, second its value.
    pval_df = pd.read_csv(pval_file, delim_whitespace=True,
                          header=None, skiprows=2,
                          names=["parnme", "parval1"])
    pval_df.index = pval_df.parnme
    pval_df.loc[:, "tpl"] = pval_df.parnme.apply(
        lambda parname: " ~ {0:15s} ~".format(parname))

    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n#pval template file from pyemu\n")
        f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
        f.write(pval_df.loc[:, ["parnme", "tpl"]].to_string(
            col_space=0,
            formatters=[SFMT, SFMT],
            index=False,
            header=False,
            justify="left"))
    return pval_df
[ "def", "modflow_pval_to_template_file", "(", "pval_file", ",", "tpl_file", "=", "None", ")", ":", "if", "tpl_file", "is", "None", ":", "tpl_file", "=", "pval_file", "+", "\".tpl\"", "pval_df", "=", "pd", ".", "read_csv", "(", "pval_file", ",", "delim_whitespac...
write a template file for a modflow parameter value file. Uses names in the first column in the pval file as par names. Parameters ---------- pval_file : str parameter value file tpl_file : str, optional template file to write. If None, use <pval_file>.tpl. Default is None Returns ------- df : pandas.DataFrame pandas DataFrame with control file parameter information
[ "write", "a", "template", "file", "for", "a", "modflow", "parameter", "value", "file", ".", "Uses", "names", "in", "the", "first", "column", "in", "the", "pval", "file", "as", "par", "names", "." ]
python
train
openvax/varcode
varcode/effects/effect_ordering.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L190-L201
def effect_has_complete_transcript(effect):
    """
    Parameters
    ----------
    effect : subclass of MutationEffect

    Returns True if effect has transcript and that transcript has
    complete CDS
    """
    def _has_complete_cds(transcript):
        # True when the transcript carries a complete CDS annotation.
        return transcript.complete

    return apply_to_transcript_if_exists(
        effect=effect,
        fn=_has_complete_cds,
        default=False)
[ "def", "effect_has_complete_transcript", "(", "effect", ")", ":", "return", "apply_to_transcript_if_exists", "(", "effect", "=", "effect", ",", "fn", "=", "lambda", "t", ":", "t", ".", "complete", ",", "default", "=", "False", ")" ]
Parameters ---------- effect : subclass of MutationEffect Returns True if effect has transcript and that transcript has complete CDS
[ "Parameters", "----------", "effect", ":", "subclass", "of", "MutationEffect" ]
python
train
IvanMalison/okcupyd
okcupyd/question.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/question.py#L148-L160
def get_answer_id_for_question(self, question): """Get the answer_id corresponding to the answer given for question by looking at this :class:`~.UserQuestion`'s answer_options. The given :class:`~.Question` instance must have the same id as this :class:`~.UserQuestion`. That this method exists is admittedly somewhat weird. Unfortunately, it seems to be the only way to retrieve this information. """ assert question.id == self.id for answer_option in self.answer_options: if answer_option.text == question.their_answer: return answer_option.id
[ "def", "get_answer_id_for_question", "(", "self", ",", "question", ")", ":", "assert", "question", ".", "id", "==", "self", ".", "id", "for", "answer_option", "in", "self", ".", "answer_options", ":", "if", "answer_option", ".", "text", "==", "question", "."...
Get the answer_id corresponding to the answer given for question by looking at this :class:`~.UserQuestion`'s answer_options. The given :class:`~.Question` instance must have the same id as this :class:`~.UserQuestion`. That this method exists is admittedly somewhat weird. Unfortunately, it seems to be the only way to retrieve this information.
[ "Get", "the", "answer_id", "corresponding", "to", "the", "answer", "given", "for", "question", "by", "looking", "at", "this", ":", "class", ":", "~", ".", "UserQuestion", "s", "answer_options", ".", "The", "given", ":", "class", ":", "~", ".", "Question", ...
python
train
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L423-L429
def p_identifier_name_string(self, p): """identifier_name_string : identifier_name """ p[0] = asttypes.PropIdentifier(p[1].value) # manually clone the position attributes. for k in ('_token_map', 'lexpos', 'lineno', 'colno'): setattr(p[0], k, getattr(p[1], k))
[ "def", "p_identifier_name_string", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "asttypes", ".", "PropIdentifier", "(", "p", "[", "1", "]", ".", "value", ")", "# manually clone the position attributes.", "for", "k", "in", "(", "'_token_map'", ...
identifier_name_string : identifier_name
[ "identifier_name_string", ":", "identifier_name" ]
python
train
armet/python-armet
armet/http/response.py
https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/http/response.py#L104-L106
def insert(self, name, index, value): """Insert a value at the passed index in the named header.""" return self._sequence[name].insert(index, value)
[ "def", "insert", "(", "self", ",", "name", ",", "index", ",", "value", ")", ":", "return", "self", ".", "_sequence", "[", "name", "]", ".", "insert", "(", "index", ",", "value", ")" ]
Insert a value at the passed index in the named header.
[ "Insert", "a", "value", "at", "the", "passed", "index", "in", "the", "named", "header", "." ]
python
valid
Azure/azure-storage-python
azure-storage-queue/azure/storage/queue/queueservice.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-queue/azure/storage/queue/queueservice.py#L692-L730
def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None): ''' Sets stored access policies for the queue that may be used with Shared Access Signatures. When you set permissions for a queue, the existing permissions are replaced. To update the queue's permissions, call :func:`~get_queue_acl` to fetch all access policies associated with the queue, modify the access policy that you wish to change, and then call this function with the complete set of data to perform the update. When you establish a stored access policy on a queue, it may take up to 30 seconds to take effect. During this interval, a shared access signature that is associated with the stored access policy will throw an :class:`AzureHttpError` until the access policy becomes active. :param str queue_name: The name of an existing queue. :param signed_identifiers: A dictionary of access policies to associate with the queue. The dictionary may contain up to 5 elements. An empty dictionary will clear the access policies set on the service. :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) :param int timeout: The server timeout, expressed in seconds. ''' _validate_not_none('queue_name', queue_name) _validate_access_policies(signed_identifiers) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(queue_name) request.query = { 'comp': 'acl', 'timeout': _int_to_str(timeout), } request.body = _get_request_body( _convert_signed_identifiers_to_xml(signed_identifiers)) self._perform_request(request)
[ "def", "set_queue_acl", "(", "self", ",", "queue_name", ",", "signed_identifiers", "=", "None", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'queue_name'", ",", "queue_name", ")", "_validate_access_policies", "(", "signed_identifiers", ")", "...
Sets stored access policies for the queue that may be used with Shared Access Signatures. When you set permissions for a queue, the existing permissions are replaced. To update the queue's permissions, call :func:`~get_queue_acl` to fetch all access policies associated with the queue, modify the access policy that you wish to change, and then call this function with the complete set of data to perform the update. When you establish a stored access policy on a queue, it may take up to 30 seconds to take effect. During this interval, a shared access signature that is associated with the stored access policy will throw an :class:`AzureHttpError` until the access policy becomes active. :param str queue_name: The name of an existing queue. :param signed_identifiers: A dictionary of access policies to associate with the queue. The dictionary may contain up to 5 elements. An empty dictionary will clear the access policies set on the service. :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) :param int timeout: The server timeout, expressed in seconds.
[ "Sets", "stored", "access", "policies", "for", "the", "queue", "that", "may", "be", "used", "with", "Shared", "Access", "Signatures", ".", "When", "you", "set", "permissions", "for", "a", "queue", "the", "existing", "permissions", "are", "replaced", ".", "To...
python
train
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L1600-L1631
def _index_iterator(column_size, max_size, mix_index=False): """ This will iterate over the indexes and return a list of indexes :param column_size: list of int of the size of each list :param max_size: int of the max number of iterations :param mix_index: bool if True will go first then last then middle :return: list of int of indexes """ # todo implement a proper partial factorial design indexes = [0] * len(column_size) index_order = [0] if mix_index: for i in range(1, max(column_size)): index_order += [-1 * i, i] else: index_order += range(1, max(column_size)) for i in range(max_size): yield [index_order[indexes[i]] for i in range(len(indexes))] for index in range(len(column_size)): indexes[index] += 1 if indexes[index] < column_size[index]: break indexes[index] = 0 if index == len(column_size) - 1: if sys.version_info[0] == 2: raise StopIteration() else: return
[ "def", "_index_iterator", "(", "column_size", ",", "max_size", ",", "mix_index", "=", "False", ")", ":", "# todo implement a proper partial factorial design", "indexes", "=", "[", "0", "]", "*", "len", "(", "column_size", ")", "index_order", "=", "[", "0", "]", ...
This will iterate over the indexes and return a list of indexes :param column_size: list of int of the size of each list :param max_size: int of the max number of iterations :param mix_index: bool if True will go first then last then middle :return: list of int of indexes
[ "This", "will", "iterate", "over", "the", "indexes", "and", "return", "a", "list", "of", "indexes", ":", "param", "column_size", ":", "list", "of", "int", "of", "the", "size", "of", "each", "list", ":", "param", "max_size", ":", "int", "of", "the", "ma...
python
train
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L122-L157
def _run_cortex_on_region(region, align_bam, ref_file, work_dir, out_file_base, config): """Run cortex on a specified chromosome start/end region. """ kmers = [31, 51, 71] min_reads = 1750 cortex_dir = config_utils.get_program("cortex", config, "dir") stampy_dir = config_utils.get_program("stampy", config, "dir") vcftools_dir = config_utils.get_program("vcftools", config, "dir") if cortex_dir is None or stampy_dir is None: raise ValueError("cortex_var requires path to pre-built cortex and stampy") region_str = "{0}-{1}-{2}".format(*region) base_dir = safe_makedir(os.path.join(work_dir, region_str)) try: out_vcf_base = os.path.join(base_dir, "{0}-{1}".format( os.path.splitext(os.path.basename(out_file_base))[0], region_str)) out_file = os.path.join(work_dir, os.path.basename("{0}.vcf".format(out_vcf_base))) if not file_exists(out_file): fastq = _get_fastq_in_region(region, align_bam, out_vcf_base) if _count_fastq_reads(fastq, min_reads) < min_reads: vcfutils.write_empty_vcf(out_file) else: local_ref, genome_size = _get_local_ref(region, ref_file, out_vcf_base) indexes = _index_local_ref(local_ref, cortex_dir, stampy_dir, kmers) cortex_out = _run_cortex(fastq, indexes, {"kmers": kmers, "genome_size": genome_size, "sample": get_sample_name(align_bam)}, out_vcf_base, {"cortex": cortex_dir, "stampy": stampy_dir, "vcftools": vcftools_dir}, config) if cortex_out: _remap_cortex_out(cortex_out, region, out_file) else: vcfutils.write_empty_vcf(out_file) finally: if os.path.exists(base_dir): shutil.rmtree(base_dir) return [region[0], int(region[1]), int(region[2]), out_file]
[ "def", "_run_cortex_on_region", "(", "region", ",", "align_bam", ",", "ref_file", ",", "work_dir", ",", "out_file_base", ",", "config", ")", ":", "kmers", "=", "[", "31", ",", "51", ",", "71", "]", "min_reads", "=", "1750", "cortex_dir", "=", "config_utils...
Run cortex on a specified chromosome start/end region.
[ "Run", "cortex", "on", "a", "specified", "chromosome", "start", "/", "end", "region", "." ]
python
train
joerick/pyinstrument
pyinstrument/__main__.py
https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/__main__.py#L255-L274
def save_report(session): ''' Saves the session to a temp file, and returns that path. Also prunes the number of reports to 10 so there aren't loads building up. ''' # prune this folder to contain the last 10 sessions previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport')) previous_reports.sort(reverse=True) while len(previous_reports) > 10: report_file = previous_reports.pop() os.remove(report_file) identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time)) path = os.path.join( report_dir(), identifier + '.pyireport' ) session.save(path) return path, identifier
[ "def", "save_report", "(", "session", ")", ":", "# prune this folder to contain the last 10 sessions", "previous_reports", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "report_dir", "(", ")", ",", "'*.pyireport'", ")", ")", "previous_reports...
Saves the session to a temp file, and returns that path. Also prunes the number of reports to 10 so there aren't loads building up.
[ "Saves", "the", "session", "to", "a", "temp", "file", "and", "returns", "that", "path", ".", "Also", "prunes", "the", "number", "of", "reports", "to", "10", "so", "there", "aren", "t", "loads", "building", "up", "." ]
python
train
mathandy/svgpathtools
svgpathtools/document.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/document.py#L213-L219
def flatten_all_paths(self, group_filter=lambda x: True, path_filter=lambda x: True, path_conversions=CONVERSIONS): """Forward the tree of this document into the more general flatten_all_paths function and return the result.""" return flatten_all_paths(self.tree.getroot(), group_filter, path_filter, path_conversions)
[ "def", "flatten_all_paths", "(", "self", ",", "group_filter", "=", "lambda", "x", ":", "True", ",", "path_filter", "=", "lambda", "x", ":", "True", ",", "path_conversions", "=", "CONVERSIONS", ")", ":", "return", "flatten_all_paths", "(", "self", ".", "tree"...
Forward the tree of this document into the more general flatten_all_paths function and return the result.
[ "Forward", "the", "tree", "of", "this", "document", "into", "the", "more", "general", "flatten_all_paths", "function", "and", "return", "the", "result", "." ]
python
train
horazont/aioxmpp
aioxmpp/security_layer.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/security_layer.py#L595-L614
def import_from_json(self, data, *, override=False): """ Import a JSON dictionary which must have the same format as exported by :meth:`export`. If *override* is true, the existing data in the pin store will be overriden with the data from `data`. Otherwise, the `data` will be merged into the store. """ if override: self._storage = { hostname: set(self._decode_key(key) for key in pins) for hostname, pins in data.items() } return for hostname, pins in data.items(): existing_pins = self._storage.setdefault(hostname, set()) existing_pins.update(self._decode_key(key) for key in pins)
[ "def", "import_from_json", "(", "self", ",", "data", ",", "*", ",", "override", "=", "False", ")", ":", "if", "override", ":", "self", ".", "_storage", "=", "{", "hostname", ":", "set", "(", "self", ".", "_decode_key", "(", "key", ")", "for", "key", ...
Import a JSON dictionary which must have the same format as exported by :meth:`export`. If *override* is true, the existing data in the pin store will be overriden with the data from `data`. Otherwise, the `data` will be merged into the store.
[ "Import", "a", "JSON", "dictionary", "which", "must", "have", "the", "same", "format", "as", "exported", "by", ":", "meth", ":", "export", "." ]
python
train
mdgoldberg/sportsref
sportsref/nfl/boxscores.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/boxscores.py#L105-L112
def away_score(self): """Returns score of the away team. :returns: int of the away score. """ doc = self.get_doc() table = doc('table.linescore') away_score = table('tr').eq(1)('td')[-1].text_content() return int(away_score)
[ "def", "away_score", "(", "self", ")", ":", "doc", "=", "self", ".", "get_doc", "(", ")", "table", "=", "doc", "(", "'table.linescore'", ")", "away_score", "=", "table", "(", "'tr'", ")", ".", "eq", "(", "1", ")", "(", "'td'", ")", "[", "-", "1",...
Returns score of the away team. :returns: int of the away score.
[ "Returns", "score", "of", "the", "away", "team", ".", ":", "returns", ":", "int", "of", "the", "away", "score", "." ]
python
test
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/commands.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/commands.py#L101-L127
def update_scenenode(f): """Set the id of the current scene node to the id for the given file :param f: the file to save the current scene to :type f: :class:`jukeboxcore.filesys.JB_File` :returns: None :rtype: None :raises: None """ n = get_current_scene_node() if not n: msg = "Could not find a scene node." return ActionStatus(ActionStatus.FAILURE, msg) # get dbentry for for the given jbfile tfi = f.get_obj() assert tfi tf = dj.taskfiles.get(task=tfi.task, releasetype=tfi.releasetype, version=tfi.version, descriptor=tfi.descriptor, typ=tfi.typ) cmds.setAttr('%s.taskfile_id' % n, lock=False) cmds.setAttr('%s.taskfile_id' % n, tf.pk) cmds.setAttr('%s.taskfile_id' % n, lock=True) msg = "Successfully updated scene node to %s" % tf.id return ActionStatus(ActionStatus.SUCCESS, msg)
[ "def", "update_scenenode", "(", "f", ")", ":", "n", "=", "get_current_scene_node", "(", ")", "if", "not", "n", ":", "msg", "=", "\"Could not find a scene node.\"", "return", "ActionStatus", "(", "ActionStatus", ".", "FAILURE", ",", "msg", ")", "# get dbentry for...
Set the id of the current scene node to the id for the given file :param f: the file to save the current scene to :type f: :class:`jukeboxcore.filesys.JB_File` :returns: None :rtype: None :raises: None
[ "Set", "the", "id", "of", "the", "current", "scene", "node", "to", "the", "id", "for", "the", "given", "file" ]
python
train
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L1491-L1507
def priceDF(symbol, token='', version=''): '''Price of ticker https://iexcloud.io/docs/api/#price 4:30am-8pm ET Mon-Fri Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result ''' df = pd.io.json.json_normalize({'price': price(symbol, token, version)}) _toDatetime(df) return df
[ "def", "priceDF", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "df", "=", "pd", ".", "io", ".", "json", ".", "json_normalize", "(", "{", "'price'", ":", "price", "(", "symbol", ",", "token", ",", "version", ")", "}...
Price of ticker https://iexcloud.io/docs/api/#price 4:30am-8pm ET Mon-Fri Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
[ "Price", "of", "ticker" ]
python
valid
daler/metaseq
metaseq/integration/chipseq.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/integration/chipseq.py#L410-L429
def xcorr(x, y, maxlags): """ Streamlined version of matplotlib's `xcorr`, without the plots. :param x, y: NumPy arrays to cross-correlate :param maxlags: Max number of lags; result will be `2*maxlags+1` in length """ xlen = len(x) ylen = len(y) assert xlen == ylen c = np.correlate(x, y, mode=2) # normalize c /= np.sqrt(np.dot(x, x) * np.dot(y, y)) lags = np.arange(-maxlags, maxlags + 1) c = c[xlen - 1 - maxlags:xlen + maxlags] return c
[ "def", "xcorr", "(", "x", ",", "y", ",", "maxlags", ")", ":", "xlen", "=", "len", "(", "x", ")", "ylen", "=", "len", "(", "y", ")", "assert", "xlen", "==", "ylen", "c", "=", "np", ".", "correlate", "(", "x", ",", "y", ",", "mode", "=", "2",...
Streamlined version of matplotlib's `xcorr`, without the plots. :param x, y: NumPy arrays to cross-correlate :param maxlags: Max number of lags; result will be `2*maxlags+1` in length
[ "Streamlined", "version", "of", "matplotlib", "s", "xcorr", "without", "the", "plots", "." ]
python
train
ninuxorg/nodeshot
nodeshot/networking/net/models/interfaces/ethernet.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/net/models/interfaces/ethernet.py#L21-L24
def save(self, *args, **kwargs): """ automatically set Interface.type to ethernet """ self.type = INTERFACE_TYPES.get('ethernet') super(Ethernet, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "type", "=", "INTERFACE_TYPES", ".", "get", "(", "'ethernet'", ")", "super", "(", "Ethernet", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*",...
automatically set Interface.type to ethernet
[ "automatically", "set", "Interface", ".", "type", "to", "ethernet" ]
python
train
lowandrew/OLCTools
databasesetup/database_setup.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L197-L214
def clark(self, databasepath): """ Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database type, and species for taxonomic level :param databasepath: path to use to save the database """ if self.clarkpath: logging.info('Downloading CLARK database') # Create the folder in which the database is to be stored databasepath = self.create_database_folder(databasepath, 'clark') # Set the call to create the database - use the --light option, as we don't require the full database targetcall = 'cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} bacteria --species --light'\ .format(clarkpath=self.clarkpath, dbpath=databasepath) # Download the database self.database_clone(targetcall, databasepath) else: logging.warning('No CLARK scripts detected in $PATH. Cannot download database.')
[ "def", "clark", "(", "self", ",", "databasepath", ")", ":", "if", "self", ".", "clarkpath", ":", "logging", ".", "info", "(", "'Downloading CLARK database'", ")", "# Create the folder in which the database is to be stored", "databasepath", "=", "self", ".", "create_da...
Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database type, and species for taxonomic level :param databasepath: path to use to save the database
[ "Download", "and", "set", "-", "up", "the", "CLARK", "database", "using", "the", "set_targets", ".", "sh", "script", ".", "Use", "defaults", "of", "bacteria", "for", "database", "type", "and", "species", "for", "taxonomic", "level", ":", "param", "databasepa...
python
train
apache/incubator-mxnet
python/mxnet/module/base_module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/base_module.py#L719-L743
def load_params(self, fname): """Loads model parameters from file. Parameters ---------- fname : str Path to input param file. Examples -------- >>> # An example of loading module parameters. >>> mod.load_params('myfile') """ save_dict = ndarray.load(fname) arg_params = {} aux_params = {} for k, value in save_dict.items(): arg_type, name = k.split(':', 1) if arg_type == 'arg': arg_params[name] = value elif arg_type == 'aux': aux_params[name] = value else: raise ValueError("Invalid param file " + fname) self.set_params(arg_params, aux_params)
[ "def", "load_params", "(", "self", ",", "fname", ")", ":", "save_dict", "=", "ndarray", ".", "load", "(", "fname", ")", "arg_params", "=", "{", "}", "aux_params", "=", "{", "}", "for", "k", ",", "value", "in", "save_dict", ".", "items", "(", ")", "...
Loads model parameters from file. Parameters ---------- fname : str Path to input param file. Examples -------- >>> # An example of loading module parameters. >>> mod.load_params('myfile')
[ "Loads", "model", "parameters", "from", "file", "." ]
python
train
CTPUG/wafer
wafer/utils.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/utils.py#L34-L54
def cache_result(cache_key, timeout): """A decorator for caching the result of a function.""" def decorator(f): cache_name = settings.WAFER_CACHE @functools.wraps(f) def wrapper(*args, **kw): cache = caches[cache_name] result = cache.get(cache_key) if result is None: result = f(*args, **kw) cache.set(cache_key, result, timeout) return result def invalidate(): cache = caches[cache_name] cache.delete(cache_key) wrapper.invalidate = invalidate return wrapper return decorator
[ "def", "cache_result", "(", "cache_key", ",", "timeout", ")", ":", "def", "decorator", "(", "f", ")", ":", "cache_name", "=", "settings", ".", "WAFER_CACHE", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", ...
A decorator for caching the result of a function.
[ "A", "decorator", "for", "caching", "the", "result", "of", "a", "function", "." ]
python
train
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L3696-L3707
def close(self, file_des): """Close a file descriptor. Args: file_des: An integer file descriptor for the file object requested. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer. """ file_handle = self.filesystem.get_open_file(file_des) file_handle.close()
[ "def", "close", "(", "self", ",", "file_des", ")", ":", "file_handle", "=", "self", ".", "filesystem", ".", "get_open_file", "(", "file_des", ")", "file_handle", ".", "close", "(", ")" ]
Close a file descriptor. Args: file_des: An integer file descriptor for the file object requested. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer.
[ "Close", "a", "file", "descriptor", "." ]
python
train
productml/blurr
blurr/core/store.py
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/store.py#L63-L70
def _get_range_timestamp_key(self, start: Key, end: Key, count: int = 0) -> List[Tuple[Key, Any]]: """ Returns the list of items from the store based on the given time range or count. This is used when the key being used is a TIMESTAMP key. """ raise NotImplementedError()
[ "def", "_get_range_timestamp_key", "(", "self", ",", "start", ":", "Key", ",", "end", ":", "Key", ",", "count", ":", "int", "=", "0", ")", "->", "List", "[", "Tuple", "[", "Key", ",", "Any", "]", "]", ":", "raise", "NotImplementedError", "(", ")" ]
Returns the list of items from the store based on the given time range or count. This is used when the key being used is a TIMESTAMP key.
[ "Returns", "the", "list", "of", "items", "from", "the", "store", "based", "on", "the", "given", "time", "range", "or", "count", "." ]
python
train
goose3/goose3
goose3/extractors/content.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L281-L306
def is_highlink_density(self, element): """ checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good """ links = self.parser.getElementsByTag(element, tag='a') if not links: return False text = self.parser.getText(element) words = text.split(' ') words_number = float(len(words)) link_text_parts = [] for link in links: link_text_parts.append(self.parser.getText(link)) link_text = ''.join(link_text_parts) link_words = link_text.split(' ') number_of_link_words = float(len(link_words)) number_of_links = float(len(links)) link_divisor = float(number_of_link_words / words_number) score = float(link_divisor * number_of_links) if score >= 1.0: return True return False
[ "def", "is_highlink_density", "(", "self", ",", "element", ")", ":", "links", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "element", ",", "tag", "=", "'a'", ")", "if", "not", "links", ":", "return", "False", "text", "=", "self", ".", "par...
checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good
[ "checks", "the", "density", "of", "links", "within", "a", "node", "is", "there", "not", "much", "text", "and", "most", "of", "it", "contains", "linky", "shit?", "if", "so", "it", "s", "no", "good" ]
python
valid
Spinmob/spinmob
_pylab_tweaks.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_tweaks.py#L388-L416
def image_format_figure(figure=None, draw=True): """ This formats the figure in a compact way with (hopefully) enough useful information for printing large data sets. Used mostly for line and scatter plots with long, information-filled titles. Chances are somewhat slim this will be ideal for you but it very well might and is at least a good starting point. figure=None specify a figure object. None will use gcf() """ _pylab.ioff() if figure == None: figure = _pylab.gcf() set_figure_window_geometry(figure, (0,0), (550,470)) axes = figure.axes[0] # set up the title label axes.title.set_horizontalalignment('right') axes.title.set_size(8) axes.title.set_position([1.27,1.02]) axes.title.set_visible(1) if draw: _pylab.ion() _pylab.draw()
[ "def", "image_format_figure", "(", "figure", "=", "None", ",", "draw", "=", "True", ")", ":", "_pylab", ".", "ioff", "(", ")", "if", "figure", "==", "None", ":", "figure", "=", "_pylab", ".", "gcf", "(", ")", "set_figure_window_geometry", "(", "figure", ...
This formats the figure in a compact way with (hopefully) enough useful information for printing large data sets. Used mostly for line and scatter plots with long, information-filled titles. Chances are somewhat slim this will be ideal for you but it very well might and is at least a good starting point. figure=None specify a figure object. None will use gcf()
[ "This", "formats", "the", "figure", "in", "a", "compact", "way", "with", "(", "hopefully", ")", "enough", "useful", "information", "for", "printing", "large", "data", "sets", ".", "Used", "mostly", "for", "line", "and", "scatter", "plots", "with", "long", ...
python
train
inasafe/inasafe
safe/gui/tools/wizard/step_fc05_functions2.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc05_functions2.py#L141-L202
def set_widgets(self): """Set widgets on the Impact Functions Table 2 tab.""" self.tblFunctions2.clear() hazard, exposure, _, _ = self.parent.\ selected_impact_function_constraints() hazard_layer_geometries = get_allowed_geometries( layer_purpose_hazard['key']) exposure_layer_geometries = get_allowed_geometries( layer_purpose_exposure['key']) self.lblSelectFunction2.setText( select_function_constraints2_question % ( hazard['name'], exposure['name'])) self.tblFunctions2.setColumnCount(len(hazard_layer_geometries)) self.tblFunctions2.setRowCount(len(exposure_layer_geometries)) self.tblFunctions2.setHorizontalHeaderLabels( [i['name'].capitalize() for i in hazard_layer_geometries]) for i in range(len(exposure_layer_geometries)): item = QtWidgets.QTableWidgetItem() item.setText(exposure_layer_geometries[i]['name'].capitalize()) item.setTextAlignment(QtCore.Qt.AlignCenter) self.tblFunctions2.setVerticalHeaderItem(i, item) self.tblFunctions2.horizontalHeader().setSectionResizeMode( QtWidgets.QHeaderView.Stretch) self.tblFunctions2.verticalHeader().setSectionResizeMode( QtWidgets.QHeaderView.Stretch) active_items = [] for column in range(len(hazard_layer_geometries)): for row in range(len(exposure_layer_geometries)): hazard_geometry = hazard_layer_geometries[column] exposure_geometry = exposure_layer_geometries[row] item = QtWidgets.QTableWidgetItem() hazard_geometry_allowed = hazard_geometry['key'] in hazard[ 'allowed_geometries'] exposure_geometry_allowed = ( exposure_geometry['key'] in exposure[ 'allowed_geometries']) if hazard_geometry_allowed and exposure_geometry_allowed: background_color = available_option_color active_items += [item] else: background_color = unavailable_option_color item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEnabled) item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable) item.setBackground(QtGui.QBrush(background_color)) item.setFont(big_font) item.setTextAlignment( QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter) item.setData(RoleHazard, 
hazard) item.setData(RoleExposure, exposure) item.setData(RoleHazardConstraint, hazard_geometry) item.setData(RoleExposureConstraint, exposure_geometry) self.tblFunctions2.setItem(row, column, item) # Automatically select one item... if len(active_items) == 1: active_items[0].setSelected(True) # set focus, as the inactive selection style is gray self.tblFunctions2.setFocus()
[ "def", "set_widgets", "(", "self", ")", ":", "self", ".", "tblFunctions2", ".", "clear", "(", ")", "hazard", ",", "exposure", ",", "_", ",", "_", "=", "self", ".", "parent", ".", "selected_impact_function_constraints", "(", ")", "hazard_layer_geometries", "=...
Set widgets on the Impact Functions Table 2 tab.
[ "Set", "widgets", "on", "the", "Impact", "Functions", "Table", "2", "tab", "." ]
python
train
PMEAL/OpenPNM
openpnm/models/phases/diffusivity.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/diffusivity.py#L76-L113
def tyn_calus(target, VA, VB, sigma_A, sigma_B, temperature='pore.temperature', viscosity='pore.viscosity'): r""" Uses Tyn_Calus model to estimate diffusion coefficient in a dilute liquid solution of A in B from first principles at conditions of interest Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. VA : float, array_like Molar volume of component A at boiling temperature (m3/mol) VB : float, array_like Molar volume of component B at boiling temperature (m3/mol) sigmaA: float, array_like Surface tension of component A at boiling temperature (N/m) sigmaB: float, array_like Surface tension of component B at boiling temperature (N/m) pressure : string The dictionary key containing the pressure values in Pascals (Pa) temperature : string The dictionary key containing the temperature values in Kelvin (K) """ T = target[temperature] mu = target[viscosity] A = 8.93e-8*(VB*1e6)**0.267/(VA*1e6)**0.433*T B = (sigma_B/sigma_A)**0.15/(mu*1e3) value = A*B return value
[ "def", "tyn_calus", "(", "target", ",", "VA", ",", "VB", ",", "sigma_A", ",", "sigma_B", ",", "temperature", "=", "'pore.temperature'", ",", "viscosity", "=", "'pore.viscosity'", ")", ":", "T", "=", "target", "[", "temperature", "]", "mu", "=", "target", ...
r""" Uses Tyn_Calus model to estimate diffusion coefficient in a dilute liquid solution of A in B from first principles at conditions of interest Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. VA : float, array_like Molar volume of component A at boiling temperature (m3/mol) VB : float, array_like Molar volume of component B at boiling temperature (m3/mol) sigmaA: float, array_like Surface tension of component A at boiling temperature (N/m) sigmaB: float, array_like Surface tension of component B at boiling temperature (N/m) pressure : string The dictionary key containing the pressure values in Pascals (Pa) temperature : string The dictionary key containing the temperature values in Kelvin (K)
[ "r", "Uses", "Tyn_Calus", "model", "to", "estimate", "diffusion", "coefficient", "in", "a", "dilute", "liquid", "solution", "of", "A", "in", "B", "from", "first", "principles", "at", "conditions", "of", "interest" ]
python
train
MaritimeRenewable/PyResis
PyResis/propulsion_power.py
https://github.com/MaritimeRenewable/PyResis/blob/c53f83598c8760d532c44036ea3ecd0c84eada95/PyResis/propulsion_power.py#L13-L24
def frictional_resistance_coef(length, speed, **kwargs): """ Flat plate frictional resistance of the ship according to ITTC formula. ref: https://ittc.info/media/2021/75-02-02-02.pdf :param length: metres length of the vehicle :param speed: m/s speed of the vehicle :param kwargs: optional could take in temperature to take account change of water property :return: Frictional resistance coefficient of the vehicle """ Cf = 0.075 / (np.log10(reynolds_number(length, speed, **kwargs)) - 2) ** 2 return Cf
[ "def", "frictional_resistance_coef", "(", "length", ",", "speed", ",", "*", "*", "kwargs", ")", ":", "Cf", "=", "0.075", "/", "(", "np", ".", "log10", "(", "reynolds_number", "(", "length", ",", "speed", ",", "*", "*", "kwargs", ")", ")", "-", "2", ...
Flat plate frictional resistance of the ship according to ITTC formula. ref: https://ittc.info/media/2021/75-02-02-02.pdf :param length: metres length of the vehicle :param speed: m/s speed of the vehicle :param kwargs: optional could take in temperature to take account change of water property :return: Frictional resistance coefficient of the vehicle
[ "Flat", "plate", "frictional", "resistance", "of", "the", "ship", "according", "to", "ITTC", "formula", ".", "ref", ":", "https", ":", "//", "ittc", ".", "info", "/", "media", "/", "2021", "/", "75", "-", "02", "-", "02", "-", "02", ".", "pdf" ]
python
valid
bukun/TorCMS
torcms/model/collect_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/collect_model.py#L55-L67
def count_of_user(user_id): ''' Get the cound of views. ''' return TabCollect.select( TabCollect, TabPost.uid.alias('post_uid'), TabPost.title.alias('post_title'), TabPost.view_count.alias('post_view_count') ).where( TabCollect.user_id == user_id ).join( TabPost, on=(TabCollect.post_id == TabPost.uid) ).count()
[ "def", "count_of_user", "(", "user_id", ")", ":", "return", "TabCollect", ".", "select", "(", "TabCollect", ",", "TabPost", ".", "uid", ".", "alias", "(", "'post_uid'", ")", ",", "TabPost", ".", "title", ".", "alias", "(", "'post_title'", ")", ",", "TabP...
Get the cound of views.
[ "Get", "the", "cound", "of", "views", "." ]
python
train
pydsigner/pygu
pygu/pygw.py
https://github.com/pydsigner/pygu/blob/09fe71534900933908ab83db12f5659b7827e31c/pygu/pygw.py#L252-L257
def unbind(self, func, etype): ''' Wraps around container.unbind(). ''' wrapped = self.event_cbs[func] self.container.unbind(self, wrapped, etype)
[ "def", "unbind", "(", "self", ",", "func", ",", "etype", ")", ":", "wrapped", "=", "self", ".", "event_cbs", "[", "func", "]", "self", ".", "container", ".", "unbind", "(", "self", ",", "wrapped", ",", "etype", ")" ]
Wraps around container.unbind().
[ "Wraps", "around", "container", ".", "unbind", "()", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9891-L9905
def prsint(string): """ Parse a string as an integer, encapsulating error handling. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html :param string: String representing an integer. :type string: str :return: Integer value obtained by parsing string. :rtype: int """ string = stypes.stringToCharP(string) intval = ctypes.c_int() libspice.prsint_c(string, ctypes.byref(intval)) return intval.value
[ "def", "prsint", "(", "string", ")", ":", "string", "=", "stypes", ".", "stringToCharP", "(", "string", ")", "intval", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "prsint_c", "(", "string", ",", "ctypes", ".", "byref", "(", "intval", ")", ...
Parse a string as an integer, encapsulating error handling. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html :param string: String representing an integer. :type string: str :return: Integer value obtained by parsing string. :rtype: int
[ "Parse", "a", "string", "as", "an", "integer", "encapsulating", "error", "handling", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4296-L4309
def get_stp_mst_detail_output_cist_port_edge_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") port = ET.SubElement(cist, "port") edge_port = ET.SubElement(port, "edge-port") edge_port.text = kwargs.pop('edge_port') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_edge_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train