repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
martinrusev/solid-python
solidpy/utils/wsgi.py
https://github.com/martinrusev/solid-python/blob/c5c39ad43c19e6746ea0297e0d440a2fccfb25ed/solidpy/utils/wsgi.py#L2-L12
def get_headers(environ): """ Returns only proper HTTP headers. """ for key, value in environ.iteritems(): key = str(key) if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield key[5:].replace('_', '-').title(), value elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield key.replace('_', '-').title(), value
[ "def", "get_headers", "(", "environ", ")", ":", "for", "key", ",", "value", "in", "environ", ".", "iteritems", "(", ")", ":", "key", "=", "str", "(", "key", ")", "if", "key", ".", "startswith", "(", "'HTTP_'", ")", "and", "key", "not", "in", "(", ...
Returns only proper HTTP headers.
[ "Returns", "only", "proper", "HTTP", "headers", "." ]
python
train
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L4838-L4845
def drop_function(self, dbName, funcName): """ Parameters: - dbName - funcName """ self.send_drop_function(dbName, funcName) self.recv_drop_function()
[ "def", "drop_function", "(", "self", ",", "dbName", ",", "funcName", ")", ":", "self", ".", "send_drop_function", "(", "dbName", ",", "funcName", ")", "self", ".", "recv_drop_function", "(", ")" ]
Parameters: - dbName - funcName
[ "Parameters", ":", "-", "dbName", "-", "funcName" ]
python
train
pytroll/satpy
satpy/readers/aapp_l1b.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/aapp_l1b.py#L193-L220
def navigate(self): """Return the longitudes and latitudes of the scene. """ tic = datetime.now() lons40km = self._data["pos"][:, :, 1] * 1e-4 lats40km = self._data["pos"][:, :, 0] * 1e-4 try: from geotiepoints import SatelliteInterpolator except ImportError: logger.warning("Could not interpolate lon/lats, " "python-geotiepoints missing.") self.lons, self.lats = lons40km, lats40km else: cols40km = np.arange(24, 2048, 40) cols1km = np.arange(2048) lines = lons40km.shape[0] rows40km = np.arange(lines) rows1km = np.arange(lines) along_track_order = 1 cross_track_order = 3 satint = SatelliteInterpolator( (lons40km, lats40km), (rows40km, cols40km), (rows1km, cols1km), along_track_order, cross_track_order) self.lons, self.lats = satint.interpolate() logger.debug("Navigation time %s", str(datetime.now() - tic))
[ "def", "navigate", "(", "self", ")", ":", "tic", "=", "datetime", ".", "now", "(", ")", "lons40km", "=", "self", ".", "_data", "[", "\"pos\"", "]", "[", ":", ",", ":", ",", "1", "]", "*", "1e-4", "lats40km", "=", "self", ".", "_data", "[", "\"p...
Return the longitudes and latitudes of the scene.
[ "Return", "the", "longitudes", "and", "latitudes", "of", "the", "scene", "." ]
python
train
kressi/terminalplot
terminalplot/terminalplot.py
https://github.com/kressi/terminalplot/blob/af05f3fe0793c957cc0b0ebf4afbe54c72d18b66/terminalplot/terminalplot.py#L7-L38
def plot(x, y, rows=None, columns=None): """ x, y list of values on x- and y-axis plot those values within canvas size (rows and columns) """ if not rows or not columns: rows, columns = get_terminal_size() # offset for caption rows -= 4 # Scale points such that they fit on canvas x_scaled = scale(x, columns) y_scaled = scale(y, rows) # Create empty canvas canvas = [[' ' for _ in range(columns)] for _ in range(rows)] # Add scaled points to canvas for ix, iy in zip(x_scaled, y_scaled): canvas[rows - iy - 1][ix] = '*' # Print rows of canvas for row in [''.join(row) for row in canvas]: print(row) # Print scale print(''.join([ '\nMin x: ', str(min(x)), ' Max x: ', str(max(x)), ' Min y: ', str(min(y)), ' Max y: ', str(max(y)) ]))
[ "def", "plot", "(", "x", ",", "y", ",", "rows", "=", "None", ",", "columns", "=", "None", ")", ":", "if", "not", "rows", "or", "not", "columns", ":", "rows", ",", "columns", "=", "get_terminal_size", "(", ")", "# offset for caption", "rows", "-=", "4...
x, y list of values on x- and y-axis plot those values within canvas size (rows and columns)
[ "x", "y", "list", "of", "values", "on", "x", "-", "and", "y", "-", "axis", "plot", "those", "values", "within", "canvas", "size", "(", "rows", "and", "columns", ")" ]
python
train
Cadair/jupyter_environment_kernels
environment_kernels/activate_helper.py
https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/activate_helper.py#L244-L271
def _is_executable_file(path): """Checks that path is an executable regular file, or a symlink towards one. This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``. This function was forked from pexpect originally: Copyright (c) 2013-2014, Pexpect development team Copyright (c) 2012, Noah Spurrier <noah@noah.org> PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ # follow symlinks, fpath = os.path.realpath(path) if not os.path.isfile(fpath): # non-files (directories, fifo, etc.) return False return os.access(fpath, os.X_OK)
[ "def", "_is_executable_file", "(", "path", ")", ":", "# follow symlinks,", "fpath", "=", "os", ".", "path", ".", "realpath", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "fpath", ")", ":", "# non-files (directories, fifo, etc.)", "re...
Checks that path is an executable regular file, or a symlink towards one. This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``. This function was forked from pexpect originally: Copyright (c) 2013-2014, Pexpect development team Copyright (c) 2012, Noah Spurrier <noah@noah.org> PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
[ "Checks", "that", "path", "is", "an", "executable", "regular", "file", "or", "a", "symlink", "towards", "one", ".", "This", "is", "roughly", "os", ".", "path", "isfile", "(", "path", ")", "and", "os", ".", "access", "(", "path", "os", ".", "X_OK", ")...
python
train
saltstack/salt
salt/states/bigip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/bigip.py#L73-L106
def _check_for_changes(entity_type, ret, existing, modified): ''' take an existing entity and a modified entity and check for changes. ''' ret['result'] = True #were there any changes? generation always changes, remove it. if isinstance(existing, dict) and isinstance(modified, dict): if 'generation' in modified['content'].keys(): del modified['content']['generation'] if 'generation' in existing['content'].keys(): del existing['content']['generation'] if modified['content'] == existing['content']: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \ 'were enforced. See changes for details.'.format(entity_type=entity_type) ret['changes']['old'] = existing['content'] ret['changes']['new'] = modified['content'] else: if modified == existing: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \ 'were enforced. See changes for details.'.format(entity_type=entity_type) ret['changes']['old'] = existing ret['changes']['new'] = modified return ret
[ "def", "_check_for_changes", "(", "entity_type", ",", "ret", ",", "existing", ",", "modified", ")", ":", "ret", "[", "'result'", "]", "=", "True", "#were there any changes? generation always changes, remove it.", "if", "isinstance", "(", "existing", ",", "dict", ")"...
take an existing entity and a modified entity and check for changes.
[ "take", "an", "existing", "entity", "and", "a", "modified", "entity", "and", "check", "for", "changes", "." ]
python
train
apache/airflow
airflow/executors/local_executor.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/local_executor.py#L73-L92
def execute_work(self, key, command): """ Executes command received and stores result state in queue. :param key: the key to identify the TI :type key: tuple(dag_id, task_id, execution_date) :param command: the command to execute :type command: str """ if key is None: return self.log.info("%s running %s", self.__class__.__name__, command) try: subprocess.check_call(command, close_fds=True) state = State.SUCCESS except subprocess.CalledProcessError as e: state = State.FAILED self.log.error("Failed to execute task %s.", str(e)) # TODO: Why is this commented out? # raise e self.result_queue.put((key, state))
[ "def", "execute_work", "(", "self", ",", "key", ",", "command", ")", ":", "if", "key", "is", "None", ":", "return", "self", ".", "log", ".", "info", "(", "\"%s running %s\"", ",", "self", ".", "__class__", ".", "__name__", ",", "command", ")", "try", ...
Executes command received and stores result state in queue. :param key: the key to identify the TI :type key: tuple(dag_id, task_id, execution_date) :param command: the command to execute :type command: str
[ "Executes", "command", "received", "and", "stores", "result", "state", "in", "queue", ".", ":", "param", "key", ":", "the", "key", "to", "identify", "the", "TI", ":", "type", "key", ":", "tuple", "(", "dag_id", "task_id", "execution_date", ")", ":", "par...
python
test
Yelp/kafka-utils
kafka_utils/util/zookeeper.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L390-L399
def _fetch_partition_info(self, topic_id, partition_id): """Fetch partition info for given topic-partition.""" info_path = "/brokers/topics/{topic_id}/partitions/{p_id}" try: _, partition_info = self.get( info_path.format(topic_id=topic_id, p_id=partition_id), ) return partition_info except NoNodeError: return {}
[ "def", "_fetch_partition_info", "(", "self", ",", "topic_id", ",", "partition_id", ")", ":", "info_path", "=", "\"/brokers/topics/{topic_id}/partitions/{p_id}\"", "try", ":", "_", ",", "partition_info", "=", "self", ".", "get", "(", "info_path", ".", "format", "("...
Fetch partition info for given topic-partition.
[ "Fetch", "partition", "info", "for", "given", "topic", "-", "partition", "." ]
python
train
matrix-org/matrix-python-sdk
matrix_client/client.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/client.py#L219-L236
def login_with_password_no_sync(self, username, password): """Deprecated. Use ``login`` with ``sync=False``. Login to the homeserver. Args: username (str): Account username password (str): Account password Returns: str: Access token Raises: MatrixRequestError """ warn("login_with_password_no_sync is deprecated. Use login with sync=False.", DeprecationWarning) return self.login(username, password, sync=False)
[ "def", "login_with_password_no_sync", "(", "self", ",", "username", ",", "password", ")", ":", "warn", "(", "\"login_with_password_no_sync is deprecated. Use login with sync=False.\"", ",", "DeprecationWarning", ")", "return", "self", ".", "login", "(", "username", ",", ...
Deprecated. Use ``login`` with ``sync=False``. Login to the homeserver. Args: username (str): Account username password (str): Account password Returns: str: Access token Raises: MatrixRequestError
[ "Deprecated", ".", "Use", "login", "with", "sync", "=", "False", "." ]
python
train
eonpatapon/contrail-api-cli
contrail_api_cli/resource.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/resource.py#L575-L591
def save(self): """Save the resource to the API server If the resource doesn't have a uuid the resource will be created. If uuid is present the resource is updated. :rtype: Resource """ if self.path.is_collection: self.session.post_json(self.href, {self.type: dict(self.data)}, cls=ResourceEncoder) else: self.session.put_json(self.href, {self.type: dict(self.data)}, cls=ResourceEncoder) return self.fetch(exclude_children=True, exclude_back_refs=True)
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "path", ".", "is_collection", ":", "self", ".", "session", ".", "post_json", "(", "self", ".", "href", ",", "{", "self", ".", "type", ":", "dict", "(", "self", ".", "data", ")", "}", ",", ...
Save the resource to the API server If the resource doesn't have a uuid the resource will be created. If uuid is present the resource is updated. :rtype: Resource
[ "Save", "the", "resource", "to", "the", "API", "server" ]
python
train
gr33ndata/dysl
dysl/dyslib/lm.py
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L290-L308
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1): ''' Add new document to our Language Model (training phase) doc_id is used here, so we build seperate LF for each doc_id I.e. if you call it more than once with same doc_id, then all terms given via doc_terms will contribute to same LM doc_terms: list of words in document to be added doc_length: the length of the document, you can provide it yourself, otherwise, we use len(doc_terms) instead. ''' if doc_length == -1: self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms)) else: self.update_lengths(doc_id=doc_id, doc_length=int(doc_length)) for term in doc_terms: self.vocabulary.add(term) terms = self.lr_padding(doc_terms) ngrams = self.to_ngrams(terms) self.update_counts(doc_id, ngrams)
[ "def", "add_doc", "(", "self", ",", "doc_id", "=", "''", ",", "doc_terms", "=", "[", "]", ",", "doc_length", "=", "-", "1", ")", ":", "if", "doc_length", "==", "-", "1", ":", "self", ".", "update_lengths", "(", "doc_id", "=", "doc_id", ",", "doc_le...
Add new document to our Language Model (training phase) doc_id is used here, so we build seperate LF for each doc_id I.e. if you call it more than once with same doc_id, then all terms given via doc_terms will contribute to same LM doc_terms: list of words in document to be added doc_length: the length of the document, you can provide it yourself, otherwise, we use len(doc_terms) instead.
[ "Add", "new", "document", "to", "our", "Language", "Model", "(", "training", "phase", ")", "doc_id", "is", "used", "here", "so", "we", "build", "seperate", "LF", "for", "each", "doc_id", "I", ".", "e", ".", "if", "you", "call", "it", "more", "than", ...
python
train
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L654-L664
def setMinimumPixmapSize(self, size): """ Sets the minimum pixmap size that will be displayed to the user for the dock widget. :param size | <int> """ self._minimumPixmapSize = size position = self.position() self._position = None self.setPosition(position)
[ "def", "setMinimumPixmapSize", "(", "self", ",", "size", ")", ":", "self", ".", "_minimumPixmapSize", "=", "size", "position", "=", "self", ".", "position", "(", ")", "self", ".", "_position", "=", "None", "self", ".", "setPosition", "(", "position", ")" ]
Sets the minimum pixmap size that will be displayed to the user for the dock widget. :param size | <int>
[ "Sets", "the", "minimum", "pixmap", "size", "that", "will", "be", "displayed", "to", "the", "user", "for", "the", "dock", "widget", ".", ":", "param", "size", "|", "<int", ">" ]
python
train
google/brotli
research/brotlidump.py
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1145-L1165
def value(self, dcode, dextra): """Decode value of symbol together with the extra bits. >>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10) >>> d[34].value(2) (0, 35) """ if dcode<16: return [(1,0),(2,0),(3,0),(4,0), (1,-1),(1,+1),(1,-2),(1,+2),(1,-3),(1,+3), (2,-1),(2,+1),(2,-2),(2,+2),(2,-3),(2,+3) ][dcode] if dcode<16+self.NDIRECT: return (0,dcode-16) #we use the original formulas, instead of my clear explanation POSTFIX_MASK = (1 << self.NPOSTFIX) - 1 ndistbits = 1 + ((dcode - self.NDIRECT - 16) >> (self.NPOSTFIX + 1)) hcode = (dcode - self.NDIRECT - 16) >> self.NPOSTFIX lcode = (dcode - self.NDIRECT - 16) & POSTFIX_MASK offset = ((2 + (hcode & 1)) << ndistbits) - 4 distance = ((offset + dextra) << self.NPOSTFIX) + lcode + self.NDIRECT + 1 return (0,distance)
[ "def", "value", "(", "self", ",", "dcode", ",", "dextra", ")", ":", "if", "dcode", "<", "16", ":", "return", "[", "(", "1", ",", "0", ")", ",", "(", "2", ",", "0", ")", ",", "(", "3", ",", "0", ")", ",", "(", "4", ",", "0", ")", ",", ...
Decode value of symbol together with the extra bits. >>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10) >>> d[34].value(2) (0, 35)
[ "Decode", "value", "of", "symbol", "together", "with", "the", "extra", "bits", ".", ">>>", "d", "=", "DistanceAlphabet", "(", "D", "NPOSTFIX", "=", "2", "NDIRECT", "=", "10", ")", ">>>", "d", "[", "34", "]", ".", "value", "(", "2", ")", "(", "0", ...
python
test
noahbenson/neuropythy
neuropythy/commands/surface_to_image.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/surface_to_image.py#L83-L187
def main(args): ''' surface_to_rubbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these arguments may include any options and must include exactly one subject id and one output filename. Additionally one or two surface input filenames must be given. The surface files are projected into the ribbon and written to the output filename. For more information see the string stored in surface_to_image.info. ''' # Parse the arguments (args, opts) = _surface_to_ribbon_parser(args) # First, help? if opts['help']: print(info, file=sys.stdout) return 1 # and if we are verbose, lets setup a note function verbose = opts['verbose'] def note(s): if verbose: print(s, file=sys.stdout) return verbose # Add the subjects directory, if there is one if 'subjects_dir' in opts and opts['subjects_dir'] is not None: add_subject_path(opts['subjects_dir']) # figure out our arguments: (lhfl, rhfl) = (opts['lh_file'], opts['rh_file']) if len(args) == 0: raise ValueError('Not enough arguments provided!') elif len(args) == 1: # must be that the subject is in the env? 
sub = find_subject_path(os.getenv('SUBJECT')) outfl = args[0] elif len(args) == 2: sbpth = find_subject_path(args[0]) if sbpth is not None: sub = sbpth else: sub = find_subject_path(os.getenv('SUBJECT')) if lhfl is not None: rhfl = args[0] elif rhfl is not None: lhfl = args[0] else: raise ValueError('Given arg is not a subject: %s' % args[0]) outfl = args[1] elif len(args) == 3: sbpth0 = find_subject_path(args[0]) sbpth1 = find_subject_path(args[1]) if sbpth0 is not None: sub = sbpth0 if lhfl is not None: rhfl = args[1] elif rhfl is not None: lhfl = args[1] else: raise ValueError('Too many arguments given: %s' % args[1]) elif sbpth1 is not None: sub = sbpth1 if lhfl is not None: rhfl = args[0] elif rhfl is not None: lhfl = args[0] else: raise ValueError('Too many arguments given: %s' % args[0]) else: sub = find_subject_path(os.getenv('SUBJECT')) if lhfl is not None or rhfl is not None: raise ValueError('Too many arguments and no subject given') (lhfl, rhfl) = args outfl = args[2] elif len(args) == 4: if lhfl is not None or rhfl is not None: raise ValueError('Too many arguments and no subject given') subidx = next((i for (i,a) in enumerate(args) if find_subject_path(a) is not None), None) if subidx is None: raise ValueError('No subject given') sub = find_subject_path(args[subidx]) del args[subidx] (lhfl, rhfl, outfl) = args else: raise ValueError('Too many arguments provided!') if sub is None: raise ValueError('No subject specified or found in $SUBJECT') if lhfl is None and rhfl is None: raise ValueError('No surfaces provided') # check the method method = opts['method'].lower() if method not in ['linear', 'lines', 'nearest', 'auto']: raise ValueError('Unsupported method: %s' % method) # and the datatype if opts['dtype'] is None: dtyp = None elif opts['dtype'].lower() == 'float': dtyp = np.float32 elif opts['dtype'].lower() == 'int': dtyp = np.int32 else: raise ValueError('Type argument must be float or int') if method == 'auto': if dtyp is np.float32: method = 
'linear' elif dtyp is np.int32: method = 'nearest' else: method = 'linear' # Now, load the data: note('Reading surfaces...') (lhdat, rhdat) = (None, None) if lhfl is not None: note(' - Reading LH file: %s' % lhfl) lhdat = read_surf_file(lhfl) if rhfl is not None: note(' - Reading RH file: %s' % rhfl) rhdat = read_surf_file(rhfl) (dat, hemi) = (rhdat, 'rh') if lhdat is None else \ (lhdat, 'lh') if rhdat is None else \ ((lhdat, rhdat), None) sub = subject(sub) # okay, make the volume... note('Generating volume...') vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts['fill'], dtype=dtyp) # and write out the file note('Exporting volume file: %s' % outfl) save(outfl, vol, affine=sub.voxel_to_native_matrix) note('surface_to_image complete!') return 0
[ "def", "main", "(", "args", ")", ":", "# Parse the arguments", "(", "args", ",", "opts", ")", "=", "_surface_to_ribbon_parser", "(", "args", ")", "# First, help?", "if", "opts", "[", "'help'", "]", ":", "print", "(", "info", ",", "file", "=", "sys", ".",...
surface_to_rubbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these arguments may include any options and must include exactly one subject id and one output filename. Additionally one or two surface input filenames must be given. The surface files are projected into the ribbon and written to the output filename. For more information see the string stored in surface_to_image.info.
[ "surface_to_rubbon", ".", "main", "(", "args", ")", "can", "be", "given", "a", "list", "of", "arguments", "such", "as", "sys", ".", "argv", "[", "1", ":", "]", ";", "these", "arguments", "may", "include", "any", "options", "and", "must", "include", "ex...
python
train
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L358-L366
def strToTempfile(s, suffix=None, prefix=None, dir=None, binary=False): """Create a new tempfile, write ``s`` to it and return the filename. `suffix`, `prefix` and `dir` are like in `tempfile.mkstemp`. """ fd, filename = tempfile.mkstemp(**dict((k,v) for (k,v) in [('suffix',suffix),('prefix',prefix),('dir', dir)] if v is not None)) spitOut(s, fd, binary) return filename
[ "def", "strToTempfile", "(", "s", ",", "suffix", "=", "None", ",", "prefix", "=", "None", ",", "dir", "=", "None", ",", "binary", "=", "False", ")", ":", "fd", ",", "filename", "=", "tempfile", ".", "mkstemp", "(", "*", "*", "dict", "(", "(", "k"...
Create a new tempfile, write ``s`` to it and return the filename. `suffix`, `prefix` and `dir` are like in `tempfile.mkstemp`.
[ "Create", "a", "new", "tempfile", "write", "s", "to", "it", "and", "return", "the", "filename", ".", "suffix", "prefix", "and", "dir", "are", "like", "in", "tempfile", ".", "mkstemp", "." ]
python
train
helixyte/everest
everest/entities/attributes.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/entities/attributes.py#L127-L134
def get_domain_class_relationship_attribute_iterator(ent): """ Returns an iterator over all terminal attributes in the given registered resource. """ for attr in itervalues_(ent.__everest_attributes__): if attr.kind != RESOURCE_ATTRIBUTE_KINDS.TERMINAL: yield attr
[ "def", "get_domain_class_relationship_attribute_iterator", "(", "ent", ")", ":", "for", "attr", "in", "itervalues_", "(", "ent", ".", "__everest_attributes__", ")", ":", "if", "attr", ".", "kind", "!=", "RESOURCE_ATTRIBUTE_KINDS", ".", "TERMINAL", ":", "yield", "a...
Returns an iterator over all terminal attributes in the given registered resource.
[ "Returns", "an", "iterator", "over", "all", "terminal", "attributes", "in", "the", "given", "registered", "resource", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/db/api.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/db/api.py#L396-L450
def withdraw(session, uidentity, organization, from_date=MIN_PERIOD_DATE, to_date=MAX_PERIOD_DATE): """Withdraw a unique identity from an organization. Removes all the enrollments between the unique identity in `uidentity` and the given 'organization'. When a period of time is given using `from_date` and `to_date` parameters, the function will remove those periods on which `from_date` <= enrollment <= `to_date`. Default values for these dates are `MIN_PERIOD_DATE` and `MAX_PERIOD_DATE`. These dates cannot be `None`. :param session: database session :param uidentity: unique identity to withdraw :param organization: organization where the unique identity is withdrawn :param from_date: date when the enrollment starts :param to_date: date when the enrollment ends :return: number of removed enrollments :raises ValeError: when either `from_date` or `to_date` are `None`; when `from_date < MIN_PERIOD_DATE`; or `to_date > MAX_PERIOD_DATE` or `from_date > to_date`. """ if not from_date: raise ValueError("'from_date' cannot be None") if not to_date: raise ValueError("'to_date' cannot be None") if from_date < MIN_PERIOD_DATE or from_date > MAX_PERIOD_DATE: raise ValueError("'from_date' %s is out of bounds" % str(from_date)) if to_date < MIN_PERIOD_DATE or to_date > MAX_PERIOD_DATE: raise ValueError("'to_date' %s is out of bounds" % str(to_date)) if from_date > to_date: raise ValueError("'from_date' %s cannot be greater than %s" % (from_date, to_date)) enrollments = session.query(Enrollment).\ filter(Enrollment.uidentity == uidentity, Enrollment.organization == organization, from_date <= Enrollment.start, Enrollment.end <= to_date).all() ndeleted = 0 for enrollment in enrollments: session.delete(enrollment) ndeleted += 1 if ndeleted > 0: uidentity.last_modified = datetime.datetime.utcnow() session.flush() return ndeleted
[ "def", "withdraw", "(", "session", ",", "uidentity", ",", "organization", ",", "from_date", "=", "MIN_PERIOD_DATE", ",", "to_date", "=", "MAX_PERIOD_DATE", ")", ":", "if", "not", "from_date", ":", "raise", "ValueError", "(", "\"'from_date' cannot be None\"", ")", ...
Withdraw a unique identity from an organization. Removes all the enrollments between the unique identity in `uidentity` and the given 'organization'. When a period of time is given using `from_date` and `to_date` parameters, the function will remove those periods on which `from_date` <= enrollment <= `to_date`. Default values for these dates are `MIN_PERIOD_DATE` and `MAX_PERIOD_DATE`. These dates cannot be `None`. :param session: database session :param uidentity: unique identity to withdraw :param organization: organization where the unique identity is withdrawn :param from_date: date when the enrollment starts :param to_date: date when the enrollment ends :return: number of removed enrollments :raises ValeError: when either `from_date` or `to_date` are `None`; when `from_date < MIN_PERIOD_DATE`; or `to_date > MAX_PERIOD_DATE` or `from_date > to_date`.
[ "Withdraw", "a", "unique", "identity", "from", "an", "organization", "." ]
python
train
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L213-L243
def max_min_col_update(self): """ Determines the maximum and minimum number in each column. The result is a list whose k-th entry is [vmax, vmin], where vmax and vmin denote the maximum and minimum of the k-th column (ignoring NaN). This list is stored in self.max_min_col. If the k-th column has a non-numerical dtype, then the k-th entry is set to None. If the dtype is complex, then compute the maximum and minimum of the absolute values. If vmax equals vmin, then vmin is decreased by one. """ if self.df.shape[0] == 0: # If no rows to compute max/min then return return self.max_min_col = [] for dummy, col in self.df.iteritems(): if col.dtype in REAL_NUMBER_TYPES + COMPLEX_NUMBER_TYPES: if col.dtype in REAL_NUMBER_TYPES: vmax = col.max(skipna=True) vmin = col.min(skipna=True) else: vmax = col.abs().max(skipna=True) vmin = col.abs().min(skipna=True) if vmax != vmin: max_min = [vmax, vmin] else: max_min = [vmax, vmin - 1] else: max_min = None self.max_min_col.append(max_min)
[ "def", "max_min_col_update", "(", "self", ")", ":", "if", "self", ".", "df", ".", "shape", "[", "0", "]", "==", "0", ":", "# If no rows to compute max/min then return\r", "return", "self", ".", "max_min_col", "=", "[", "]", "for", "dummy", ",", "col", "in"...
Determines the maximum and minimum number in each column. The result is a list whose k-th entry is [vmax, vmin], where vmax and vmin denote the maximum and minimum of the k-th column (ignoring NaN). This list is stored in self.max_min_col. If the k-th column has a non-numerical dtype, then the k-th entry is set to None. If the dtype is complex, then compute the maximum and minimum of the absolute values. If vmax equals vmin, then vmin is decreased by one.
[ "Determines", "the", "maximum", "and", "minimum", "number", "in", "each", "column", ".", "The", "result", "is", "a", "list", "whose", "k", "-", "th", "entry", "is", "[", "vmax", "vmin", "]", "where", "vmax", "and", "vmin", "denote", "the", "maximum", "...
python
train
biolink/ontobio
ontobio/lexmap.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L531-L644
def weighted_axioms(self, x, y, xg): """ return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping between x and y. See kboom paper """ # TODO: allow additional weighting # weights are log odds w=log(p/(1-p)) # (Sub,Sup,Eq,Other) scope_pairs = [ ('label', 'label', 0.0, 0.0, 3.0,-0.8), ('label', 'exact', 0.0, 0.0, 2.5,-0.5), ('label', 'broad', -1.0, 1.0, 0.0, 0.0), ('label', 'narrow', 1.0,-1.0, 0.0, 0.0), ('label', 'related', 0.0, 0.0, 0.0, 0.0), ('exact', 'exact', 0.0, 0.0, 2.5,-0.5), ('exact', 'broad', -1.0, 1.0, 0.0, 0.0), ('exact', 'narrow', 1.0,-1.0, 0.0, 0.0), ('exact', 'related', 0.0, 0.0, 0.0, 0.0), ('related', 'broad', -0.5, 0.5, 0.0, 0.0), ('related', 'narrow', 0.5,-0.5, 0.0, 0.0), ('related', 'related', 0.0, 0.0, 0.0, 0.0), ('broad', 'broad', 0.0, 0.0, 0.0, 1.0), ('broad', 'narrow', -0.5, 0.5, 0.0, 0.2), ('narrow', 'narrow', 0.0, 0.0, 0.0, 0.0) ] # populate symmetric lookup matrix scope_map = defaultdict(dict) for (l,r,w1,w2,w3,w4) in scope_pairs: l = l.upper() r = r.upper() scope_map[l][r] = np.array((w1,w2,w3,w4)) scope_map[r][l] = np.array((w2,w1,w3,w4)) # TODO: get prior based on ontology pair # cumulative sum of weights WS = None pfx1 = self._id_to_ontology(x) pfx2 = self._id_to_ontology(y) for mw in self.config.get('match_weights', []): mpfx1 = mw.get('prefix1','') mpfx2 = mw.get('prefix2','') X = np.array(mw['weights']) if mpfx1 == pfx1 and mpfx2 == pfx2: WS = X elif mpfx2 == pfx1 and mpfx1 == pfx2: WS = self._flipweights(X) elif mpfx1 == pfx1 and mpfx2 == '' and WS is None: WS = X elif mpfx2 == pfx1 and mpfx1 == '' and WS is None: WS = self._flipweights(X) if WS is None: WS = np.array((0.0, 0.0, 0.0, 0.0)) # defaults WS += np.array(self.config.get('default_weights', [0.0, 0.0, 1.5, -0.1])) logging.info('WS defaults={}'.format(WS)) for xw in self.config.get('xref_weights', []): left = xw.get('left','') right = xw.get('right','') X = np.array(xw['weights']) if x == left and y == right: WS += X 
logging.info('MATCH: {} for {}-{}'.format(X, x, y)) elif y == left and x == right: WS += self._flipweights(X) logging.info('IMATCH: {}'.format(X)) smap = self.smap # TODO: symmetrical WT = np.array((0.0, 0.0, 0.0, 0.0)) WBESTMAX = np.array((0.0, 0.0, 0.0, 0.0)) n = 0 for sx in smap[x]: WBEST, _ = self._best_match_syn(sx, smap[y], scope_map) if WBEST is not None: WT += WBEST n += 1 if max(abs(WBEST)) > max(abs(WBESTMAX)): WBESTMAX = WBEST for sy in smap[y]: WBEST, _ = self._best_match_syn(sy, smap[x], scope_map) if WBEST is not None: WT += WBEST n += 1 # average best match if n > 0: logging.info('Adding BESTMAX={}'.format(WBESTMAX)) WS += WBESTMAX # TODO: xref, many to many WS += self._graph_weights(x, y, xg) # TODO: include additional defined weights, eg ORDO logging.info('Adding WS, gw={}'.format(WS)) # jaccard similarity (ss1,ss2) = xg[x][y][self.SIMSCORES] WS[3] += ((1-ss1) + (1-ss2)) / 2 # reciprocal best hits are higher confidence of equiv rs = xg[x][y]['reciprocal_score'] if rs == 4: WS[2] += 0.5 if rs == 0: WS[2] -= 0.2 #P = np.expit(WS) P = 1/(1+np.exp(-WS)) logging.info('Final WS={}, init P={}'.format(WS, P)) # probs should sum to 1.0 P = P / np.sum(P) return P
[ "def", "weighted_axioms", "(", "self", ",", "x", ",", "y", ",", "xg", ")", ":", "# TODO: allow additional weighting", "# weights are log odds w=log(p/(1-p))", "# (Sub,Sup,Eq,Other)", "scope_pairs", "=", "[", "(", "'label'", ",", "'label'", ",", "0.0", ",", "0.0", ...
return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping between x and y. See kboom paper
[ "return", "a", "tuple", "(", "sub", "sup", "equiv", "other", ")", "indicating", "estimated", "prior", "probabilities", "for", "an", "interpretation", "of", "a", "mapping", "between", "x", "and", "y", "." ]
python
train
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L1686-L1701
def install_package(self, path, recursive=False): # type: (str, bool) -> tuple """ Installs all the modules found in the given package (directory). It is a utility method working like :meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor accepting every module found. :param path: Path of the package (folder) :param recursive: If True, installs the modules found in sub-directories :return: A 2-tuple, with the list of installed bundles (:class:`~pelix.framework.Bundle`) and the list of the names of the modules which import failed. :raise ValueError: The given path is invalid """ return self.__framework.install_package(path, recursive)
[ "def", "install_package", "(", "self", ",", "path", ",", "recursive", "=", "False", ")", ":", "# type: (str, bool) -> tuple", "return", "self", ".", "__framework", ".", "install_package", "(", "path", ",", "recursive", ")" ]
Installs all the modules found in the given package (directory). It is a utility method working like :meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor accepting every module found. :param path: Path of the package (folder) :param recursive: If True, installs the modules found in sub-directories :return: A 2-tuple, with the list of installed bundles (:class:`~pelix.framework.Bundle`) and the list of the names of the modules which import failed. :raise ValueError: The given path is invalid
[ "Installs", "all", "the", "modules", "found", "in", "the", "given", "package", "(", "directory", ")", ".", "It", "is", "a", "utility", "method", "working", "like", ":", "meth", ":", "~pelix", ".", "framework", ".", "BundleContext", ".", "install_visiting", ...
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L3183-L3202
def is_child_of_vault(self, id_, vault_id): """Tests if a vault is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: vault_id (osid.id.Id): the ``Id`` of a vault return: (boolean) - ``true`` if the ``id`` is a child of ``vault_id,`` ``false`` otherwise raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_child_of_bin if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=vault_id) return self._hierarchy_session.is_child(id_=vault_id, child_id=id_)
[ "def", "is_child_of_vault", "(", "self", ",", "id_", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_child_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_se...
Tests if a vault is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: vault_id (osid.id.Id): the ``Id`` of a vault return: (boolean) - ``true`` if the ``id`` is a child of ``vault_id,`` ``false`` otherwise raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "a", "vault", "is", "a", "direct", "child", "of", "another", "." ]
python
train
genialis/resolwe
resolwe/flow/executors/run.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L96-L104
async def update_data_status(self, **kwargs): """Update (PATCH) Data object. :param kwargs: The dictionary of :class:`~resolwe.flow.models.Data` attributes to be changed. """ await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={ ExecutorProtocol.UPDATE_CHANGESET: kwargs })
[ "async", "def", "update_data_status", "(", "self", ",", "*", "*", "kwargs", ")", ":", "await", "self", ".", "_send_manager_command", "(", "ExecutorProtocol", ".", "UPDATE", ",", "extra_fields", "=", "{", "ExecutorProtocol", ".", "UPDATE_CHANGESET", ":", "kwargs"...
Update (PATCH) Data object. :param kwargs: The dictionary of :class:`~resolwe.flow.models.Data` attributes to be changed.
[ "Update", "(", "PATCH", ")", "Data", "object", "." ]
python
train
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L48-L59
def _getRunningApps(cls): """Get a list of the running applications.""" def runLoopAndExit(): AppHelper.stopEventLoop() AppHelper.callLater(1, runLoopAndExit) AppHelper.runConsoleEventLoop() # Get a list of running applications ws = AppKit.NSWorkspace.sharedWorkspace() apps = ws.runningApplications() return apps
[ "def", "_getRunningApps", "(", "cls", ")", ":", "def", "runLoopAndExit", "(", ")", ":", "AppHelper", ".", "stopEventLoop", "(", ")", "AppHelper", ".", "callLater", "(", "1", ",", "runLoopAndExit", ")", "AppHelper", ".", "runConsoleEventLoop", "(", ")", "# Ge...
Get a list of the running applications.
[ "Get", "a", "list", "of", "the", "running", "applications", "." ]
python
valid
guaix-ucm/pyemir
emirdrp/tools/rect_wpoly_for_mos.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/rect_wpoly_for_mos.py#L65-L434
def main(args=None): # parse command-line options parser = argparse.ArgumentParser(prog='rect_wpoly_for_mos') # required arguments parser.add_argument("input_list", help="TXT file with list JSON files derived from " "longslit data") parser.add_argument("--fitted_bound_param", required=True, help="Input JSON with fitted boundary parameters", type=argparse.FileType('rt')) parser.add_argument("--out_MOSlibrary", required=True, help="Output JSON file with results", type=lambda x: arg_file_is_new(parser, x)) # optional arguments parser.add_argument("--debugplot", help="Integer indicating plotting & debugging options" " (default=0)", default=0, type=int, choices=DEBUGPLOT_CODES) parser.add_argument("--echo", help="Display full command line", action="store_true") args = parser.parse_args(args) if args.echo: print('\033[1m\033[31m% ' + ' '.join(sys.argv) + '\033[0m\n') # --- # Read input TXT file with list of JSON files list_json_files = list_fileinfo_from_txt(args.input_list) nfiles = len(list_json_files) if abs(args.debugplot) >= 10: print('>>> Number of input JSON files:', nfiles) for item in list_json_files: print(item) if nfiles < 2: raise ValueError("Insufficient number of input JSON files") # read fitted boundary parameters and check that all the longslit JSON # files have been computed using the same fitted boundary parameters refined_boundary_model = RefinedBoundaryModelParam._datatype_load( args.fitted_bound_param.name) for ifile in range(nfiles): coef_rect_wpoly = RectWaveCoeff._datatype_load( list_json_files[ifile].filename) uuid_tmp = coef_rect_wpoly.meta_info['origin']['bound_param'] if uuid_tmp[4:] != refined_boundary_model.uuid: print('Expected uuid:', refined_boundary_model.uuid) print('uuid for ifile #' + str(ifile + 1) + ": " + uuid_tmp) raise ValueError("Fitted boundary parameter uuid's do not match") # check consistency of grism, filter, DTU configuration and list of # valid slitlets coef_rect_wpoly_first_longslit = RectWaveCoeff._datatype_load( 
list_json_files[0].filename) filter_name = coef_rect_wpoly_first_longslit.tags['filter'] grism_name = coef_rect_wpoly_first_longslit.tags['grism'] dtu_conf = DtuConfiguration.define_from_dictionary( coef_rect_wpoly_first_longslit.meta_info['dtu_configuration'] ) list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in coef_rect_wpoly_first_longslit.missing_slitlets: list_valid_islitlets.remove(idel) for ifile in range(1, nfiles): coef_rect_wpoly = RectWaveCoeff._datatype_load( list_json_files[ifile].filename) filter_tmp = coef_rect_wpoly.tags['filter'] if filter_name != filter_tmp: print(filter_name) print(filter_tmp) raise ValueError("Unexpected different filter found") grism_tmp = coef_rect_wpoly.tags['grism'] if grism_name != grism_tmp: print(grism_name) print(grism_tmp) raise ValueError("Unexpected different grism found") coef_rect_wpoly = RectWaveCoeff._datatype_load( list_json_files[ifile].filename) dtu_conf_tmp = DtuConfiguration.define_from_dictionary( coef_rect_wpoly.meta_info['dtu_configuration'] ) if dtu_conf != dtu_conf_tmp: print(dtu_conf) print(dtu_conf_tmp) raise ValueError("Unexpected different DTU configurations found") list_valid_islitlets_tmp = list(range(1, EMIR_NBARS + 1)) for idel in coef_rect_wpoly.missing_slitlets: list_valid_islitlets_tmp.remove(idel) if list_valid_islitlets != list_valid_islitlets_tmp: print(list_valid_islitlets) print(list_valid_islitlets_tmp) raise ValueError("Unexpected different list of valid slitlets") # check consistency of horizontal bounding box limits (bb_nc1_orig and # bb_nc2_orig) and ymargin_bb, and store the values for each slitlet dict_bb_param = {} print("Checking horizontal bounding box limits and ymargin_bb:") for islitlet in list_valid_islitlets: islitlet_progress(islitlet, EMIR_NBARS) cslitlet = 'slitlet' + str(islitlet).zfill(2) dict_bb_param[cslitlet] = {} for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']: value_initial = \ coef_rect_wpoly_first_longslit.contents[islitlet - 1][par] for 
ifile in range(1, nfiles): coef_rect_wpoly = RectWaveCoeff._datatype_load( list_json_files[ifile].filename) value_tmp = coef_rect_wpoly.contents[islitlet - 1][par] if value_initial != value_tmp: print(islitlet, value_initial, value_tmp) print(value_tmp) raise ValueError("Unexpected different " + par) dict_bb_param[cslitlet][par] = value_initial print('OK!') # --- # Read and store all the longslit data list_coef_rect_wpoly = [] for ifile in range(nfiles): coef_rect_wpoly = RectWaveCoeff._datatype_load( list_json_files[ifile].filename) list_coef_rect_wpoly.append(coef_rect_wpoly) # --- # Initialize structure to save results into an ouptut JSON file outdict = {} outdict['refined_boundary_model'] = refined_boundary_model.__getstate__() outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'rectification and wavelength calibration polynomial coefficients ' \ 'as a function of csu_bar_slit_center for MOS' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['wpoly_longslits'] = {} for ifile in range(nfiles): cdum = 'longslit_' + str(ifile + 1).zfill(3) + '_uuid' outdict['meta_info']['origin']['wpoly_longslits'][cdum] = \ list_coef_rect_wpoly[ifile].uuid outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} # include bb_nc1_orig, bb_nc2_orig and ymargin_bb for each slitlet # (note that the values of bb_ns1_orig and bb_ns2_orig cannot be # computed at this stage because they depend on csu_bar_slit_center) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) outdict['contents'][cslitlet] = dict_bb_param[cslitlet] # check that order for rectification transformations is the same for all # the slitlets and longslit 
configurations order_check_list = [] for ifile in range(nfiles): tmpdict = list_coef_rect_wpoly[ifile].contents for islitlet in list_valid_islitlets: ttd_order = tmpdict[islitlet - 1]['ttd_order'] if ttd_order is not None: order_check_list.append(ttd_order) ttd_order_modeled = \ tmpdict[islitlet - 1]['ttd_order_longslit_model'] order_check_list.append(ttd_order_modeled) # remove duplicates in list order_no_duplicates = list(set(order_check_list)) if len(order_no_duplicates) != 1: print('order_no_duplicates:', order_no_duplicates) raise ValueError('tdd_order is not constant!') ttd_order = int(order_no_duplicates[0]) ncoef_rect = ncoef_fmap(ttd_order) if abs(args.debugplot) >= 10: print('>>> ttd_order........:', ttd_order) print('>>> ncoef_rect.......:', ncoef_rect) # check that polynomial degree in frontiers and spectrails are the same poldeg_check_list = [] for ifile in range(nfiles): tmpdict = list_coef_rect_wpoly[ifile].contents for islitlet in list_valid_islitlets: tmppoly = tmpdict[islitlet - 1]['frontier']['poly_coef_lower'] poldeg_check_list.append(len(tmppoly) - 1) tmppoly = tmpdict[islitlet - 1]['frontier']['poly_coef_upper'] poldeg_check_list.append(len(tmppoly) - 1) tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_lower'] poldeg_check_list.append(len(tmppoly) - 1) tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_middle'] poldeg_check_list.append(len(tmppoly) - 1) tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_upper'] poldeg_check_list.append(len(tmppoly) - 1) # remove duplicates in list poldeg_no_duplicates = list(set(poldeg_check_list)) if len(poldeg_no_duplicates) != 1: print('poldeg_no_duplicates:', poldeg_no_duplicates) raise ValueError('poldeg is not constant in frontiers and ' 'spectrails!') poldeg_spectrails = int(poldeg_no_duplicates[0]) if abs(args.debugplot) >= 10: print('>>> poldeg spectrails:', poldeg_spectrails) # check that polynomial degree of wavelength calibration is the same for # all the slitlets poldeg_check_list 
= [] for ifile in range(nfiles): tmpdict = list_coef_rect_wpoly[ifile].contents for islitlet in list_valid_islitlets: tmppoly = tmpdict[islitlet - 1]['wpoly_coeff'] poldeg_check_list.append(len(tmppoly) - 1) tmppoly = tmpdict[islitlet - 1]['wpoly_coeff_longslit_model'] poldeg_check_list.append(len(tmppoly) - 1) # remove duplicates in list poldeg_no_duplicates = list(set(poldeg_check_list)) if len(poldeg_no_duplicates) != 1: print('poldeg_no_duplicates:', poldeg_no_duplicates) raise ValueError('poldeg is not constant in wavelength calibration ' 'polynomials!') poldeg_wavecal = int(poldeg_no_duplicates[0]) if abs(args.debugplot) >= 10: print('>>> poldeg wavecal...:', poldeg_wavecal) # --- # csu_bar_slit_center values for each slitlet print("CSU_bar_slit_center values:") for islitlet in list_valid_islitlets: islitlet_progress(islitlet, EMIR_NBARS) cslitlet = 'slitlet' + str(islitlet).zfill(2) list_csu_bar_slit_center = [] for ifile in range(nfiles): tmpdict = list_coef_rect_wpoly[ifile].contents[islitlet - 1] csu_bar_slit_center = tmpdict['csu_bar_slit_center'] list_csu_bar_slit_center.append(csu_bar_slit_center) # check that list_csu_bar_slit_center is properly sorted if not np.all(list_csu_bar_slit_center[:-1] <= list_csu_bar_slit_center[1:]): print('cslitlet: ', cslitlet) print('list_csu_bar_slit_center: ', list_csu_bar_slit_center) raise ValueError('Unsorted list_csu_bar_slit_center') outdict['contents'][cslitlet]['list_csu_bar_slit_center'] = \ list_csu_bar_slit_center print('OK!') # --- # rectification polynomial coefficients # note: when aij and bij have not been computed, we use the modeled # version aij_longslit_model and bij_longslit_model print("Rectification polynomial coefficients:") for islitlet in list_valid_islitlets: islitlet_progress(islitlet, EMIR_NBARS) cslitlet = 'slitlet' + str(islitlet).zfill(2) outdict['contents'][cslitlet]['ttd_order'] = ttd_order outdict['contents'][cslitlet]['ncoef_rect'] = ncoef_rect for keycoef in ['ttd_aij', 'ttd_bij', 
'tti_aij', 'tti_bij']: for icoef in range(ncoef_rect): ccoef = str(icoef).zfill(2) list_cij = [] for ifile in range(nfiles): tmpdict = \ list_coef_rect_wpoly[ifile].contents[islitlet - 1] cij = tmpdict[keycoef] if cij is not None: list_cij.append(cij[icoef]) else: cij_modeled = tmpdict[keycoef + '_longslit_model'] if cij_modeled is None: raise ValueError("Unexpected cij_modeled=None!") else: list_cij.append(cij_modeled[icoef]) if abs(args.debugplot) >= 10: print("Warning: using " + keycoef + "_longslit_model for " + cslitlet + " in file " + list_json_files[ifile].filename) cdum = 'list_' + keycoef + '_' + ccoef outdict['contents'][cslitlet][cdum] = list_cij print('OK!') # --- # wavelength calibration polynomial coefficients # note: when wpoly_coeff have not been computed, we use the # wpoly_coeff_longslit_model print("Wavelength calibration polynomial coefficients:") for islitlet in list_valid_islitlets: islitlet_progress(islitlet, EMIR_NBARS) cslitlet = 'slitlet' + str(islitlet).zfill(2) outdict['contents'][cslitlet]['wpoly_degree'] = poldeg_wavecal for icoef in range(poldeg_wavecal + 1): ccoef = str(icoef).zfill(2) list_cij = [] for ifile in range(nfiles): tmpdict = list_coef_rect_wpoly[ifile].contents[islitlet - 1] cij = tmpdict['wpoly_coeff'] if cij is not None: list_cij.append(cij[icoef]) else: cij_modeled = tmpdict['wpoly_coeff_longslit_model'] if cij_modeled is None: raise ValueError("Unexpected cij_modeled=None!") else: list_cij.append(cij_modeled[icoef]) if abs(args.debugplot) >= 10: print("Warning: using wpoly_coeff_longslit_model" + " for " + cslitlet + " in file " + list_json_files[ifile].filename) outdict['contents'][cslitlet]['list_wpoly_coeff_' + ccoef] = \ list_cij print('OK!') # --- # OBSOLETE # Save resulting JSON structure ''' with open(args.out_MOSlibrary.name + '_old', 'w') as fstream: json.dump(outdict, fstream, indent=2, sort_keys=True) print('>>> Saving file ' + args.out_MOSlibrary.name + '_old') ''' # -- # Create object of type 
MasterRectWave with library of coefficients # for rectification and wavelength calibration master_rectwv = MasterRectWave(instrument='EMIR') master_rectwv.quality_control = numina.types.qc.QC.GOOD master_rectwv.tags['grism'] = grism_name master_rectwv.tags['filter'] = filter_name master_rectwv.meta_info['dtu_configuration'] = outdict['dtu_configuration'] master_rectwv.meta_info['refined_boundary_model'] = { 'parmodel': refined_boundary_model.meta_info['parmodel'] } master_rectwv.meta_info['refined_boundary_model'].update( outdict['refined_boundary_model']['contents'] ) master_rectwv.total_slitlets = EMIR_NBARS master_rectwv.meta_info['origin'] = { 'bound_param': 'uuid' + refined_boundary_model.uuid, 'longslit_frames': ['uuid:' + list_coef_rect_wpoly[ifile].uuid for ifile in range(nfiles)] } for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: dumdict.update({ 'bb_nc1_orig': 0, 'bb_nc2_orig': 0, 'ymargin_bb': 0, 'list_csu_bar_slit_center': [], 'ttd_order': 0, 'ncoef_rect': 0, 'wpolydegree': 0 }) master_rectwv.missing_slitlets.append(islitlet) master_rectwv.contents.append(dumdict) master_rectwv.writeto(args.out_MOSlibrary.name) print('>>> Saving file ' + args.out_MOSlibrary.name)
[ "def", "main", "(", "args", "=", "None", ")", ":", "# parse command-line options", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'rect_wpoly_for_mos'", ")", "# required arguments", "parser", ".", "add_argument", "(", "\"input_list\"", ",", "...
with open(args.out_MOSlibrary.name + '_old', 'w') as fstream: json.dump(outdict, fstream, indent=2, sort_keys=True) print('>>> Saving file ' + args.out_MOSlibrary.name + '_old')
[ "with", "open", "(", "args", ".", "out_MOSlibrary", ".", "name", "+", "_old", "w", ")", "as", "fstream", ":", "json", ".", "dump", "(", "outdict", "fstream", "indent", "=", "2", "sort_keys", "=", "True", ")", "print", "(", ">>>", "Saving", "file", "+...
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/GrupoL3.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/GrupoL3.py#L58-L80
def inserir(self, name): """Inserts a new Group L3 and returns its identifier. :param name: Group L3 name. String with a minimum 2 and maximum of 80 characters :return: Dictionary with the following structure: :: {'group_l3': {'id': < id_group_l3 >}} :raise InvalidParameterError: Name is null and invalid. :raise NomeGrupoL3DuplicadoError: There is already a registered Group L3 with the value of name. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ group_l3_map = dict() group_l3_map['name'] = name code, xml = self.submit({'group_l3': group_l3_map}, 'POST', 'groupl3/') return self.response(code, xml)
[ "def", "inserir", "(", "self", ",", "name", ")", ":", "group_l3_map", "=", "dict", "(", ")", "group_l3_map", "[", "'name'", "]", "=", "name", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'group_l3'", ":", "group_l3_map", "}", ",", "'POS...
Inserts a new Group L3 and returns its identifier. :param name: Group L3 name. String with a minimum 2 and maximum of 80 characters :return: Dictionary with the following structure: :: {'group_l3': {'id': < id_group_l3 >}} :raise InvalidParameterError: Name is null and invalid. :raise NomeGrupoL3DuplicadoError: There is already a registered Group L3 with the value of name. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Inserts", "a", "new", "Group", "L3", "and", "returns", "its", "identifier", "." ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/bucket.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/bucket.py#L1195-L1211
def labels(self, mapping): """Set labels assigned to this bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets#labels :type mapping: :class:`dict` :param mapping: Name-value pairs (string->string) labelling the bucket. """ # If any labels have been expressly removed, we need to track this # so that a future .patch() call can do the correct thing. existing = set([k for k in self.labels.keys()]) incoming = set([k for k in mapping.keys()]) self._label_removals = self._label_removals.union(existing.difference(incoming)) # Actually update the labels on the object. self._patch_property("labels", copy.deepcopy(mapping))
[ "def", "labels", "(", "self", ",", "mapping", ")", ":", "# If any labels have been expressly removed, we need to track this", "# so that a future .patch() call can do the correct thing.", "existing", "=", "set", "(", "[", "k", "for", "k", "in", "self", ".", "labels", ".",...
Set labels assigned to this bucket. See https://cloud.google.com/storage/docs/json_api/v1/buckets#labels :type mapping: :class:`dict` :param mapping: Name-value pairs (string->string) labelling the bucket.
[ "Set", "labels", "assigned", "to", "this", "bucket", "." ]
python
train
pypa/bandersnatch
src/bandersnatch_filter_plugins/prerelease_name.py
https://github.com/pypa/bandersnatch/blob/8b702c3bc128c5a1cbdd18890adede2f7f17fad4/src/bandersnatch_filter_plugins/prerelease_name.py#L29-L35
def filter(self, info, releases): """ Remove all release versions that match any of the specificed patterns. """ for version in list(releases.keys()): if any(pattern.match(version) for pattern in self.patterns): del releases[version]
[ "def", "filter", "(", "self", ",", "info", ",", "releases", ")", ":", "for", "version", "in", "list", "(", "releases", ".", "keys", "(", ")", ")", ":", "if", "any", "(", "pattern", ".", "match", "(", "version", ")", "for", "pattern", "in", "self", ...
Remove all release versions that match any of the specificed patterns.
[ "Remove", "all", "release", "versions", "that", "match", "any", "of", "the", "specificed", "patterns", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/__init__.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L535-L573
def queryTs(ts, expression): """ Find the indices of the time series entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | matches = queryTs(ts, "archiveType == marine sediment") | matches = queryTs(ts, "geo_meanElev <= 2000") :param str expression: Expression :param list ts: Time series :return list _idx: Indices of entries that match the criteria """ # Make a copy of the ts. We're going to work directly on it. _idx = [] # User provided a single query string if isinstance(expressions, str): # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expressions) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) # User provided a list of multiple queries elif isinstance(expressions, list): # Loop for each query for expr in expressions: # Use some magic to turn the given string expression into a machine-usable comparative expression. expr_lst = translate_expression(expr) # Only proceed if the translation resulted in a usable expression. if expr_lst: # Return the new filtered time series. This will use the same time series # that filters down each loop. new_ts, _idx = get_matches(expr_lst, new_ts) return _idx
[ "def", "queryTs", "(", "ts", ",", "expression", ")", ":", "# Make a copy of the ts. We're going to work directly on it.", "_idx", "=", "[", "]", "# User provided a single query string", "if", "isinstance", "(", "expressions", ",", "str", ")", ":", "# Use some magic to tur...
Find the indices of the time series entries that match the given expression. | Example: | D = lipd.loadLipd() | ts = lipd.extractTs(D) | matches = queryTs(ts, "archiveType == marine sediment") | matches = queryTs(ts, "geo_meanElev <= 2000") :param str expression: Expression :param list ts: Time series :return list _idx: Indices of entries that match the criteria
[ "Find", "the", "indices", "of", "the", "time", "series", "entries", "that", "match", "the", "given", "expression", "." ]
python
train
brunato/lograptor
lograptor/filemap.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/filemap.py#L212-L224
def add(self, files, items): """ Add a list of files with a reference to a list of objects. """ if isinstance(files, (str, bytes)): files = iter([files]) for pathname in files: try: values = self._filemap[pathname] except KeyError: self._filemap[pathname] = items else: values.extend(items)
[ "def", "add", "(", "self", ",", "files", ",", "items", ")", ":", "if", "isinstance", "(", "files", ",", "(", "str", ",", "bytes", ")", ")", ":", "files", "=", "iter", "(", "[", "files", "]", ")", "for", "pathname", "in", "files", ":", "try", ":...
Add a list of files with a reference to a list of objects.
[ "Add", "a", "list", "of", "files", "with", "a", "reference", "to", "a", "list", "of", "objects", "." ]
python
train
securestate/termineter
lib/termineter/interface.py
https://github.com/securestate/termineter/blob/d657d25d97c7739e650b951c396404e857e56625/lib/termineter/interface.py#L535-L562
def do_show(self, args):
    """Valid parameters for the "show" command are: modules, options"""
    # Leading blank line separates the table from the prompt.
    self.print_line('')
    if args.thing == 'modules':
        # "show modules": two-column listing of module path and description.
        self.print_line('Modules' + os.linesep + '=======')
        headers = ('Name', 'Description')
        rows = [(module.path, module.description) for module in self.frmwk.modules.values()]
    else:
        # "show options" / "show advanced": choose the option store based on
        # whether a module is currently selected in the framework.
        if self.frmwk.current_module and args.thing == 'options':
            options = self.frmwk.current_module.options
            self.print_line('Module Options' + os.linesep + '==============')
        if self.frmwk.current_module and args.thing == 'advanced':
            options = self.frmwk.current_module.advanced_options
            self.print_line('Advanced Module Options' + os.linesep + '=======================')
        elif self.frmwk.current_module is None and args.thing == 'options':
            options = self.frmwk.options
            self.print_line('Framework Options' + os.linesep + '=================')
        elif self.frmwk.current_module is None and args.thing == 'advanced':
            options = self.frmwk.advanced_options
            self.print_line('Advanced Framework Options' + os.linesep + '==========================')
        # NOTE(review): assumes args.thing is constrained to the values above
        # (presumably by argparse choices); otherwise `options` is unbound here
        # — confirm against the argument parser setup.
        headers = ('Name', 'Value', 'Description')
        raw_options = [options.get_option(name) for name in options]
        rows = [(option.name, str(option.value), option.help) for option in raw_options]
    # Present rows alphabetically by the first column (name).
    rows = sorted(rows, key=lambda row: row[0])
    self.print_line('')
    self.frmwk.print_table(rows, headers=headers, line_prefix=' ')
    self.print_line('')
    return
[ "def", "do_show", "(", "self", ",", "args", ")", ":", "self", ".", "print_line", "(", "''", ")", "if", "args", ".", "thing", "==", "'modules'", ":", "self", ".", "print_line", "(", "'Modules'", "+", "os", ".", "linesep", "+", "'======='", ")", "heade...
Valid parameters for the "show" command are: modules, options
[ "Valid", "parameters", "for", "the", "show", "command", "are", ":", "modules", "options" ]
python
train
paolodragone/pymzn
pymzn/dzn/marsh.py
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L394-L417
def rebase_array(d, recursive=False):
    """Transform an indexed dictionary (such as those returned by the
    dzn2dict function when parsing arrays) into an multi-dimensional list.

    Parameters
    ----------
    d : dict
        The indexed dictionary to convert.
    bool : recursive
        Whether to rebase the array recursively.

    Returns
    -------
    list
        A multi-dimensional list.
    """
    lo, hi = _extremes(d.keys())

    def _rebased(key):
        # Descend into nested index-dicts only when recursion is requested.
        v = d[key]
        return rebase_array(v) if recursive and _is_dict(v) else v

    return [_rebased(i) for i in range(lo, hi + 1)]
[ "def", "rebase_array", "(", "d", ",", "recursive", "=", "False", ")", ":", "arr", "=", "[", "]", "min_val", ",", "max_val", "=", "_extremes", "(", "d", ".", "keys", "(", ")", ")", "for", "idx", "in", "range", "(", "min_val", ",", "max_val", "+", ...
Transform an indexed dictionary (such as those returned by the dzn2dict function when parsing arrays) into an multi-dimensional list. Parameters ---------- d : dict The indexed dictionary to convert. bool : recursive Whether to rebase the array recursively. Returns ------- list A multi-dimensional list.
[ "Transform", "an", "indexed", "dictionary", "(", "such", "as", "those", "returned", "by", "the", "dzn2dict", "function", "when", "parsing", "arrays", ")", "into", "an", "multi", "-", "dimensional", "list", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L117-L126
def get_config_set(self, section, option):
    """Collect the value of ``option`` in ``section`` from every layered
    config file.

    Returns a set with one entry per config file that defines the
    section/option pair (identical values collapse naturally).
    """
    # The filename and file-pointer members of each layer tuple are unused.
    return {
        parser.get(section, option)
        for parser, _filename, _fp in self.layers
        if parser.has_option(section, option)
    }
[ "def", "get_config_set", "(", "self", ",", "section", ",", "option", ")", ":", "values", "=", "set", "(", ")", "for", "cp", ",", "filename", ",", "fp", "in", "self", ".", "layers", ":", "filename", "=", "filename", "# pylint", "fp", "=", "fp", "# pyl...
Returns a set with each value per config file in it.
[ "Returns", "a", "set", "with", "each", "value", "per", "config", "file", "in", "it", "." ]
python
train
jalanb/pysyte
pysyte/decorators.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/decorators.py#L135-L143
def globber(main_method, globs):
    """Recognise globs in args

    Wrap *main_method* so that, in addition to the raw argument list, it
    receives the subset of arguments that expand (via glob) to at least
    one existing filesystem path.

    :param main_method: callable taking ``(arguments, matching_arguments)``
    :param globs: unused; kept for interface compatibility
    :return: the wrapping ``main(arguments)`` callable
    """
    from glob import glob

    def main(arguments):
        # Keep only the arguments that glob-match at least one existing path.
        # (The original referenced an undefined name `pathname`, raising
        # NameError; the loop variable is what must be globbed.)
        lists_of_paths = [_ for _ in arguments if glob(_, recursive=True)]
        return main_method(arguments, lists_of_paths)
    return main
[ "def", "globber", "(", "main_method", ",", "globs", ")", ":", "import", "os", "from", "glob", "import", "glob", "def", "main", "(", "arguments", ")", ":", "lists_of_paths", "=", "[", "_", "for", "_", "in", "arguments", "if", "glob", "(", "pathname", ",...
Recognise globs in args
[ "Recognise", "globs", "in", "args" ]
python
train
ClericPy/torequests
torequests/utils.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/utils.py#L1460-L1499
def find_one(cls, pattern, string, flags=0):
    """JS-like regex search wrapped in a RegMatch object.

    Runs ``re.search`` and hands the result (which may be ``None``) to
    *cls*, so callers can index groups safely — a missing match or group
    yields ``''`` instead of raising::

        >>> find_one("a(.)", "abcd")[1]
        'b'
        >>> find_one("a(.)", "abcd")[2] or "default"
        'default'

    Register as ``re.findone`` via ``register_re_findone()`` for
    ``re.findone('a(b)', 'abcd')[1]``-style usage.
    """
    return cls(re.search(pattern, string, flags=flags))
[ "def", "find_one", "(", "cls", ",", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "item", "=", "re", ".", "search", "(", "pattern", ",", "string", ",", "flags", "=", "flags", ")", "return", "cls", "(", "item", ")" ]
JS-like match object. Use index number to get groups, if not match or no group, will return ''. Basic Usage:: >>> from torequests.utils import find_one >>> string = "abcd" >>> find_one("a.*", string) <torequests.utils.RegMatch object at 0x0705F1D0> >>> find_one("a.*", string)[0] 'abcd' >>> find_one("a.*", string)[1] '' >>> find_one("a(.)", string)[0] 'ab' >>> find_one("a(.)", string)[1] 'b' >>> find_one("a(.)", string)[2] or "default" 'default' >>> import re >>> item = find_one("a(B)(C)", string, flags=re.I | re.S) >>> item <torequests.utils.RegMatch object at 0x0705F1D0> >>> item[0] 'abc' >>> item[1] 'b' >>> item[2] 'c' >>> item[3] '' >>> # import re >>> # re.findone = find_one >>> register_re_findone() >>> re.findone('a(b)', 'abcd')[1] or 'default' 'b'
[ "JS", "-", "like", "match", "object", ".", "Use", "index", "number", "to", "get", "groups", "if", "not", "match", "or", "no", "group", "will", "return", "." ]
python
train
mosdef-hub/mbuild
mbuild/utils/geometry.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/utils/geometry.py#L6-L26
def calc_dihedral(point1, point2, point3, point4):
    """Calculates a dihedral angle

    Two planes are spanned by (point1, point2, point3) and
    (point2, point3, point4); the angle between their normal vectors is
    returned.

    Parameters
    ----------
    point1, point2, point3, point4 : array-like, shape=(3,), dtype=float
        Four points that define two planes

    Returns
    -------
    float
        The dihedral angle between the two planes defined by the four points.
    """
    p1, p2, p3, p4 = (np.asarray(p) for p in (point1, point2, point3, point4))
    # Normals of the two planes, from consecutive edge vectors.
    normal_a = np.cross(p2 - p1, p3 - p2)
    normal_b = np.cross(p3 - p2, p4 - p3)
    return angle(normal_a, normal_b)
[ "def", "calc_dihedral", "(", "point1", ",", "point2", ",", "point3", ",", "point4", ")", ":", "points", "=", "np", ".", "array", "(", "[", "point1", ",", "point2", ",", "point3", ",", "point4", "]", ")", "x", "=", "np", ".", "cross", "(", "points",...
Calculates a dihedral angle Here, two planes are defined by (point1, point2, point3) and (point2, point3, point4). The angle between them is returned. Parameters ---------- point1, point2, point3, point4 : array-like, shape=(3,), dtype=float Four points that define two planes Returns ------- float The dihedral angle between the two planes defined by the four points.
[ "Calculates", "a", "dihedral", "angle" ]
python
train
6809/dragonlib
dragonlib/core/basic.py
https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/basic.py#L136-L149
def pformat_tokens(self, tokens):
    """
    format a tokenized BASIC program line. Useful for debugging.
    returns a list of formated string lines.
    """
    lines = []
    for value in self.iter_token_values(tokens):
        char = self.token2ascii(value)
        # Extended (two-byte) tokens get a wider hex field.
        fmt = "\t$%04x -> %s" if value > 0xff else "\t $%02x -> %s"
        lines.append(fmt % (value, repr(char)))
    return lines
[ "def", "pformat_tokens", "(", "self", ",", "tokens", ")", ":", "result", "=", "[", "]", "for", "token_value", "in", "self", ".", "iter_token_values", "(", "tokens", ")", ":", "char", "=", "self", ".", "token2ascii", "(", "token_value", ")", "if", "token_...
format a tokenized BASIC program line. Useful for debugging. returns a list of formated string lines.
[ "format", "a", "tokenized", "BASIC", "program", "line", ".", "Useful", "for", "debugging", ".", "returns", "a", "list", "of", "formated", "string", "lines", "." ]
python
train
anteater/anteater
anteater/src/virus_total.py
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L156-L172
def url_report(self, scan_url, apikey):
    """Query the VirusTotal URL-report endpoint for past malicious
    associations of *scan_url*.

    :param scan_url: the URL to look up.
    :param apikey: VirusTotal API key.
    :return: the JSON response dict on HTTP 200; otherwise ``None``.
    """
    url = self.base_url + "url/report"
    params = {"apikey": apikey, 'resource': scan_url}
    # Only issue the request when the local rate limiter allows it.
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        response = requests.post(url, params=params, headers=self.headers)
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Back off hard when VirusTotal reports rate exhaustion.
            time.sleep(20)
        else:
            self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
        # NOTE(review): presumably a between-call delay for the public API on
        # every non-200 outcome — confirm intended placement relative to the
        # branches above.
        time.sleep(self.public_api_sleep_time)
[ "def", "url_report", "(", "self", ",", "scan_url", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"url/report\"", "params", "=", "{", "\"apikey\"", ":", "apikey", ",", "'resource'", ":", "scan_url", "}", "rate_limit_clear", "=", "self...
Send URLs for a list of past malicious associations
[ "Send", "URLS", "for", "list", "of", "past", "malicous", "associations" ]
python
train
apache/incubator-heron
heronpy/api/metrics.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/metrics.py#L58-L62
def incr(self, key, to_add=1):
    """Increments the value of a given key by ``to_add``"""
    if key in self.value:
        self.value[key].incr(to_add)
    else:
        # First increment for this key: create its counter lazily.
        fresh = CountMetric()
        fresh.incr(to_add)
        self.value[key] = fresh
[ "def", "incr", "(", "self", ",", "key", ",", "to_add", "=", "1", ")", ":", "if", "key", "not", "in", "self", ".", "value", ":", "self", ".", "value", "[", "key", "]", "=", "CountMetric", "(", ")", "self", ".", "value", "[", "key", "]", ".", "...
Increments the value of a given key by ``to_add``
[ "Increments", "the", "value", "of", "a", "given", "key", "by", "to_add" ]
python
valid
dmlc/xgboost
python-package/xgboost/core.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L445-L459
def _init_from_csc(self, csc):
    """
    Initialize data from a CSC matrix.
    """
    # A valid CSC matrix has one data value per column index.
    if len(csc.indices) != len(csc.data):
        raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
    # Out-parameter: the native library writes the new DMatrix handle here.
    handle = ctypes.c_void_p()
    # Hand the three CSC arrays (indptr, indices, data) to the native
    # XGDMatrixCreateFromCSCEx entry point; _check_call raises on a
    # non-zero return code.
    _check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr),
                                              c_array(ctypes.c_uint, csc.indices),
                                              c_array(ctypes.c_float, csc.data),
                                              ctypes.c_size_t(len(csc.indptr)),
                                              ctypes.c_size_t(len(csc.data)),
                                              ctypes.c_size_t(csc.shape[0]),
                                              ctypes.byref(handle)))
    # Keep the native handle on the instance; presumably released elsewhere
    # (e.g. __del__) — confirm ownership in the enclosing class.
    self.handle = handle
[ "def", "_init_from_csc", "(", "self", ",", "csc", ")", ":", "if", "len", "(", "csc", ".", "indices", ")", "!=", "len", "(", "csc", ".", "data", ")", ":", "raise", "ValueError", "(", "'length mismatch: {} vs {}'", ".", "format", "(", "len", "(", "csc", ...
Initialize data from a CSC matrix.
[ "Initialize", "data", "from", "a", "CSC", "matrix", "." ]
python
train
threeML/astromodels
astromodels/core/tree.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/core/tree.py#L299-L329
def _find_instances(self, cls): """ Find all the instances of cls below this node. :return: a dictionary of instances of cls """ instances = collections.OrderedDict() for child_name, child in self._children.iteritems(): if isinstance(child, cls): key_name = ".".join(child._get_path()) instances[key_name] = child # Now check if the instance has children, # and if it does go deeper in the tree # NOTE: an empty dictionary evaluate as False if child._children: instances.update(child._find_instances(cls)) else: instances.update(child._find_instances(cls)) return instances
[ "def", "_find_instances", "(", "self", ",", "cls", ")", ":", "instances", "=", "collections", ".", "OrderedDict", "(", ")", "for", "child_name", ",", "child", "in", "self", ".", "_children", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "chil...
Find all the instances of cls below this node. :return: a dictionary of instances of cls
[ "Find", "all", "the", "instances", "of", "cls", "below", "this", "node", "." ]
python
train
alkivi-sas/python-alkivi-logger
alkivi/logger/logger.py
https://github.com/alkivi-sas/python-alkivi-logger/blob/e96d5a987a5c8789c51d4fa7541709e05b1f51e1/alkivi/logger/logger.py#L93-L96
def warning(self, message, *args, **kwargs):
    """Log *message* at WARNING level (alias for ``warn``)."""
    level = logging.WARNING
    self._log(level, message, *args, **kwargs)
[ "def", "warning", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_log", "(", "logging", ".", "WARNING", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Alias to warn
[ "Alias", "to", "warn" ]
python
train
materialsvirtuallab/monty
monty/io.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/io.py#L252-L260
def release(self):
    """
    Get rid of the lock by deleting the lockfile.
    When working in a `with` statement, this gets automatically called
    at the end.
    """
    # Guard clause: releasing an unheld lock is a no-op.
    if not self.is_locked:
        return
    os.close(self.fd)
    os.unlink(self.lockfile)
    self.is_locked = False
[ "def", "release", "(", "self", ")", ":", "if", "self", ".", "is_locked", ":", "os", ".", "close", "(", "self", ".", "fd", ")", "os", ".", "unlink", "(", "self", ".", "lockfile", ")", "self", ".", "is_locked", "=", "False" ]
Get rid of the lock by deleting the lockfile. When working in a `with` statement, this gets automatically called at the end.
[ "Get", "rid", "of", "the", "lock", "by", "deleting", "the", "lockfile", ".", "When", "working", "in", "a", "with", "statement", "this", "gets", "automatically", "called", "at", "the", "end", "." ]
python
train
Qiskit/qiskit-terra
qiskit/pulse/pulse_lib/continuous.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/pulse/pulse_lib/continuous.py#L75-L84
def cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
    """Continuous cosine wave.

    Args:
        times: Times to output wave for.
        amp: Pulse amplitude.
        freq: Pulse frequency, units of 1/dt.
        phase: Pulse phase.

    Returns:
        Complex-valued samples ``amp * cos(2*pi*freq*times + phase)``.
    """
    # np.complex128 replaces the np.complex_ alias, which was removed in
    # NumPy 2.0; both name the same 128-bit complex dtype.
    return amp*np.cos(2*np.pi*freq*times+phase).astype(np.complex128)
[ "def", "cos", "(", "times", ":", "np", ".", "ndarray", ",", "amp", ":", "complex", ",", "freq", ":", "float", ",", "phase", ":", "float", "=", "0", ")", "->", "np", ".", "ndarray", ":", "return", "amp", "*", "np", ".", "cos", "(", "2", "*", "...
Continuous cosine wave. Args: times: Times to output wave for. amp: Pulse amplitude. freq: Pulse frequency, units of 1/dt. phase: Pulse phase.
[ "Continuous", "cosine", "wave", "." ]
python
test
DataONEorg/d1_python
lib_common/src/d1_common/resource_map.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L371-L388
def setDocumentedBy(self, documented_pid, documenting_pid):
    """Add a CiTO, the Citation Typing Ontology, triple asserting that
    ``documented_pid`` isDocumentedBy ``documenting_pid``.

    Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``

    Args:
      documented_pid: str
        PID of a Science Object that is documented by ``documenting_pid``.

      documenting_pid: str
        PID of a Science Object that documents ``documented_pid``.
    """
    # Guard: the resource map must be initialized before triples are added.
    self._check_initialized()
    # Resolve both PIDs to their RDF identifiers within this map.
    documented_id = self.getObjectByPid(documented_pid)
    documenting_id = self.getObjectByPid(documenting_pid)
    # Assert the CiTO isDocumentedBy relationship in the underlying graph.
    self.add((documented_id, CITO.isDocumentedBy, documenting_id))
[ "def", "setDocumentedBy", "(", "self", ",", "documented_pid", ",", "documenting_pid", ")", ":", "self", ".", "_check_initialized", "(", ")", "documented_id", "=", "self", ".", "getObjectByPid", "(", "documented_pid", ")", "documenting_id", "=", "self", ".", "get...
Add a CiTO, the Citation Typing Ontology, triple asserting that ``documented_pid`` isDocumentedBy ``documenting_pid``. Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid`` Args: documented_pid: str PID of a Science Object that is documented by ``documenting_pid``. documenting_pid: str PID of a Science Object that documents ``documented_pid``.
[ "Add", "a", "CiTO", "the", "Citation", "Typing", "Ontology", "triple", "asserting", "that", "documented_pid", "isDocumentedBy", "documenting_pid", "." ]
python
train
saltstack/salt
salt/modules/highstate_doc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/highstate_doc.py#L424-L461
def render(jinja_template_text=None, jinja_template_function='highstate_doc.markdown_default_jinja_template', **kwargs):
    '''
    Render highstate to a text format (default Markdown)

    if `jinja_template_text` is not set, `jinja_template_function` is used.

    jinja_template_text: jinja text that the render uses to create the document.
    jinja_template_function: a salt module call that returns template text.
        options:
            highstate_doc.markdown_basic_jinja_template
            highstate_doc.markdown_default_jinja_template
            highstate_doc.markdown_full_jinja_template
    '''
    config = _get_config(**kwargs)
    lowstates = proccess_lowstates(**kwargs)
    # TODO: __env__,
    # Jinja rendering context: processed lowstates plus the standard Salt
    # dunder globals so templates can reach pillar/grains/opts directly.
    context = {
        'saltenv': None,
        'config': config,
        'lowstates': lowstates,
        'salt': __salt__,
        'pillar': __pillar__,
        'grains': __grains__,
        'opts': __opts__,
        'kwargs': kwargs,
    }
    # Explicit template text wins; otherwise fall back to calling the named
    # salt module function to obtain the template.
    template_text = jinja_template_text
    if template_text is None and jinja_template_function:
        template_text = __salt__[jinja_template_function](**kwargs)
    if template_text is None:
        raise Exception('No jinja template text')
    txt = tpl.render_jinja_tmpl(template_text, context, tmplpath=None)
    # after proccessing the template replace passwords or other data.
    # Each key in replace_text_regex is a regex; its value is the replacement.
    rt = config.get('replace_text_regex')
    for r in rt:
        txt = re.sub(r, rt[r], txt)
    return txt
[ "def", "render", "(", "jinja_template_text", "=", "None", ",", "jinja_template_function", "=", "'highstate_doc.markdown_default_jinja_template'", ",", "*", "*", "kwargs", ")", ":", "config", "=", "_get_config", "(", "*", "*", "kwargs", ")", "lowstates", "=", "proc...
Render highstate to a text format (default Markdown) if `jinja_template_text` is not set, `jinja_template_function` is used. jinja_template_text: jinja text that the render uses to create the document. jinja_template_function: a salt module call that returns template text. options: highstate_doc.markdown_basic_jinja_template highstate_doc.markdown_default_jinja_template highstate_doc.markdown_full_jinja_template
[ "Render", "highstate", "to", "a", "text", "format", "(", "default", "Markdown", ")" ]
python
train
edaniszewski/pylint-quotes
pylint_quotes/checker.py
https://github.com/edaniszewski/pylint-quotes/blob/f13529541d6d787b5b37611a080937f5adba6357/pylint_quotes/checker.py#L364-L377
def _invalid_triple_quote(self, quote, row, col=None):
    """Add a message for an invalid triple quote.

    Args:
        quote: The quote characters that were found.
        row: The row number the quote characters were found on.
        col: The column the quote characters were found on.
    """
    # Look up the quote style the configuration prefers for triple quotes.
    preferred = TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)
    self.add_message(
        'invalid-triple-quote',
        line=row,
        args=(quote, preferred),
        **self.get_offset(col)
    )
[ "def", "_invalid_triple_quote", "(", "self", ",", "quote", ",", "row", ",", "col", "=", "None", ")", ":", "self", ".", "add_message", "(", "'invalid-triple-quote'", ",", "line", "=", "row", ",", "args", "=", "(", "quote", ",", "TRIPLE_QUOTE_OPTS", ".", "...
Add a message for an invalid triple quote. Args: quote: The quote characters that were found. row: The row number the quote characters were found on. col: The column the quote characters were found on.
[ "Add", "a", "message", "for", "an", "invalid", "triple", "quote", "." ]
python
train
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L411-L414
def get_reports(self, **params):
    """List all reports.

    API reference:
    https://developers.coinbase.com/api/v2#list-all-reports
    """
    return self._make_api_object(self._get('v2', 'reports', data=params), Report)
[ "def", "get_reports", "(", "self", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'reports'", ",", "data", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "Report", ")" ]
https://developers.coinbase.com/api/v2#list-all-reports
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#list", "-", "all", "-", "reports" ]
python
train
ratt-ru/PyMORESANE
pymoresane/main.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/main.py#L53-L521
def moresane(self, subregion=None, scale_count=None, sigma_level=4, loop_gain=0.1, tolerance=0.75, accuracy=1e-6, major_loop_miter=100, minor_loop_miter=30, all_on_gpu=False, decom_mode="ser", core_count=1, conv_device='cpu', conv_mode='linear', extraction_mode='cpu', enforce_positivity=False, edge_suppression=False, edge_offset=0, flux_threshold=0, neg_comp=False, edge_excl=0, int_excl=0): """ Primary method for wavelet analysis and subsequent deconvolution. INPUTS: subregion (default=None): Size, in pixels, of the central region to be analyzed and deconvolved. scale_count (default=None): Maximum scale to be considered - maximum scale considered during initialisation. sigma_level (default=4) Number of sigma at which thresholding is to be performed. loop_gain (default=0.1): Loop gain for the deconvolution. tolerance (default=0.75): Tolerance level for object extraction. Significant objects contain wavelet coefficients greater than the tolerance multiplied by the maximum wavelet coefficient in the scale under consideration. accuracy (default=1e-6): Threshold on the standard deviation of the residual noise. Exit main loop when this threshold is reached. major_loop_miter (default=100): Maximum number of iterations allowed in the major loop. Exit condition. minor_loop_miter (default=30): Maximum number of iterations allowed in the minor loop. Serves as an exit condition when the SNR is does not reach a maximum. all_on_gpu (default=False): Boolean specifier to toggle all gpu modes on. decom_mode (default='ser'): Specifier for decomposition mode - serial, multiprocessing, or gpu. core_count (default=1): For multiprocessing, specifies the number of cores. conv_device (default='cpu'): Specifier for device to be used - cpu or gpu. conv_mode (default='linear'): Specifier for convolution mode - linear or circular. extraction_mode (default='cpu'): Specifier for mode to be used - cpu or gpu. 
enforce_positivity (default=False): Boolean specifier for whether or not a model must be strictly positive. edge_suppression (default=False): Boolean specifier for whether or not the edges are to be suprressed. edge_offset (default=0): Numeric value for an additional user-specified number of edge pixels to be ignored. This is added to the minimum suppression. flux_threshold (default=0): Float value, assumed to be in Jy, which specifies an approximate convolution depth. OUTPUTS: self.model (no default): Model extracted by the algorithm. self.residual (no default): Residual signal after deconvolution. """ # If neither subregion nor scale_count is specified, the following handles the assignment of default values. # The default value for subregion is the whole image. The default value for scale_count is the log to the # base two of the image dimensions minus one. logger.info("Starting...") if (self.dirty_data_shape[0]%2)==1: logger.error("Image size is uneven. Please use even dimensions.") raise ValueError("Image size is uneven. Please use even dimensions.") if (subregion is None)|(subregion>self.dirty_data_shape[0]): subregion = self.dirty_data_shape[0] logger.info("Assuming subregion is {}px.".format(self.dirty_data_shape[0])) if (scale_count is None) or (scale_count>(np.log2(self.dirty_data_shape[0])-1)): scale_count = int(np.log2(self.dirty_data_shape[0])-1) logger.info("Assuming maximum scale is {}.".format(scale_count)) if all_on_gpu: decom_mode = 'gpu' conv_device = 'gpu' extraction_mode = 'gpu' # The following creates arrays with dimensions equal to subregion and containing the values of the dirty # image and psf in their central subregions. 
subregion_slice = tuple([slice(self.dirty_data_shape[0]/2-subregion/2, self.dirty_data_shape[0]/2+subregion/2), slice(self.dirty_data_shape[1]/2-subregion/2, self.dirty_data_shape[1]/2+subregion/2)]) dirty_subregion = self.dirty_data[subregion_slice] if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): psf_subregion = self.psf_data[self.psf_data_shape[0]/2-subregion/2:self.psf_data_shape[0]/2+subregion/2, self.psf_data_shape[1]/2-subregion/2:self.psf_data_shape[1]/2+subregion/2] else: psf_subregion = self.psf_data[subregion_slice] # The following pre-loads the gpu with the fft of both the full PSF and the subregion of interest. If usegpu # is false, this simply precomputes the fft of the PSF. if conv_device=="gpu": if conv_mode=="circular": if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): psf_subregion_fft = conv.gpu_r2c_fft(psf_subregion, is_gpuarray=False, store_on_gpu=True) psf_slice = tuple([slice(self.psf_data_shape[0]/2-self.dirty_data_shape[0]/2, self.psf_data_shape[0]/2+self.dirty_data_shape[0]/2), slice(self.psf_data_shape[1]/2-self.dirty_data_shape[1]/2, self.psf_data_shape[1]/2+self.dirty_data_shape[1]/2)]) psf_data_fft = self.psf_data[psf_slice] psf_data_fft = conv.gpu_r2c_fft(psf_data_fft, is_gpuarray=False, store_on_gpu=True) else: psf_subregion_fft = conv.gpu_r2c_fft(psf_subregion, is_gpuarray=False, store_on_gpu=True) if psf_subregion.shape==self.psf_data_shape: psf_data_fft = psf_subregion_fft else: psf_data_fft = conv.gpu_r2c_fft(self.psf_data, is_gpuarray=False, store_on_gpu=True) if conv_mode=="linear": if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): if np.all(np.array(self.dirty_data_shape)==subregion): psf_subregion_fft = conv.gpu_r2c_fft(self.psf_data, is_gpuarray=False, store_on_gpu=True) psf_data_fft = psf_subregion_fft logger.info("Using double size PSF.") else: psf_slice = tuple([slice(self.psf_data_shape[0]/2-subregion, self.psf_data_shape[0]/2+subregion), 
slice(self.psf_data_shape[1]/2-subregion, self.psf_data_shape[1]/2+subregion)]) psf_subregion_fft = self.psf_data[psf_slice] psf_subregion_fft = conv.gpu_r2c_fft(psf_subregion_fft, is_gpuarray=False, store_on_gpu=True) psf_data_fft = conv.gpu_r2c_fft(self.psf_data, is_gpuarray=False, store_on_gpu=True) else: if np.all(np.array(self.dirty_data_shape)==subregion): psf_subregion_fft = conv.pad_array(self.psf_data) psf_subregion_fft = conv.gpu_r2c_fft(psf_subregion_fft, is_gpuarray=False, store_on_gpu=True) psf_data_fft = psf_subregion_fft else: psf_slice = tuple([slice(self.psf_data_shape[0]/2-subregion, self.psf_data_shape[0]/2+subregion), slice(self.psf_data_shape[1]/2-subregion, self.psf_data_shape[1]/2+subregion)]) psf_subregion_fft = self.psf_data[psf_slice] psf_subregion_fft = conv.gpu_r2c_fft(psf_subregion_fft, is_gpuarray=False, store_on_gpu=True) psf_data_fft = conv.pad_array(self.psf_data) psf_data_fft = conv.gpu_r2c_fft(psf_data_fft, is_gpuarray=False, store_on_gpu=True) elif conv_device=="cpu": if conv_mode=="circular": if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): psf_subregion_fft = np.fft.rfft2(psf_subregion) psf_slice = tuple([slice(self.psf_data_shape[0]/2-self.dirty_data_shape[0]/2, self.psf_data_shape[0]/2+self.dirty_data_shape[0]/2), slice(self.psf_data_shape[1]/2-self.dirty_data_shape[1]/2, self.psf_data_shape[1]/2+self.dirty_data_shape[1]/2)]) psf_data_fft = self.psf_data[psf_slice] psf_data_fft = np.fft.rfft2(psf_data_fft) else: psf_subregion_fft = np.fft.rfft2(psf_subregion) if psf_subregion.shape==self.psf_data_shape: psf_data_fft = psf_subregion_fft else: psf_data_fft = np.fft.rfft2(self.psf_data) if conv_mode=="linear": if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): if np.all(np.array(self.dirty_data_shape)==subregion): psf_subregion_fft = np.fft.rfft2(self.psf_data) psf_data_fft = psf_subregion_fft logger.info("Using double size PSF.") else: psf_slice = 
tuple([slice(self.psf_data_shape[0]/2-subregion, self.psf_data_shape[0]/2+subregion), slice(self.psf_data_shape[1]/2-subregion, self.psf_data_shape[1]/2+subregion)]) psf_subregion_fft = self.psf_data[psf_slice] psf_subregion_fft = np.fft.rfft2(psf_subregion_fft) psf_data_fft = np.fft.rfft2(self.psf_data) else: if np.all(np.array(self.dirty_data_shape)==subregion): psf_subregion_fft = conv.pad_array(self.psf_data) psf_subregion_fft = np.fft.rfft2(psf_subregion_fft) psf_data_fft = psf_subregion_fft else: psf_slice = tuple([slice(self.psf_data_shape[0]/2-subregion, self.psf_data_shape[0]/2+subregion), slice(self.psf_data_shape[1]/2-subregion, self.psf_data_shape[1]/2+subregion)]) psf_subregion_fft = self.psf_data[psf_slice] psf_subregion_fft = np.fft.rfft2(psf_subregion_fft) psf_data_fft = conv.pad_array(self.psf_data) psf_data_fft = np.fft.rfft2(psf_data_fft) # The following is a call to the first of the IUWT (Isotropic Undecimated Wavelet Transform) functions. This # returns the decomposition of the PSF. The norm of each scale is found - these correspond to the energies or # weighting factors which must be applied when locating maxima. ### REPLACE SCALECOUNT WITH: int(np.log2(self.dirty_data_shape[0])-1) psf_decomposition = iuwt.iuwt_decomposition(psf_subregion, scale_count, mode=decom_mode, core_count=core_count) psf_energies = np.empty([psf_decomposition.shape[0],1,1], dtype=np.float32) for i in range(psf_energies.shape[0]): psf_energies[i] = np.sqrt(np.sum(np.square(psf_decomposition[i,:,:]))) # INCORPORATE IF NECESSARY. 
POSSIBLY AT OUTER LEVEL # psf_decomposition = psf_decomposition/psf_energies # print(np.unravel_index(np.argmax(psf_decomposition), psf_decomposition.shape)[0]) ######################################################MAJOR LOOP###################################################### major_loop_niter = 0 max_coeff = 1 model = np.zeros_like(self.dirty_data) std_current = 1000 std_last = 1 std_ratio = 1 min_scale = 0 # The current minimum scale of interest. If this ever equals or exceeds the scale_count # value, it will also break the following loop. # In the case that edge_supression is desired, the following sets up a masking array. if edge_suppression: edge_corruption = 0 suppression_array = np.zeros([scale_count,subregion,subregion],np.float32) for i in range(scale_count): edge_corruption += 2*2**i if edge_offset>edge_corruption: suppression_array[i,edge_offset:-edge_offset, edge_offset:-edge_offset] = 1 else: suppression_array[i,edge_corruption:-edge_corruption, edge_corruption:-edge_corruption] = 1 elif edge_offset>0: suppression_array = np.zeros([scale_count,subregion,subregion],np.float32) suppression_array[:,edge_offset:-edge_offset, edge_offset:-edge_offset] = 1 # The following is the major loop. Its exit conditions are reached if if the number of major loop iterations # exceeds a user defined value, the maximum wavelet coefficient is zero or the standard deviation of the # residual drops below a user specified accuracy threshold. while (((major_loop_niter<major_loop_miter) & (max_coeff>0)) & ((std_ratio>accuracy) & (np.max(dirty_subregion)>flux_threshold))): # The first interior loop allows for the model to be re-estimated at a higher scale in the case of a poor # SNR. If, however, a better job cannot be done, the loop will terminate. while (min_scale<scale_count): # This is the IUWT decomposition of the dirty image subregion up to scale_count, followed by a # thresholding of the resulting wavelet coefficients based on the MAD estimator. 
This is a denoising # operation. if min_scale==0: dirty_decomposition = iuwt.iuwt_decomposition(dirty_subregion, scale_count, 0, decom_mode, core_count) thresholds = tools.estimate_threshold(dirty_decomposition, edge_excl, int_excl) if self.mask_name is not None: dirty_decomposition = iuwt.iuwt_decomposition(dirty_subregion*self.mask[subregion_slice], scale_count, 0, decom_mode, core_count) dirty_decomposition_thresh = tools.apply_threshold(dirty_decomposition, thresholds, sigma_level=sigma_level) # If edge_supression is desired, the following simply masks out the offending wavelet coefficients. if edge_suppression|(edge_offset>0): dirty_decomposition_thresh *= suppression_array # The following calculates and stores the normalised maximum at each scale. normalised_scale_maxima = np.empty_like(psf_energies) for i in range(dirty_decomposition_thresh.shape[0]): normalised_scale_maxima[i] = np.max(dirty_decomposition_thresh[i,:,:])/psf_energies[i] # The following stores the index, scale and value of the global maximum coefficient. max_index = np.argmax(normalised_scale_maxima[min_scale:,:,:]) + min_scale max_scale = max_index + 1 max_coeff = normalised_scale_maxima[max_index,0,0] # This is an escape condition for the loop. If the maximum coefficient is zero, then there is no # useful information left in the wavelets and MORESANE is complete. if max_coeff == 0: logger.info("No significant wavelet coefficients detected.") break logger.info("Minimum scale = {}".format(min_scale)) logger.info("Maximum scale = {}".format(max_scale)) # The following constitutes a major change to the original implementation - the aim is to establish # as soon as possible which scales are to be omitted on the current iteration. This attempts to find # a local maxima or empty scales below the maximum scale. If either is found, that scale all those # below it are ignored. 
scale_adjust = 0 for i in range(max_index-1,-1,-1): # if max_index > 1: # if (normalised_scale_maxima[i,0,0] > normalised_scale_maxima[i+1,0,0]): # scale_adjust = i + 1 # logger.info("Scale {} contains a local maxima. Ignoring scales <= {}" # .format(scale_adjust, scale_adjust)) # break if (normalised_scale_maxima[i,0,0] == 0): scale_adjust = i + 1 logger.info("Scale {} is empty. Ignoring scales <= {}".format(scale_adjust, scale_adjust)) break # We choose to only consider scales up to the scale containing the maximum wavelet coefficient, # and ignore scales at or below the scale adjustment. thresh_slice = dirty_decomposition_thresh[scale_adjust:max_scale,:,:] # The following is a call to the externally defined source extraction function. It returns an array # populated with the wavelet coefficients of structures of interest in the image. This basically refers # to objects containing a maximum wavelet coefficient within some user-specified tolerance of the # maximum at that scale. extracted_sources, extracted_sources_mask = \ tools.source_extraction(thresh_slice, tolerance, mode=extraction_mode, store_on_gpu=all_on_gpu, neg_comp=neg_comp) # for blah in range(extracted_sources.shape[0]): # # plt.imshow(extracted_sources[blah,:,:], # interpolation="none") # plt.show() # The wavelet coefficients of the extracted sources are recomposed into a single image, # which should contain only the structures of interest. recomposed_sources = iuwt.iuwt_recomposition(extracted_sources, scale_adjust, decom_mode, core_count) ######################################################MINOR LOOP###################################################### x = np.zeros_like(recomposed_sources) r = recomposed_sources.copy() p = recomposed_sources.copy() minor_loop_niter = 0 snr_last = 0 snr_current = 0 # The following is the minor loop of the algorithm. In particular, we make use of the conjugate # gradient descent method to optimise our model. 
The variables have been named in order to appear # consistent with the algorithm. while (minor_loop_niter<minor_loop_miter): Ap = conv.fft_convolve(p, psf_subregion_fft, conv_device, conv_mode, store_on_gpu=all_on_gpu) Ap = iuwt.iuwt_decomposition(Ap, max_scale, scale_adjust, decom_mode, core_count, store_on_gpu=all_on_gpu) Ap = extracted_sources_mask*Ap Ap = iuwt.iuwt_recomposition(Ap, scale_adjust, decom_mode, core_count) alpha_denominator = np.dot(p.reshape(1,-1),Ap.reshape(-1,1))[0,0] alpha_numerator = np.dot(r.reshape(1,-1),r.reshape(-1,1))[0,0] alpha = alpha_numerator/alpha_denominator xn = x + alpha*p # The following enforces the positivity constraint which necessitates some recalculation. if (np.min(xn)<0) & (enforce_positivity): xn[xn<0] = 0 p = (xn-x)/alpha Ap = conv.fft_convolve(p, psf_subregion_fft, conv_device, conv_mode, store_on_gpu=all_on_gpu) Ap = iuwt.iuwt_decomposition(Ap, max_scale, scale_adjust, decom_mode, core_count, store_on_gpu=all_on_gpu) Ap = extracted_sources_mask*Ap Ap = iuwt.iuwt_recomposition(Ap, scale_adjust, decom_mode, core_count) rn = r - alpha*Ap beta_numerator = np.dot(rn.reshape(1,-1), rn.reshape(-1,1))[0,0] beta_denominator = np.dot(r.reshape(1,-1), r.reshape(-1,1))[0,0] beta = beta_numerator/beta_denominator p = rn + beta*p model_sources = conv.fft_convolve(xn, psf_subregion_fft, conv_device, conv_mode, store_on_gpu=all_on_gpu) model_sources = iuwt.iuwt_decomposition(model_sources, max_scale, scale_adjust, decom_mode, core_count, store_on_gpu=all_on_gpu) model_sources = extracted_sources_mask*model_sources if all_on_gpu: model_sources = model_sources.get() # We compare our model to the sources extracted from the data. snr_last = snr_current snr_current = tools.snr_ratio(extracted_sources, model_sources) minor_loop_niter += 1 logger.debug("SNR at iteration {0} = {1}".format(minor_loop_niter, snr_current)) # The following flow control determines whether or not the model is adequate and if a # recalculation is required. 
if (minor_loop_niter==1)&(snr_current>40): logger.info("SNR too large on first iteration - false detection. " "Incrementing the minimum scale.") min_scale += 1 break if snr_current>40: logger.info("Model has reached <1% error - exiting minor loop.") x = xn min_scale = 0 break if (minor_loop_niter>2)&(snr_current<=snr_last): if (snr_current>10.5): logger.info("SNR has decreased - Model has reached ~{}% error - exiting minor loop." \ .format(int(100/np.power(10,snr_current/20)))) min_scale = 0 break else: logger.info("SNR has decreased - SNR too small. Incrementing the minimum scale.") min_scale += 1 break r = rn x = xn logger.info("{} minor loop iterations performed.".format(minor_loop_niter)) if ((minor_loop_niter==minor_loop_miter)&(snr_current>10.5)): logger.info("Maximum number of minor loop iterations exceeded. Model reached ~{}% error." \ .format(int(100/np.power(10,snr_current/20)))) min_scale = 0 break if (min_scale==0): break ###################################################END OF MINOR LOOP################################################### if min_scale==scale_count: logger.info("All scales are performing poorly - stopping.") break # The following handles the deconvolution step. The model convolved with the psf is subtracted from the # dirty image to give the residual. if max_coeff>0: # x[abs(x)<0.8*np.max(np.abs(x))] = 0 model[subregion_slice] += loop_gain*x residual = self.dirty_data - conv.fft_convolve(model, psf_data_fft, conv_device, conv_mode) # The following assesses whether or not the residual has improved. std_last = std_current std_current = np.std(residual[subregion_slice]) std_ratio = (std_last-std_current)/std_last # If the most recent deconvolution step is poor, the following reverts the changes so that the # previous model and residual are preserved. 
if std_ratio<0: logger.info("Residual has worsened - reverting changes.") model[subregion_slice] -= loop_gain*x residual = self.dirty_data - conv.fft_convolve(model, psf_data_fft, conv_device, conv_mode) # The current residual becomes the dirty image for the subsequent iteration. dirty_subregion = residual[subregion_slice] major_loop_niter += 1 logger.info("{} major loop iterations performed.".format(major_loop_niter)) # The following condition will only trigger if MORESANE did no work - this is an exit condition for the # by-scale approach. if (major_loop_niter==0): logger.info("Current MORESANE iteration did no work - finished.") self.complete = True break # If MORESANE did work at the current iteration, the following simply updates the values in the class # variables self.model and self.residual. if major_loop_niter>0: self.model += model self.residual = residual
[ "def", "moresane", "(", "self", ",", "subregion", "=", "None", ",", "scale_count", "=", "None", ",", "sigma_level", "=", "4", ",", "loop_gain", "=", "0.1", ",", "tolerance", "=", "0.75", ",", "accuracy", "=", "1e-6", ",", "major_loop_miter", "=", "100", ...
Primary method for wavelet analysis and subsequent deconvolution. INPUTS: subregion (default=None): Size, in pixels, of the central region to be analyzed and deconvolved. scale_count (default=None): Maximum scale to be considered - maximum scale considered during initialisation. sigma_level (default=4) Number of sigma at which thresholding is to be performed. loop_gain (default=0.1): Loop gain for the deconvolution. tolerance (default=0.75): Tolerance level for object extraction. Significant objects contain wavelet coefficients greater than the tolerance multiplied by the maximum wavelet coefficient in the scale under consideration. accuracy (default=1e-6): Threshold on the standard deviation of the residual noise. Exit main loop when this threshold is reached. major_loop_miter (default=100): Maximum number of iterations allowed in the major loop. Exit condition. minor_loop_miter (default=30): Maximum number of iterations allowed in the minor loop. Serves as an exit condition when the SNR is does not reach a maximum. all_on_gpu (default=False): Boolean specifier to toggle all gpu modes on. decom_mode (default='ser'): Specifier for decomposition mode - serial, multiprocessing, or gpu. core_count (default=1): For multiprocessing, specifies the number of cores. conv_device (default='cpu'): Specifier for device to be used - cpu or gpu. conv_mode (default='linear'): Specifier for convolution mode - linear or circular. extraction_mode (default='cpu'): Specifier for mode to be used - cpu or gpu. enforce_positivity (default=False): Boolean specifier for whether or not a model must be strictly positive. edge_suppression (default=False): Boolean specifier for whether or not the edges are to be suprressed. edge_offset (default=0): Numeric value for an additional user-specified number of edge pixels to be ignored. This is added to the minimum suppression. flux_threshold (default=0): Float value, assumed to be in Jy, which specifies an approximate convolution depth. 
OUTPUTS: self.model (no default): Model extracted by the algorithm. self.residual (no default): Residual signal after deconvolution.
[ "Primary", "method", "for", "wavelet", "analysis", "and", "subsequent", "deconvolution", "." ]
python
train
Robin8Put/pmes
ams/utils/tornado_components/mongo.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/ams/utils/tornado_components/mongo.py#L36-L58
async def read(self, *_id): """Read data from database table. Accepts ids of entries. Returns list of results if success or string with error code and explanation. read(*id) => [(result), (result)] (if success) read(*id) => [] (if missed) read() => {"error":400, "reason":"Missed required fields"} """ if not _id: return {"error":400, "reason":"Missed required fields"} result = [] for i in _id: document = await self.collection.find_one({"id":i}) try: result.append({i:document[i] for i in document if i != "_id"}) except: continue return result
[ "async", "def", "read", "(", "self", ",", "*", "_id", ")", ":", "if", "not", "_id", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields\"", "}", "result", "=", "[", "]", "for", "i", "in", "_id", ":", "docume...
Read data from database table. Accepts ids of entries. Returns list of results if success or string with error code and explanation. read(*id) => [(result), (result)] (if success) read(*id) => [] (if missed) read() => {"error":400, "reason":"Missed required fields"}
[ "Read", "data", "from", "database", "table", ".", "Accepts", "ids", "of", "entries", ".", "Returns", "list", "of", "results", "if", "success", "or", "string", "with", "error", "code", "and", "explanation", "." ]
python
train
mbedmicro/pyOCD
pyocd/target/pack/flash_algo.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/flash_algo.py#L177-L188
def _extract_symbols(self, symbols, default=None): """! @brief Fill 'symbols' field with required flash algo symbols""" to_ret = {} for symbol in symbols: symbolInfo = self.elf.symbol_decoder.get_symbol_for_name(symbol) if symbolInfo is None: if default is not None: to_ret[symbol] = default continue raise FlashAlgoException("Missing symbol %s" % symbol) to_ret[symbol] = symbolInfo.address return to_ret
[ "def", "_extract_symbols", "(", "self", ",", "symbols", ",", "default", "=", "None", ")", ":", "to_ret", "=", "{", "}", "for", "symbol", "in", "symbols", ":", "symbolInfo", "=", "self", ".", "elf", ".", "symbol_decoder", ".", "get_symbol_for_name", "(", ...
! @brief Fill 'symbols' field with required flash algo symbols
[ "!" ]
python
train
sqreen/PyMiniRacer
py_mini_racer/py_mini_racer.py
https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L168-L180
def call(self, identifier, *args, **kwargs): """ Call the named function with provided arguments You can pass a custom JSON encoder by passing it in the encoder keyword only argument. """ encoder = kwargs.get('encoder', None) timeout = kwargs.get('timeout', 0) max_memory = kwargs.get('max_memory', 0) json_args = json.dumps(args, separators=(',', ':'), cls=encoder) js = "{identifier}.apply(this, {json_args})" return self.eval(js.format(identifier=identifier, json_args=json_args), timeout, max_memory)
[ "def", "call", "(", "self", ",", "identifier", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "encoder", "=", "kwargs", ".", "get", "(", "'encoder'", ",", "None", ")", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "0", ")", "...
Call the named function with provided arguments You can pass a custom JSON encoder by passing it in the encoder keyword only argument.
[ "Call", "the", "named", "function", "with", "provided", "arguments", "You", "can", "pass", "a", "custom", "JSON", "encoder", "by", "passing", "it", "in", "the", "encoder", "keyword", "only", "argument", "." ]
python
train
Hackerfleet/hfos
hfos/component.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/component.py#L255-L267
def _read_config(self): """Read this component's configuration from the database""" try: self.config = self.componentmodel.find_one( {'name': self.uniquename}) except ServerSelectionTimeoutError: # pragma: no cover self.log("No database access! Check if mongodb is running " "correctly.", lvl=critical) if self.config: self.log("Configuration read.", lvl=verbose) else: self.log("No configuration found.", lvl=warn)
[ "def", "_read_config", "(", "self", ")", ":", "try", ":", "self", ".", "config", "=", "self", ".", "componentmodel", ".", "find_one", "(", "{", "'name'", ":", "self", ".", "uniquename", "}", ")", "except", "ServerSelectionTimeoutError", ":", "# pragma: no co...
Read this component's configuration from the database
[ "Read", "this", "component", "s", "configuration", "from", "the", "database" ]
python
train
SheffieldML/GPy
GPy/core/symbolic.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/symbolic.py#L255-L283
def update_expression_list(self): """Extract a list of expressions from the dictionary of expressions.""" self.expression_list = [] # code arrives in dictionary, but is passed in this list self.expression_keys = [] # Keep track of the dictionary keys. self.expression_order = [] # This may be unecessary. It's to give ordering for cse for fname, fexpressions in self.expressions.items(): for type, texpressions in fexpressions.items(): if type == 'function': self.expression_list.append(texpressions) self.expression_keys.append([fname, type]) self.expression_order.append(1) elif type[-10:] == 'derivative': for dtype, expression in texpressions.items(): self.expression_list.append(expression) self.expression_keys.append([fname, type, dtype]) if type[:-10] == 'first_' or type[:-10] == '': self.expression_order.append(3) #sym.count_ops(self.expressions[type][dtype])) elif type[:-10] == 'second_': self.expression_order.append(4) #sym.count_ops(self.expressions[type][dtype])) elif type[:-10] == 'third_': self.expression_order.append(5) #sym.count_ops(self.expressions[type][dtype])) else: self.expression_list.append(fexpressions[type]) self.expression_keys.append([fname, type]) self.expression_order.append(2) # This step may be unecessary. # Not 100% sure if the sub expression elimination is order sensitive. This step orders the list with the 'function' code first and derivatives after. self.expression_order, self.expression_list, self.expression_keys = zip(*sorted(zip(self.expression_order, self.expression_list, self.expression_keys)))
[ "def", "update_expression_list", "(", "self", ")", ":", "self", ".", "expression_list", "=", "[", "]", "# code arrives in dictionary, but is passed in this list", "self", ".", "expression_keys", "=", "[", "]", "# Keep track of the dictionary keys.", "self", ".", "expressi...
Extract a list of expressions from the dictionary of expressions.
[ "Extract", "a", "list", "of", "expressions", "from", "the", "dictionary", "of", "expressions", "." ]
python
train
astropy/astropy-helpers
astropy_helpers/setup_helpers.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/setup_helpers.py#L357-L369
def update_package_files(srcdir, extensions, package_data, packagenames, package_dirs): """ This function is deprecated and maintained for backward compatibility with affiliated packages. Affiliated packages should update their setup.py to use `get_package_info` instead. """ info = get_package_info(srcdir) extensions.extend(info['ext_modules']) package_data.update(info['package_data']) packagenames = list(set(packagenames + info['packages'])) package_dirs.update(info['package_dir'])
[ "def", "update_package_files", "(", "srcdir", ",", "extensions", ",", "package_data", ",", "packagenames", ",", "package_dirs", ")", ":", "info", "=", "get_package_info", "(", "srcdir", ")", "extensions", ".", "extend", "(", "info", "[", "'ext_modules'", "]", ...
This function is deprecated and maintained for backward compatibility with affiliated packages. Affiliated packages should update their setup.py to use `get_package_info` instead.
[ "This", "function", "is", "deprecated", "and", "maintained", "for", "backward", "compatibility", "with", "affiliated", "packages", ".", "Affiliated", "packages", "should", "update", "their", "setup", ".", "py", "to", "use", "get_package_info", "instead", "." ]
python
train
proycon/clam
clam/common/parameters.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/parameters.py#L582-L594
def set(self, value): """This parameter method attempts to set a specific value for this parameter. The value will be validated first, and if it can not be set. An error message will be set in the error property of this parameter""" if self.validate(value): #print "Parameter " + self.id + " successfully set to " + repr(value) self.hasvalue = True if isinstance(value, float): self.value = round(value) else: self.value = int(value) return True else: #print "Parameter " + self.id + " COULD NOT BE set to " + repr(value) return False
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "self", ".", "validate", "(", "value", ")", ":", "#print \"Parameter \" + self.id + \" successfully set to \" + repr(value)", "self", ".", "hasvalue", "=", "True", "if", "isinstance", "(", "value", ",", "f...
This parameter method attempts to set a specific value for this parameter. The value will be validated first, and if it can not be set. An error message will be set in the error property of this parameter
[ "This", "parameter", "method", "attempts", "to", "set", "a", "specific", "value", "for", "this", "parameter", ".", "The", "value", "will", "be", "validated", "first", "and", "if", "it", "can", "not", "be", "set", ".", "An", "error", "message", "will", "b...
python
train
lalinsky/python-phoenixdb
phoenixdb/cursor.py
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L86-L102
def close(self): """Closes the cursor. No further operations are allowed once the cursor is closed. If the cursor is used in a ``with`` statement, this method will be automatically called at the end of the ``with`` block. """ if self._closed: raise ProgrammingError('the cursor is already closed') if self._id is not None: self._connection._client.close_statement(self._connection._id, self._id) self._id = None self._signature = None self._column_data_types = [] self._frame = None self._pos = None self._closed = True
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "raise", "ProgrammingError", "(", "'the cursor is already closed'", ")", "if", "self", ".", "_id", "is", "not", "None", ":", "self", ".", "_connection", ".", "_client", ".", "close_s...
Closes the cursor. No further operations are allowed once the cursor is closed. If the cursor is used in a ``with`` statement, this method will be automatically called at the end of the ``with`` block.
[ "Closes", "the", "cursor", ".", "No", "further", "operations", "are", "allowed", "once", "the", "cursor", "is", "closed", "." ]
python
train
django-parler/django-parler
parler/models.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/models.py#L678-L700
def save_translations(self, *args, **kwargs): """ The method to save all translations. This can be overwritten to implement any custom additions. This method calls :func:`save_translation` for every fetched language. :param args: Any custom arguments to pass to :func:`save`. :param kwargs: Any custom arguments to pass to :func:`save`. """ # Copy cache, new objects (e.g. fallbacks) might be fetched if users override save_translation() # Not looping over the cache, but using _parler_meta so the translations are processed in the order of inheritance. local_caches = self._translations_cache.copy() for meta in self._parler_meta: local_cache = local_caches[meta.model] translations = list(local_cache.values()) # Save all translated objects which were fetched. # This also supports switching languages several times, and save everything in the end. for translation in translations: if is_missing(translation): # Skip fallback markers continue self.save_translation(translation, *args, **kwargs)
[ "def", "save_translations", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Copy cache, new objects (e.g. fallbacks) might be fetched if users override save_translation()", "# Not looping over the cache, but using _parler_meta so the translations are processed in the...
The method to save all translations. This can be overwritten to implement any custom additions. This method calls :func:`save_translation` for every fetched language. :param args: Any custom arguments to pass to :func:`save`. :param kwargs: Any custom arguments to pass to :func:`save`.
[ "The", "method", "to", "save", "all", "translations", ".", "This", "can", "be", "overwritten", "to", "implement", "any", "custom", "additions", ".", "This", "method", "calls", ":", "func", ":", "save_translation", "for", "every", "fetched", "language", "." ]
python
train
astraw/stdeb
stdeb/util.py
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L261-L269
def get_date_822(): """return output of 822-date command""" cmd = '/bin/date' if not os.path.exists(cmd): raise ValueError('%s command does not exist.'%cmd) args = [cmd,'-R'] result = get_cmd_stdout(args).strip() result = normstr(result) return result
[ "def", "get_date_822", "(", ")", ":", "cmd", "=", "'/bin/date'", "if", "not", "os", ".", "path", ".", "exists", "(", "cmd", ")", ":", "raise", "ValueError", "(", "'%s command does not exist.'", "%", "cmd", ")", "args", "=", "[", "cmd", ",", "'-R'", "]"...
return output of 822-date command
[ "return", "output", "of", "822", "-", "date", "command" ]
python
train
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L504-L535
def initialize_optimizer(self, triggered_directly=True): """Initialize the model optimizer. If ``self.optimizer__lr`` is not set, use ``self.lr`` instead. Parameters ---------- triggered_directly : bool (default=True) Only relevant when optimizer is re-initialized. Initialization of the optimizer can be triggered directly (e.g. when lr was changed) or indirectly (e.g. when the module was re-initialized). If and only if the former happens, the user should receive a message informing them about the parameters that caused the re-initialization. """ args, kwargs = self._get_params_for_optimizer( 'optimizer', self.module_.named_parameters()) if self.initialized_ and self.verbose: msg = self._format_reinit_msg( "optimizer", kwargs, triggered_directly=triggered_directly) print(msg) if 'lr' not in kwargs: kwargs['lr'] = self.lr self.optimizer_ = self.optimizer(*args, **kwargs) self._register_virtual_param( ['optimizer__param_groups__*__*', 'optimizer__*', 'lr'], optimizer_setter, )
[ "def", "initialize_optimizer", "(", "self", ",", "triggered_directly", "=", "True", ")", ":", "args", ",", "kwargs", "=", "self", ".", "_get_params_for_optimizer", "(", "'optimizer'", ",", "self", ".", "module_", ".", "named_parameters", "(", ")", ")", "if", ...
Initialize the model optimizer. If ``self.optimizer__lr`` is not set, use ``self.lr`` instead. Parameters ---------- triggered_directly : bool (default=True) Only relevant when optimizer is re-initialized. Initialization of the optimizer can be triggered directly (e.g. when lr was changed) or indirectly (e.g. when the module was re-initialized). If and only if the former happens, the user should receive a message informing them about the parameters that caused the re-initialization.
[ "Initialize", "the", "model", "optimizer", ".", "If", "self", ".", "optimizer__lr", "is", "not", "set", "use", "self", ".", "lr", "instead", "." ]
python
train
twilio/twilio-python
twilio/rest/studio/v1/flow/engagement/step/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/engagement/step/__init__.py#L255-L269
def step_context(self): """ Access the step_context :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList """ if self._step_context is None: self._step_context = StepContextList( self._version, flow_sid=self._solution['flow_sid'], engagement_sid=self._solution['engagement_sid'], step_sid=self._solution['sid'], ) return self._step_context
[ "def", "step_context", "(", "self", ")", ":", "if", "self", ".", "_step_context", "is", "None", ":", "self", ".", "_step_context", "=", "StepContextList", "(", "self", ".", "_version", ",", "flow_sid", "=", "self", ".", "_solution", "[", "'flow_sid'", "]",...
Access the step_context :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
[ "Access", "the", "step_context" ]
python
train
HDI-Project/MLPrimitives
mlprimitives/candidates/timeseries_errors.py
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/timeseries_errors.py#L128-L183
def compute_threshold(smoothed_errors, error_buffer, sd_limit=12.0): """Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies. """ mu = np.mean(smoothed_errors) sigma = np.std(smoothed_errors) max_epsilon = 0 sd_threshold = sd_limit # The treshold is determined dynamically by testing multiple Zs. # z is drawn from an ordered set of positive values representing the # number of standard deviations above mean(smoothed_errors) # here we iterate in increments of 0.5 on the range that the NASA paper found to be good for z in np.arange(2.5, sd_limit, 0.5): epsilon = mu + (sigma * z) below_epsilon, below_indices, above_epsilon = [], [], [] for i in range(len(smoothed_errors)): e = smoothed_errors[i] if e < epsilon: # save to compute delta mean and delta std # these are important for epsilon calculation below_epsilon.append(e) below_indices.append(i) if e > epsilon: # above_epsilon values are anomalies for j in range(0, error_buffer): if (i + j) not in above_epsilon and (i + j) < len(smoothed_errors): above_epsilon.append(i + j) if (i - j) not in above_epsilon and (i - j) >= 0: above_epsilon.append(i - j) if len(above_epsilon) == 0: continue # generate sequences above_epsilon = sorted(list(set(above_epsilon))) groups = [list(group) for group in mit.consecutive_groups(above_epsilon)] above_sequences = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]] mean_perc_decrease = (mu - np.mean(below_epsilon)) / mu sd_perc_decrease = (sigma - np.std(below_epsilon)) / sigma epsilon = (mean_perc_decrease + sd_perc_decrease) /\ (len(above_sequences)**2 + len(above_epsilon)) # update the largest epsilon we've seen so far if epsilon > max_epsilon: sd_threshold = z max_epsilon = epsilon # sd_threshold can be multiplied by sigma to get epsilon return max_epsilon, sd_threshold
[ "def", "compute_threshold", "(", "smoothed_errors", ",", "error_buffer", ",", "sd_limit", "=", "12.0", ")", ":", "mu", "=", "np", ".", "mean", "(", "smoothed_errors", ")", "sigma", "=", "np", ".", "std", "(", "smoothed_errors", ")", "max_epsilon", "=", "0"...
Helper method for `extract_anomalies` method. Calculates the epsilon (threshold) for anomalies.
[ "Helper", "method", "for", "extract_anomalies", "method", ".", "Calculates", "the", "epsilon", "(", "threshold", ")", "for", "anomalies", "." ]
python
train
Yubico/yubikey-manager
ykman/cli/piv.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/piv.py#L635-L670
def change_pin(ctx, pin, new_pin): """ Change the PIN code. The PIN must be between 6 and 8 characters long, and supports any type of alphanumeric characters. For cross-platform compatibility, numeric digits are recommended. """ controller = ctx.obj['controller'] if not pin: pin = _prompt_pin(ctx, prompt='Enter your current PIN') if not new_pin: new_pin = click.prompt( 'Enter your new PIN', default='', hide_input=True, show_default=False, confirmation_prompt=True, err=True) if not _valid_pin_length(pin): ctx.fail('Current PIN must be between 6 and 8 characters long.') if not _valid_pin_length(new_pin): ctx.fail('New PIN must be between 6 and 8 characters long.') try: controller.change_pin(pin, new_pin) click.echo('New PIN set.') except AuthenticationBlocked as e: logger.debug('PIN is blocked.', exc_info=e) ctx.fail('PIN is blocked.') except WrongPin as e: logger.debug( 'Failed to change PIN, %d tries left', e.tries_left, exc_info=e) ctx.fail('PIN change failed - %d tries left.' % e.tries_left)
[ "def", "change_pin", "(", "ctx", ",", "pin", ",", "new_pin", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "if", "not", "pin", ":", "pin", "=", "_prompt_pin", "(", "ctx", ",", "prompt", "=", "'Enter your current PIN'", ")", ...
Change the PIN code. The PIN must be between 6 and 8 characters long, and supports any type of alphanumeric characters. For cross-platform compatibility, numeric digits are recommended.
[ "Change", "the", "PIN", "code", "." ]
python
train
foremast/foremast
src/foremast/awslambda/cloudwatch_event/destroy_cloudwatch_event/destroy_cloudwatch_event.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/cloudwatch_event/destroy_cloudwatch_event/destroy_cloudwatch_event.py#L26-L45
def destroy_cloudwatch_event(app='', env='dev', region=''): """Destroy Cloudwatch event subscription. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: bool: True upon successful completion. """ session = boto3.Session(profile_name=env, region_name=region) cloudwatch_client = session.client('events') event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region) for rule in event_rules: cloudwatch_client.remove_targets(Rule=rule, Ids=[app]) return True
[ "def", "destroy_cloudwatch_event", "(", "app", "=", "''", ",", "env", "=", "'dev'", ",", "region", "=", "''", ")", ":", "session", "=", "boto3", ".", "Session", "(", "profile_name", "=", "env", ",", "region_name", "=", "region", ")", "cloudwatch_client", ...
Destroy Cloudwatch event subscription. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: bool: True upon successful completion.
[ "Destroy", "Cloudwatch", "event", "subscription", "." ]
python
train
MacHu-GWU/angora-project
angora/text/strtemplate.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/text/strtemplate.py#L98-L102
def box_show(text, width=100, height=3, corner="+", horizontal="-", vertical="|"): """Print a formatted ascii text box. """ print(StrTemplate.box(text=text, width=width, height=height, corner=corner, horizontal=horizontal, vertical=vertical))
[ "def", "box_show", "(", "text", ",", "width", "=", "100", ",", "height", "=", "3", ",", "corner", "=", "\"+\"", ",", "horizontal", "=", "\"-\"", ",", "vertical", "=", "\"|\"", ")", ":", "print", "(", "StrTemplate", ".", "box", "(", "text", "=", "te...
Print a formatted ascii text box.
[ "Print", "a", "formatted", "ascii", "text", "box", "." ]
python
train
Vital-Fernandez/dazer
bin/lib/Astro_Libraries/f2n.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L656-L663
def loadtitlefont(self): """Auxiliary method to load font if not yet done.""" if self.titlefont == None: # print 'the bloody fonts dir is????', fontsdir # print 'pero esto que hace??', os.path.join(fontsdir, "courR18.pil") # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/f2n_fonts/courR18.pil # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts self.titlefont = imft.load_path(os.path.join(fontsdir, "courR18.pil"))
[ "def", "loadtitlefont", "(", "self", ")", ":", "if", "self", ".", "titlefont", "==", "None", ":", "# print 'the bloody fonts dir is????', fontsdir", "# print 'pero esto que hace??', os.path.join(fontsdir, \"courR18.pil\")", "# /home/vital/Workspace/p...
Auxiliary method to load font if not yet done.
[ "Auxiliary", "method", "to", "load", "font", "if", "not", "yet", "done", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L1373-L1379
def refresh_save_all_action(self): """Enable 'Save All' if there are files to be saved""" editorstack = self.get_current_editorstack() if editorstack: state = any(finfo.editor.document().isModified() or finfo.newly_created for finfo in editorstack.data) self.save_all_action.setEnabled(state)
[ "def", "refresh_save_all_action", "(", "self", ")", ":", "editorstack", "=", "self", ".", "get_current_editorstack", "(", ")", "if", "editorstack", ":", "state", "=", "any", "(", "finfo", ".", "editor", ".", "document", "(", ")", ".", "isModified", "(", ")...
Enable 'Save All' if there are files to be saved
[ "Enable", "Save", "All", "if", "there", "are", "files", "to", "be", "saved" ]
python
train
quodlibet/mutagen
mutagen/asf/__init__.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/asf/__init__.py#L228-L251
def load(self, filething): """load(filething) Args: filething (filething) Raises: mutagen.MutagenError """ fileobj = filething.fileobj self.info = ASFInfo() self.tags = ASFTags() self._tags = {} self._header = HeaderObject.parse_full(self, fileobj) for guid in [ContentDescriptionObject.GUID, ExtendedContentDescriptionObject.GUID, MetadataObject.GUID, MetadataLibraryObject.GUID]: self.tags.extend(self._tags.pop(guid, [])) assert not self._tags
[ "def", "load", "(", "self", ",", "filething", ")", ":", "fileobj", "=", "filething", ".", "fileobj", "self", ".", "info", "=", "ASFInfo", "(", ")", "self", ".", "tags", "=", "ASFTags", "(", ")", "self", ".", "_tags", "=", "{", "}", "self", ".", "...
load(filething) Args: filething (filething) Raises: mutagen.MutagenError
[ "load", "(", "filething", ")" ]
python
train
tanghaibao/goatools
goatools/go_enrichment.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L532-L535
def print_results(self, results, min_ratio=None, indent=False, pval=0.05, prt=sys.stdout): """Print GOEA results with some additional statistics calculated.""" results_adj = self.get_adj_records(results, min_ratio, pval) self.print_results_adj(results_adj, indent, prt)
[ "def", "print_results", "(", "self", ",", "results", ",", "min_ratio", "=", "None", ",", "indent", "=", "False", ",", "pval", "=", "0.05", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "results_adj", "=", "self", ".", "get_adj_records", "(", "result...
Print GOEA results with some additional statistics calculated.
[ "Print", "GOEA", "results", "with", "some", "additional", "statistics", "calculated", "." ]
python
train
Skype4Py/Skype4Py
Skype4Py/skype.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L818-L830
def SendVoicemail(self, Username): """Sends a voicemail to a specified user. :Parameters: Username : str Skypename of the user. :note: Should return a `Voicemail` object. This is not implemented yet. """ if self._Api.protocol >= 6: self._DoCommand('CALLVOICEMAIL %s' % Username) else: self._DoCommand('VOICEMAIL %s' % Username)
[ "def", "SendVoicemail", "(", "self", ",", "Username", ")", ":", "if", "self", ".", "_Api", ".", "protocol", ">=", "6", ":", "self", ".", "_DoCommand", "(", "'CALLVOICEMAIL %s'", "%", "Username", ")", "else", ":", "self", ".", "_DoCommand", "(", "'VOICEMA...
Sends a voicemail to a specified user. :Parameters: Username : str Skypename of the user. :note: Should return a `Voicemail` object. This is not implemented yet.
[ "Sends", "a", "voicemail", "to", "a", "specified", "user", "." ]
python
train
nigma/django-twilio-sms
src/decorators.py
https://github.com/nigma/django-twilio-sms/blob/386999c3da545e001cb8977c78b67408e33aba11/src/decorators.py#L23-L103
def twilio_view(f): """This decorator provides several helpful shortcuts for writing Twilio views. - It ensures that only requests from Twilio are passed through. This helps protect you from forged requests. - It ensures your view is exempt from CSRF checks via Django's @csrf_exempt decorator. This is necessary for any view that accepts POST requests from outside the local domain (eg: Twilio's servers). - It allows your view to (optionally) return TwiML to pass back to Twilio's servers instead of building a ``HttpResponse`` object manually. - It allows your view to (optionally) return any ``twilio.Verb`` object instead of building a ``HttpResponse`` object manually. Usage:: from twilio.twiml import Response @twilio_view def my_view(request): r = Response() r.sms("Thanks for the SMS message!") return r """ @csrf_exempt @wraps(f) def decorator(request, *args, **kwargs): # Attempt to gather all required information to allow us to check the # incoming HTTP request for forgery. If any of this information is not # available, then we'll throw a HTTP 403 error (forbidden). 
# Ensure the request method is POST if request.method != "POST": logger.error("Twilio: Expected POST request", extra={"request": request}) return HttpResponseNotAllowed(request.method) if not getattr(settings, "TWILIO_SKIP_SIGNATURE_VALIDATION"): # Validate the request try: validator = RequestValidator(settings.TWILIO_AUTH_TOKEN) url = request.build_absolute_uri() # Ensure the original requested url is tested for validation # Prevents breakage when processed behind a proxy server if "HTTP_X_FORWARDED_SERVER" in request.META: protocol = "https" if request.META["HTTP_X_TWILIO_SSL"] == "Enabled" else "http" url = "{0}://{1}{2}".format( protocol, request.META["HTTP_X_FORWARDED_SERVER"], request.META["REQUEST_URI"] ) signature = request.META["HTTP_X_TWILIO_SIGNATURE"] except (AttributeError, KeyError) as e: logger.exception("Twilio: Missing META param", extra={"request": request}) return HttpResponseForbidden("Missing META param: %s" % e) # Now that we have all the required information to perform forgery # checks, we'll actually do the forgery check. if not validator.validate(url, request.POST, signature): logger.error( "Twilio: Invalid url signature %s - %s - %s", url, request.POST, signature, extra={"request": request} ) return HttpResponseForbidden("Invalid signature") # Run the wrapped view, and capture the data returned. response = f(request, *args, **kwargs) # If the view returns a string (or a ``twilio.Verb`` object), we'll # assume it is XML TwilML data and pass it back with the appropriate # mimetype. We won't check the XML data because that would be too time # consuming for every request. Instead, we'll let the errors pass # through to be dealt with by the developer. if isinstance(response, six.text_type): return HttpResponse(response, mimetype="application/xml") elif isinstance(response, Verb): return HttpResponse(force_text(response), mimetype="application/xml") else: return response return decorator
[ "def", "twilio_view", "(", "f", ")", ":", "@", "csrf_exempt", "@", "wraps", "(", "f", ")", "def", "decorator", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Attempt to gather all required information to allow us to check the", "# incomi...
This decorator provides several helpful shortcuts for writing Twilio views. - It ensures that only requests from Twilio are passed through. This helps protect you from forged requests. - It ensures your view is exempt from CSRF checks via Django's @csrf_exempt decorator. This is necessary for any view that accepts POST requests from outside the local domain (eg: Twilio's servers). - It allows your view to (optionally) return TwiML to pass back to Twilio's servers instead of building a ``HttpResponse`` object manually. - It allows your view to (optionally) return any ``twilio.Verb`` object instead of building a ``HttpResponse`` object manually. Usage:: from twilio.twiml import Response @twilio_view def my_view(request): r = Response() r.sms("Thanks for the SMS message!") return r
[ "This", "decorator", "provides", "several", "helpful", "shortcuts", "for", "writing", "Twilio", "views", "." ]
python
test
Asana/python-asana
asana/resources/attachments.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/attachments.py#L7-L10
def create_on_task(self, task_id, file_content, file_name, file_content_type=None, **options): """Upload an attachment for a task. Accepts a file object or string, file name, and optional file Content-Type""" path = '/tasks/%d/attachments' % (task_id) return self.client.request('post', path, files=[('file', (file_name, file_content, file_content_type))], **options)
[ "def", "create_on_task", "(", "self", ",", "task_id", ",", "file_content", ",", "file_name", ",", "file_content_type", "=", "None", ",", "*", "*", "options", ")", ":", "path", "=", "'/tasks/%d/attachments'", "%", "(", "task_id", ")", "return", "self", ".", ...
Upload an attachment for a task. Accepts a file object or string, file name, and optional file Content-Type
[ "Upload", "an", "attachment", "for", "a", "task", ".", "Accepts", "a", "file", "object", "or", "string", "file", "name", "and", "optional", "file", "Content", "-", "Type" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L38-L49
def system_monitor_fan_alert_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor") fan = ET.SubElement(system_monitor, "fan") alert = ET.SubElement(fan, "alert") state = ET.SubElement(alert, "state") state.text = kwargs.pop('state') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "system_monitor_fan_alert_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "system_monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"system-monitor\"", ",", "xmlns", "=",...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
amzn/ion-python
amazon/ion/reader_managed.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_managed.py#L40-L52
def resolve(self, token): """Attempts to resolve the :class:`SymbolToken` against the current table. If the ``text`` is not None, the token is returned, otherwise, a token in the table is attempted to be retrieved. If not token is found, then this method will raise. """ if token.text is not None: return token resolved_token = self.symbol_table.get(token.sid, None) if resolved_token is None: raise IonException('Out of range SID: %d' % token.sid) return resolved_token
[ "def", "resolve", "(", "self", ",", "token", ")", ":", "if", "token", ".", "text", "is", "not", "None", ":", "return", "token", "resolved_token", "=", "self", ".", "symbol_table", ".", "get", "(", "token", ".", "sid", ",", "None", ")", "if", "resolve...
Attempts to resolve the :class:`SymbolToken` against the current table. If the ``text`` is not None, the token is returned, otherwise, a token in the table is attempted to be retrieved. If not token is found, then this method will raise.
[ "Attempts", "to", "resolve", "the", ":", "class", ":", "SymbolToken", "against", "the", "current", "table", "." ]
python
train
apache/airflow
airflow/jobs.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2071-L2101
def _task_instances_for_dag_run(self, dag_run, session=None): """ Returns a map of task instance key to task instance object for the tasks to run in the given dag run. :param dag_run: the dag run to get the tasks from :type dag_run: airflow.models.DagRun :param session: the database session object :type session: sqlalchemy.orm.session.Session """ tasks_to_run = {} if dag_run is None: return tasks_to_run # check if we have orphaned tasks self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session) # for some reason if we don't refresh the reference to run is lost dag_run.refresh_from_db() make_transient(dag_run) # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf for ti in dag_run.get_task_instances(): # all tasks part of the backfill are scheduled to run if ti.state == State.NONE: ti.set_state(State.SCHEDULED, session=session) if ti.state != State.REMOVED: tasks_to_run[ti.key] = ti return tasks_to_run
[ "def", "_task_instances_for_dag_run", "(", "self", ",", "dag_run", ",", "session", "=", "None", ")", ":", "tasks_to_run", "=", "{", "}", "if", "dag_run", "is", "None", ":", "return", "tasks_to_run", "# check if we have orphaned tasks", "self", ".", "reset_state_fo...
Returns a map of task instance key to task instance object for the tasks to run in the given dag run. :param dag_run: the dag run to get the tasks from :type dag_run: airflow.models.DagRun :param session: the database session object :type session: sqlalchemy.orm.session.Session
[ "Returns", "a", "map", "of", "task", "instance", "key", "to", "task", "instance", "object", "for", "the", "tasks", "to", "run", "in", "the", "given", "dag", "run", "." ]
python
test
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L106-L118
def get_vnetwork_hosts_output_vnetwork_hosts_mac(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_hosts = ET.Element("get_vnetwork_hosts") config = get_vnetwork_hosts output = ET.SubElement(get_vnetwork_hosts, "output") vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts") mac = ET.SubElement(vnetwork_hosts, "mac") mac.text = kwargs.pop('mac') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_hosts_output_vnetwork_hosts_mac", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_hosts", "=", "ET", ".", "Element", "(", "\"get_vnetwork_hosts\"", ")", "config", "="...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
pantsbuild/pex
pex/util.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/util.py#L135-L142
def zip_hash(cls, zf, prefix=''): """Return the hash of the contents of a zipfile, comparable with a cls.dir_hash.""" prefix_length = len(prefix) names = sorted(name[prefix_length:] for name in zf.namelist() if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/')) def stream_factory(name): return zf.open(prefix + name) return cls._compute_hash(names, stream_factory)
[ "def", "zip_hash", "(", "cls", ",", "zf", ",", "prefix", "=", "''", ")", ":", "prefix_length", "=", "len", "(", "prefix", ")", "names", "=", "sorted", "(", "name", "[", "prefix_length", ":", "]", "for", "name", "in", "zf", ".", "namelist", "(", ")"...
Return the hash of the contents of a zipfile, comparable with a cls.dir_hash.
[ "Return", "the", "hash", "of", "the", "contents", "of", "a", "zipfile", "comparable", "with", "a", "cls", ".", "dir_hash", "." ]
python
train
agoragames/kairos
kairos/timeseries.py
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/timeseries.py#L971-L979
def _condense(self, data): ''' Condense by returning the last real value of the gauge. ''' if data: data = filter(None,data.values()) if data: return data[-1] return None
[ "def", "_condense", "(", "self", ",", "data", ")", ":", "if", "data", ":", "data", "=", "filter", "(", "None", ",", "data", ".", "values", "(", ")", ")", "if", "data", ":", "return", "data", "[", "-", "1", "]", "return", "None" ]
Condense by returning the last real value of the gauge.
[ "Condense", "by", "returning", "the", "last", "real", "value", "of", "the", "gauge", "." ]
python
train
python-visualization/folium
folium/utilities.py
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/utilities.py#L321-L340
def iter_coords(obj): """ Returns all the coordinate tuples from a geometry or feature. """ if isinstance(obj, (tuple, list)): coords = obj elif 'features' in obj: coords = [geom['geometry']['coordinates'] for geom in obj['features']] elif 'geometry' in obj: coords = obj['geometry']['coordinates'] else: coords = obj.get('coordinates', obj) for coord in coords: if isinstance(coord, (float, int)): yield tuple(coords) break else: for f in iter_coords(coord): yield f
[ "def", "iter_coords", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "tuple", ",", "list", ")", ")", ":", "coords", "=", "obj", "elif", "'features'", "in", "obj", ":", "coords", "=", "[", "geom", "[", "'geometry'", "]", "[", "'coor...
Returns all the coordinate tuples from a geometry or feature.
[ "Returns", "all", "the", "coordinate", "tuples", "from", "a", "geometry", "or", "feature", "." ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2055-L2083
def download(self, obj, directory, structure=True): """ Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters. """ if not os.path.isdir(directory): raise exc.FolderNotFound("The directory '%s' does not exist." % directory) obj_name = utils.get_name(obj) path, fname = os.path.split(obj_name) if structure: fullpath = os.path.join(directory, path) if not os.path.exists(fullpath): os.makedirs(fullpath) target = os.path.join(fullpath, fname) else: target = os.path.join(directory, fname) with open(target, "wb") as dl: content = self.fetch(obj) try: dl.write(content) except UnicodeEncodeError: encoding = pyrax.get_encoding() dl.write(content.encode(encoding))
[ "def", "download", "(", "self", ",", "obj", ",", "directory", ",", "structure", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "raise", "exc", ".", "FolderNotFound", "(", "\"The directory '%s' does not exis...
Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters.
[ "Fetches", "the", "object", "from", "storage", "and", "writes", "it", "to", "the", "specified", "directory", ".", "The", "directory", "must", "exist", "before", "calling", "this", "method", "." ]
python
train
Metatab/geoid
geoid/util.py
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/util.py#L40-L51
def isimplify(geoids): """Iteratively simplify until the set stops getting smaller. """ s0 = list(geoids) for i in range(10): s1 = simplify(s0) if len(s1) == len(s0): return s1 s0 = s1
[ "def", "isimplify", "(", "geoids", ")", ":", "s0", "=", "list", "(", "geoids", ")", "for", "i", "in", "range", "(", "10", ")", ":", "s1", "=", "simplify", "(", "s0", ")", "if", "len", "(", "s1", ")", "==", "len", "(", "s0", ")", ":", "return"...
Iteratively simplify until the set stops getting smaller.
[ "Iteratively", "simplify", "until", "the", "set", "stops", "getting", "smaller", "." ]
python
train
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L116-L140
def name_globals(s, remove_params=None): """ Returns a list of the global parameter names. Parameters ---------- s : :class:`peri.states.ImageState` The state to name the globals of. remove_params : Set or None A set of unique additional parameters to remove from the globals list. Returns ------- all_params : list The list of the global parameter names, with each of remove_params removed. """ all_params = s.params for p in s.param_particle(np.arange(s.obj_get_positions().shape[0])): all_params.remove(p) if remove_params is not None: for p in set(remove_params): all_params.remove(p) return all_params
[ "def", "name_globals", "(", "s", ",", "remove_params", "=", "None", ")", ":", "all_params", "=", "s", ".", "params", "for", "p", "in", "s", ".", "param_particle", "(", "np", ".", "arange", "(", "s", ".", "obj_get_positions", "(", ")", ".", "shape", "...
Returns a list of the global parameter names. Parameters ---------- s : :class:`peri.states.ImageState` The state to name the globals of. remove_params : Set or None A set of unique additional parameters to remove from the globals list. Returns ------- all_params : list The list of the global parameter names, with each of remove_params removed.
[ "Returns", "a", "list", "of", "the", "global", "parameter", "names", "." ]
python
valid
ronhanson/python-tbx
tbx/process.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/process.py#L198-L256
def daemonize(umask=0, work_dir="/", max_fd=1024, redirect="/dev/null"): """ When this function is called, the process is daemonized (by forking + killing its parent). It becomes a background task. It is useful to release the console. """ if not redirect: redirect = "/dev/null" if hasattr(os, "devnull"): redirect = os.devnull try: pid = os.fork() except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) # first child if pid == 0: os.setsid() try: # Fork a second child. pid = os.fork() except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) # The second child. if pid == 0: os.chdir(work_dir) os.umask(umask) else: # exit first child os._exit(0) else: # Exit parent os._exit(0) #killing inherited file descriptors import resource maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = max_fd # close all file descriptors. for fd in range(0, maxfd): try: os.close(fd) except OSError: # ignored pass os.open(redirect, os.O_RDWR) # standard input # Duplicate standard os.dup2(0, 1) # standard output (1) os.dup2(0, 2) # standard error (2) return os.getpid()
[ "def", "daemonize", "(", "umask", "=", "0", ",", "work_dir", "=", "\"/\"", ",", "max_fd", "=", "1024", ",", "redirect", "=", "\"/dev/null\"", ")", ":", "if", "not", "redirect", ":", "redirect", "=", "\"/dev/null\"", "if", "hasattr", "(", "os", ",", "\"...
When this function is called, the process is daemonized (by forking + killing its parent). It becomes a background task. It is useful to release the console.
[ "When", "this", "function", "is", "called", "the", "process", "is", "daemonized", "(", "by", "forking", "+", "killing", "its", "parent", ")", ".", "It", "becomes", "a", "background", "task", ".", "It", "is", "useful", "to", "release", "the", "console", "...
python
train
dmtucker/keysmith
keysmith.py
https://github.com/dmtucker/keysmith/blob/a0d7388e0f4e36baac93bece933b0e8d7b3c6e3c/keysmith.py#L26-L62
def build_parser(parser: argparse.ArgumentParser) -> None: """Build a parser for CLI arguments and options.""" parser.add_argument( '--delimiter', help='a delimiter for the samples (teeth) in the key', default=' ', ) parser.add_argument( '--encoding', help='the encoding of the population file', default='utf-8', ) parser.add_argument( '--nsamples', '-n', help='the number of random samples to take', type=int, default=6, dest='nteeth', ) parser.add_argument( '--population', '-p', help='{0}, or a path to a file of line-delimited items'.format( ', '.join(POPULATIONS.keys()), ), default='/usr/share/dict/words', ) parser.add_argument( '--stats', help='show statistics for the key', default=False, action='store_true', ) parser.add_argument( '--version', action='version', version='%(prog)s {0}'.format(__version__), )
[ "def", "build_parser", "(", "parser", ":", "argparse", ".", "ArgumentParser", ")", "->", "None", ":", "parser", ".", "add_argument", "(", "'--delimiter'", ",", "help", "=", "'a delimiter for the samples (teeth) in the key'", ",", "default", "=", "' '", ",", ")", ...
Build a parser for CLI arguments and options.
[ "Build", "a", "parser", "for", "CLI", "arguments", "and", "options", "." ]
python
train
pypa/pipenv
pipenv/vendor/pathlib2/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1126-L1131
def is_absolute(self): """True if the path is absolute (has both a root and, if applicable, a drive).""" if not self._root: return False return not self._flavour.has_drv or bool(self._drv)
[ "def", "is_absolute", "(", "self", ")", ":", "if", "not", "self", ".", "_root", ":", "return", "False", "return", "not", "self", ".", "_flavour", ".", "has_drv", "or", "bool", "(", "self", ".", "_drv", ")" ]
True if the path is absolute (has both a root and, if applicable, a drive).
[ "True", "if", "the", "path", "is", "absolute", "(", "has", "both", "a", "root", "and", "if", "applicable", "a", "drive", ")", "." ]
python
train
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L1632-L1639
def get_fields(self, db_name, table_name): """ Parameters: - db_name - table_name """ self.send_get_fields(db_name, table_name) return self.recv_get_fields()
[ "def", "get_fields", "(", "self", ",", "db_name", ",", "table_name", ")", ":", "self", ".", "send_get_fields", "(", "db_name", ",", "table_name", ")", "return", "self", ".", "recv_get_fields", "(", ")" ]
Parameters: - db_name - table_name
[ "Parameters", ":", "-", "db_name", "-", "table_name" ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/lib/wxhorizon_ui.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/wxhorizon_ui.py#L191-L207
def adjustNorthPointer(self): '''Adjust the position and orientation of the north pointer.''' self.headingNorthText.set_size(self.fontSize) headingRotate = mpl.transforms.Affine2D().rotate_deg_around(0.0,0.0,self.heading)+self.axes.transData self.headingNorthText.set_transform(headingRotate) if (self.heading > 90) and (self.heading < 270): headRot = self.heading-180 else: headRot = self.heading self.headingNorthText.set_rotation(headRot) self.headingNorthTri.set_transform(headingRotate) # Adjust if overlapping with heading pointer if (self.heading <= 10.0) or (self.heading >= 350.0): self.headingNorthText.set_text('') else: self.headingNorthText.set_text('N')
[ "def", "adjustNorthPointer", "(", "self", ")", ":", "self", ".", "headingNorthText", ".", "set_size", "(", "self", ".", "fontSize", ")", "headingRotate", "=", "mpl", ".", "transforms", ".", "Affine2D", "(", ")", ".", "rotate_deg_around", "(", "0.0", ",", "...
Adjust the position and orientation of the north pointer.
[ "Adjust", "the", "position", "and", "orientation", "of", "the", "north", "pointer", "." ]
python
train
saltstack/salt
salt/modules/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L1330-L1380
def create_nat_gateway(subnet_id=None, subnet_name=None, allocation_id=None, region=None, key=None, keyid=None, profile=None): ''' Create a NAT Gateway within an existing subnet. If allocation_id is specified, the elastic IP address it references is associated with the gateway. Otherwise, a new allocation_id is created and used. This function requires boto3 to be installed. Returns the nat gateway id if the nat gateway was created and returns False if the nat gateway was not created. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt myminion boto_vpc.create_nat_gateway subnet_name=mysubnet ''' try: if all((subnet_id, subnet_name)): raise SaltInvocationError('Only one of subnet_name or subnet_id may be ' 'provided.') if subnet_name: subnet_id = _get_resource_id('subnet', subnet_name, region=region, key=key, keyid=keyid, profile=profile) if not subnet_id: return {'created': False, 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}} else: if not _get_resource('subnet', resource_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile): return {'created': False, 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_id)}} conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile) if not allocation_id: address = conn3.allocate_address(Domain='vpc') allocation_id = address.get('AllocationId') # Have to go to boto3 to create NAT gateway r = conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id) return {'created': True, 'id': r.get('NatGateway', {}).get('NatGatewayId')} except BotoServerError as e: return {'created': False, 'error': __utils__['boto.get_error'](e)}
[ "def", "create_nat_gateway", "(", "subnet_id", "=", "None", ",", "subnet_name", "=", "None", ",", "allocation_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try"...
Create a NAT Gateway within an existing subnet. If allocation_id is specified, the elastic IP address it references is associated with the gateway. Otherwise, a new allocation_id is created and used. This function requires boto3 to be installed. Returns the nat gateway id if the nat gateway was created and returns False if the nat gateway was not created. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt myminion boto_vpc.create_nat_gateway subnet_name=mysubnet
[ "Create", "a", "NAT", "Gateway", "within", "an", "existing", "subnet", ".", "If", "allocation_id", "is", "specified", "the", "elastic", "IP", "address", "it", "references", "is", "associated", "with", "the", "gateway", ".", "Otherwise", "a", "new", "allocation...
python
train
etscrivner/nose-perfdump
perfdump/models.py
https://github.com/etscrivner/nose-perfdump/blob/a203a68495d30346fab43fb903cb60cd29b17d49/perfdump/models.py#L159-L163
def get_cursor(cls): """Return a message list cursor that returns sqlite3.Row objects""" db = SqliteConnection.get() db.row_factory = sqlite3.Row return db.cursor()
[ "def", "get_cursor", "(", "cls", ")", ":", "db", "=", "SqliteConnection", ".", "get", "(", ")", "db", ".", "row_factory", "=", "sqlite3", ".", "Row", "return", "db", ".", "cursor", "(", ")" ]
Return a message list cursor that returns sqlite3.Row objects
[ "Return", "a", "message", "list", "cursor", "that", "returns", "sqlite3", ".", "Row", "objects" ]
python
train
CalebBell/thermo
thermo/eos.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/eos.py#L1090-L1115
def Yu_Lu(self, T, full=True, quick=True): r'''Method to calculate `a_alpha` and its first and second derivatives according to Yu and Lu (1987) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Four coefficients needed. .. math:: \alpha = 10^{c_{4} \left(- \frac{T}{Tc} + 1\right) \left( \frac{T^{2} c_{3}}{Tc^{2}} + \frac{T c_{2}}{Tc} + c_{1}\right)} References ---------- .. [1] Yu, Jin-Min, and Benjamin C. -Y. Lu. "A Three-Parameter Cubic Equation of State for Asymmetric Mixture Density Calculations." Fluid Phase Equilibria 34, no. 1 (January 1, 1987): 1-19. doi:10.1016/0378-3812(87)85047-1. ''' c1, c2, c3, c4 = self.alpha_function_coeffs T, Tc, a = self.T, self.Tc, self.a a_alpha = a*10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1)) if not full: return a_alpha else: da_alpha_dT = a*(10**(c4*(-T/Tc + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*(c4*(-T/Tc + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*(T**2*c3/Tc**2 + T*c2/Tc + c1)/Tc)*log(10)) d2a_alpha_dT2 = a*(10**(-c4*(T/Tc - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1))*c4*(-4*T*c3/Tc - 2*c2 - 2*c3*(T/Tc - 1) + c4*(T**2*c3/Tc**2 + T*c2/Tc + c1 + (T/Tc - 1)*(2*T*c3/Tc + c2))**2*log(10))*log(10)/Tc**2) return a_alpha, da_alpha_dT, d2a_alpha_dT2
[ "def", "Yu_Lu", "(", "self", ",", "T", ",", "full", "=", "True", ",", "quick", "=", "True", ")", ":", "c1", ",", "c2", ",", "c3", ",", "c4", "=", "self", ".", "alpha_function_coeffs", "T", ",", "Tc", ",", "a", "=", "self", ".", "T", ",", "sel...
r'''Method to calculate `a_alpha` and its first and second derivatives according to Yu and Lu (1987) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Four coefficients needed. .. math:: \alpha = 10^{c_{4} \left(- \frac{T}{Tc} + 1\right) \left( \frac{T^{2} c_{3}}{Tc^{2}} + \frac{T c_{2}}{Tc} + c_{1}\right)} References ---------- .. [1] Yu, Jin-Min, and Benjamin C. -Y. Lu. "A Three-Parameter Cubic Equation of State for Asymmetric Mixture Density Calculations." Fluid Phase Equilibria 34, no. 1 (January 1, 1987): 1-19. doi:10.1016/0378-3812(87)85047-1.
[ "r", "Method", "to", "calculate", "a_alpha", "and", "its", "first", "and", "second", "derivatives", "according", "to", "Yu", "and", "Lu", "(", "1987", ")", "[", "1", "]", "_", ".", "Returns", "a_alpha", "da_alpha_dT", "and", "d2a_alpha_dT2", ".", "See", ...
python
valid
ECRL/ecabc
ecabc/bees.py
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/bees.py#L78-L100
def calculate_positions(self, first_bee_val, second_bee_val, value_range): '''Calculate the new value/position for two given bee values Args: first_bee_val (int or float): value from the first bee second_bee_val (int or float): value from the second bee value_ranges (tuple): "(value type, (min_val, max_val))" for the given value Returns: int or float: new value ''' value = first_bee_val + np.random.uniform(-1, 1) \ * (first_bee_val - second_bee_val) if value_range[0] == 'int': value = int(value) if value > value_range[1][1]: value = value_range[1][1] if value < value_range[1][0]: value = value_range[1][0] return value
[ "def", "calculate_positions", "(", "self", ",", "first_bee_val", ",", "second_bee_val", ",", "value_range", ")", ":", "value", "=", "first_bee_val", "+", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "*", "(", "first_bee_val", "-", "...
Calculate the new value/position for two given bee values Args: first_bee_val (int or float): value from the first bee second_bee_val (int or float): value from the second bee value_ranges (tuple): "(value type, (min_val, max_val))" for the given value Returns: int or float: new value
[ "Calculate", "the", "new", "value", "/", "position", "for", "two", "given", "bee", "values" ]
python
train
saltstack/salt
salt/ext/ipaddress.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L967-L1027
def subnets(self, prefixlen_diff=1, new_prefix=None): """The subnets which join to make the current subnet. In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network) """ if self._prefixlen == self._max_prefixlen: yield self return if new_prefix is not None: if new_prefix < self._prefixlen: raise ValueError('new prefix must be longer') if prefixlen_diff != 1: raise ValueError('cannot set prefixlen_diff and new_prefix') prefixlen_diff = new_prefix - self._prefixlen if prefixlen_diff < 0: raise ValueError('prefix length diff must be > 0') new_prefixlen = self._prefixlen + prefixlen_diff if new_prefixlen > self._max_prefixlen: raise ValueError( 'prefix length diff %d is invalid for netblock %s' % ( new_prefixlen, self)) first = self.__class__('%s/%s' % (self.network_address, self._prefixlen + prefixlen_diff)) yield first current = first while True: broadcast = current.broadcast_address if broadcast == self.broadcast_address: return new_addr = self._address_class(int(broadcast) + 1) current = self.__class__('%s/%s' % (new_addr, new_prefixlen)) yield current
[ "def", "subnets", "(", "self", ",", "prefixlen_diff", "=", "1", ",", "new_prefix", "=", "None", ")", ":", "if", "self", ".", "_prefixlen", "==", "self", ".", "_max_prefixlen", ":", "yield", "self", "return", "if", "new_prefix", "is", "not", "None", ":", ...
The subnets which join to make the current subnet. In the case that self contains only one IP (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 for IPv6), yield an iterator with just ourself. Args: prefixlen_diff: An integer, the amount the prefix length should be increased by. This should not be set if new_prefix is also set. new_prefix: The desired new prefix length. This must be a larger number (smaller prefix) than the existing prefix. This should not be set if prefixlen_diff is also set. Returns: An iterator of IPv(4|6) objects. Raises: ValueError: The prefixlen_diff is too small or too large. OR prefixlen_diff and new_prefix are both set or new_prefix is a smaller number than the current prefix (smaller number means a larger network)
[ "The", "subnets", "which", "join", "to", "make", "the", "current", "subnet", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L327-L341
def delete_build(self, project, build_id): """DeleteBuild. Deletes a build. :param str project: Project ID or project name :param int build_id: The ID of the build. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') self._send(http_method='DELETE', location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf', version='5.0', route_values=route_values)
[ "def", "delete_build", "(", "self", ",", "project", ",", "build_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ...
DeleteBuild. Deletes a build. :param str project: Project ID or project name :param int build_id: The ID of the build.
[ "DeleteBuild", ".", "Deletes", "a", "build", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "build_id", ":", "The", "ID", "of", "the", "build", "." ]
python
train
alecthomas/injector
injector/__init__.py
https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L293-L304
def bind(self, interface, to=None, scope=None): """Bind an interface to an implementation. :param interface: Interface or :func:`Key` to bind. :param to: Instance or class to bind to, or an explicit :class:`Provider` subclass. :param scope: Optional :class:`Scope` in which to bind. """ if type(interface) is type and issubclass(interface, (BaseMappingKey, BaseSequenceKey)): return self.multibind(interface, to, scope=scope) key = BindingKey.create(interface) self._bindings[key] = self.create_binding(interface, to, scope)
[ "def", "bind", "(", "self", ",", "interface", ",", "to", "=", "None", ",", "scope", "=", "None", ")", ":", "if", "type", "(", "interface", ")", "is", "type", "and", "issubclass", "(", "interface", ",", "(", "BaseMappingKey", ",", "BaseSequenceKey", ")"...
Bind an interface to an implementation. :param interface: Interface or :func:`Key` to bind. :param to: Instance or class to bind to, or an explicit :class:`Provider` subclass. :param scope: Optional :class:`Scope` in which to bind.
[ "Bind", "an", "interface", "to", "an", "implementation", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/indices.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/indices.py#L395-L413
def put_alias(self, index, name, body=None, params=None): """ Create an alias for a specific index/indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. :arg name: The name of the alias to be created or updated :arg body: The settings for the alias, such as `routing` or `filter` :arg master_timeout: Specify timeout for connection to master :arg request_timeout: Explicit timeout for the operation """ for param in (index, name): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "PUT", _make_path(index, "_alias", name), params=params, body=body )
[ "def", "put_alias", "(", "self", ",", "index", ",", "name", ",", "body", "=", "None", ",", "params", "=", "None", ")", ":", "for", "param", "in", "(", "index", ",", "name", ")", ":", "if", "param", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", ...
Create an alias for a specific index/indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. :arg name: The name of the alias to be created or updated :arg body: The settings for the alias, such as `routing` or `filter` :arg master_timeout: Specify timeout for connection to master :arg request_timeout: Explicit timeout for the operation
[ "Create", "an", "alias", "for", "a", "specific", "index", "/", "indices", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "indices", "-", "aliases", ".", ...
python
train
relwell/corenlp-xml-lib
corenlp_xml/document.py
https://github.com/relwell/corenlp-xml-lib/blob/9b0f8c912ba3ecedd34473f74a9f2d033a75baf9/corenlp_xml/document.py#L246-L258
def basic_dependencies(self): """ Accesses basic dependencies from the XML output :getter: Returns the dependency graph for basic dependencies :type: corenlp_xml.dependencies.DependencyGraph """ if self._basic_dependencies is None: deps = self._element.xpath('dependencies[@type="basic-dependencies"]') if len(deps) > 0: self._basic_dependencies = DependencyGraph(deps[0]) return self._basic_dependencies
[ "def", "basic_dependencies", "(", "self", ")", ":", "if", "self", ".", "_basic_dependencies", "is", "None", ":", "deps", "=", "self", ".", "_element", ".", "xpath", "(", "'dependencies[@type=\"basic-dependencies\"]'", ")", "if", "len", "(", "deps", ")", ">", ...
Accesses basic dependencies from the XML output :getter: Returns the dependency graph for basic dependencies :type: corenlp_xml.dependencies.DependencyGraph
[ "Accesses", "basic", "dependencies", "from", "the", "XML", "output" ]
python
train
quantmind/pulsar
pulsar/utils/pylib/events.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/events.py#L156-L172
def copy_many_times_events(self, other): '''Copy :ref:`many times events <many-times-event>` from ``other``. All many times events of ``other`` are copied to this handler provided the events handlers already exist. ''' events = self.events() other_events = other.events() if events and other_events: for name, event in other_events.items(): handlers = event.handlers() if not event.onetime() and handlers: ev = events.get(name) # If the event is available add it if ev: for callback in handlers: ev.bind(callback)
[ "def", "copy_many_times_events", "(", "self", ",", "other", ")", ":", "events", "=", "self", ".", "events", "(", ")", "other_events", "=", "other", ".", "events", "(", ")", "if", "events", "and", "other_events", ":", "for", "name", ",", "event", "in", ...
Copy :ref:`many times events <many-times-event>` from ``other``. All many times events of ``other`` are copied to this handler provided the events handlers already exist.
[ "Copy", ":", "ref", ":", "many", "times", "events", "<many", "-", "times", "-", "event", ">", "from", "other", "." ]
python
train
gwpy/gwpy
gwpy/timeseries/timeseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1581-L1672
def gate(self, tzero=1.0, tpad=0.5, whiten=True, threshold=50., cluster_window=0.5, **whiten_kwargs): """Removes high amplitude peaks from data using inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `int`, optional half-width time duration in which the time series is set to zero tpad : `int`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whiten both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show() """ try: from scipy.signal import find_peaks except ImportError as exc: exc.args = ("Must have scipy>=1.1.0 to utilize this method.",) raise # Find points to gate based on a threshold data = self.whiten(**whiten_kwargs) if whiten else 
self window_samples = cluster_window * data.sample_rate.value gates = find_peaks(abs(data.value), height=threshold, distance=window_samples)[0] out = self.copy() # Iterate over list of indices to gate and apply each one nzero = int(abs(tzero) * self.sample_rate.value) npad = int(abs(tpad) * self.sample_rate.value) half = nzero + npad ntotal = 2 * half for gate in gates: # Set the boundaries for windowed data in the original time series left_idx = max(0, gate - half) right_idx = min(gate + half, len(self.value) - 1) # Choose which part of the window will replace the data # This must be done explicitly for edge cases where a window # overlaps index 0 or the end of the time series left_idx_window = half - (gate - left_idx) right_idx_window = half + (right_idx - gate) window = 1 - planck(ntotal, nleft=npad, nright=npad) window = window[left_idx_window:right_idx_window] out[left_idx:right_idx] *= window return out
[ "def", "gate", "(", "self", ",", "tzero", "=", "1.0", ",", "tpad", "=", "0.5", ",", "whiten", "=", "True", ",", "threshold", "=", "50.", ",", "cluster_window", "=", "0.5", ",", "*", "*", "whiten_kwargs", ")", ":", "try", ":", "from", "scipy", ".", ...
Removes high amplitude peaks from data using inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `int`, optional half-width time duration in which the time series is set to zero tpad : `int`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whiten both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show()
[ "Removes", "high", "amplitude", "peaks", "from", "data", "using", "inverse", "Planck", "window", ".", "Points", "will", "be", "discovered", "automatically", "using", "a", "provided", "threshold", "and", "clustered", "within", "a", "provided", "time", "window", "...
python
train
nerdvegas/rez
src/rez/utils/graph_utils.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L153-L198
def prune_graph(graph_str, package_name): """Prune a package graph so it only contains nodes accessible from the given package. Args: graph_str (str): Dot-language graph string. package_name (str): Name of package of interest. Returns: Pruned graph, as a string. """ # find nodes of interest g = read_dot(graph_str) nodes = set() for node, attrs in g.node_attr.iteritems(): attr = [x for x in attrs if x[0] == "label"] if attr: label = attr[0][1] try: req_str = _request_from_label(label) request = PackageRequest(req_str) except PackageRequestError: continue if request.name == package_name: nodes.add(node) if not nodes: raise ValueError("The package %r does not appear in the graph." % package_name) # find nodes upstream from these nodes g_rev = g.reverse() accessible_nodes = set() access = accessibility(g_rev) for node in nodes: nodes_ = access.get(node, []) accessible_nodes |= set(nodes_) # remove inaccessible nodes inaccessible_nodes = set(g.nodes()) - accessible_nodes for node in inaccessible_nodes: g.del_node(node) return write_dot(g)
[ "def", "prune_graph", "(", "graph_str", ",", "package_name", ")", ":", "# find nodes of interest", "g", "=", "read_dot", "(", "graph_str", ")", "nodes", "=", "set", "(", ")", "for", "node", ",", "attrs", "in", "g", ".", "node_attr", ".", "iteritems", "(", ...
Prune a package graph so it only contains nodes accessible from the given package. Args: graph_str (str): Dot-language graph string. package_name (str): Name of package of interest. Returns: Pruned graph, as a string.
[ "Prune", "a", "package", "graph", "so", "it", "only", "contains", "nodes", "accessible", "from", "the", "given", "package", "." ]
python
train
YeoLab/anchor
anchor/infotheory.py
https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L12-L29
def bin_range_strings(bins, fmt=':g'): """Given a list of bins, make a list of strings of those bin ranges Parameters ---------- bins : list_like List of anything, usually values of bin edges Returns ------- bin_ranges : list List of bin ranges >>> bin_range_strings((0, 0.5, 1)) ['0-0.5', '0.5-1'] """ return [('{' + fmt + '}-{' + fmt + '}').format(i, j) for i, j in zip(bins, bins[1:])]
[ "def", "bin_range_strings", "(", "bins", ",", "fmt", "=", "':g'", ")", ":", "return", "[", "(", "'{'", "+", "fmt", "+", "'}-{'", "+", "fmt", "+", "'}'", ")", ".", "format", "(", "i", ",", "j", ")", "for", "i", ",", "j", "in", "zip", "(", "bin...
Given a list of bins, make a list of strings of those bin ranges Parameters ---------- bins : list_like List of anything, usually values of bin edges Returns ------- bin_ranges : list List of bin ranges >>> bin_range_strings((0, 0.5, 1)) ['0-0.5', '0.5-1']
[ "Given", "a", "list", "of", "bins", "make", "a", "list", "of", "strings", "of", "those", "bin", "ranges" ]
python
train
psss/did
did/base.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/base.py#L261-L266
def last_quarter(): """ Return start and end date of this quarter. """ since, until = Date.this_quarter() since = since.date - delta(months=3) until = until.date - delta(months=3) return Date(since), Date(until)
[ "def", "last_quarter", "(", ")", ":", "since", ",", "until", "=", "Date", ".", "this_quarter", "(", ")", "since", "=", "since", ".", "date", "-", "delta", "(", "months", "=", "3", ")", "until", "=", "until", ".", "date", "-", "delta", "(", "months"...
Return start and end date of this quarter.
[ "Return", "start", "and", "end", "date", "of", "this", "quarter", "." ]
python
train