repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
google/grr
grr/server/grr_response_server/timeseries.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/timeseries.py#L73-L89
def FilterRange(self, start_time=None, stop_time=None):
    """Restrict the series to the half-open window [start_time, stop_time).

    Drops every data point whose timestamp falls outside the requested
    window. Either bound may be omitted, leaving that side unbounded.

    Args:
      start_time: If set, timestamps before start_time will be dropped.
      stop_time: If set, timestamps at or past stop_time will be dropped.
    """
    start_time = self._NormalizeTime(start_time)
    stop_time = self._NormalizeTime(stop_time)

    def _in_window(point):
        # point[1] is the timestamp component of the data point.
        if start_time is not None and point[1] < start_time:
            return False
        if stop_time is not None and point[1] >= stop_time:
            return False
        return True

    self.data = list(filter(_in_window, self.data))
[ "def", "FilterRange", "(", "self", ",", "start_time", "=", "None", ",", "stop_time", "=", "None", ")", ":", "start_time", "=", "self", ".", "_NormalizeTime", "(", "start_time", ")", "stop_time", "=", "self", ".", "_NormalizeTime", "(", "stop_time", ")", "s...
Filter the series to lie between start_time and stop_time. Removes all values of the series which are outside of some time range. Args: start_time: If set, timestamps before start_time will be dropped. stop_time: If set, timestamps at or past stop_time will be dropped.
[ "Filter", "the", "series", "to", "lie", "between", "start_time", "and", "stop_time", "." ]
python
train
ASMfreaK/habitipy
habitipy/api.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L389-L397
def render_docstring(self):
    """Assemble a readable docstring for ipython.

    Starts with a "{method} uri title" header line, then, when params
    exist, appends one "<group> params:" section per group with each
    parameter's own rendered docstring.
    """
    pieces = ['{{{self.method}}} {self.uri} {self.title}\n'.format(self=self)]
    if self.params:
        for group, params in self.params.items():
            pieces.append('\n' + group + ' params:\n')
            pieces.extend(param.render_docstring() for param in params.values())
    return ''.join(pieces)
[ "def", "render_docstring", "(", "self", ")", ":", "res", "=", "'{{{self.method}}} {self.uri} {self.title}\\n'", ".", "format", "(", "self", "=", "self", ")", "if", "self", ".", "params", ":", "for", "group", ",", "params", "in", "self", ".", "params", ".", ...
make a nice docstring for ipython
[ "make", "a", "nice", "docstring", "for", "ipython" ]
python
train
murphy214/berrl
build/lib/berrl/pipegeohash.py
https://github.com/murphy214/berrl/blob/ce4d060cc7db74c32facc538fa1d7030f1a27467/build/lib/berrl/pipegeohash.py#L265-L331
def make_geohash_tables(table,listofprecisions,**kwargs):
	'''
	sort_by - field to sort by for each group
	return_squares - boolean arg if true returns a list of squares instead of writing out to table
	'''
	# NOTE(review): Python 2 code (dict.iteritems, print statement) and the
	# legacy pandas DataFrame.sort API; will not run under Python 3 /
	# modern pandas without porting.
	return_squares = False
	sort_by = 'COUNT'
	# logic for accepting kwarg inputs
	for key,value in kwargs.iteritems():
		if key == 'sort_by':
			sort_by = value
		if key == 'return_squares':
			return_squares = value
	# getting header
	header = df2list(table)[0]
	# getting columns
	# NOTE(review): assumes the first 10 header fields are fixed
	# (geohash/point columns) and everything after is data — confirm
	# against the table layout produced upstream.
	columns = header[10:]
	# getting original table
	originaltable = table
	if not sort_by == 'COUNT':
		originaltable = originaltable.sort([sort_by],ascending=[0])
	# Process coarser precisions last (descending order).
	listofprecisions = sorted(listofprecisions,reverse=True)
	# making total table to hold a list of dfs
	# If full precision (8) is requested, the untouched input table is
	# seeded as the first entry of the returned list.
	if return_squares == True and listofprecisions[-1] == 8:
		total_list = [table]
	elif return_squares == True:
		total_list = []
	for row in listofprecisions:
		precision = int(row)
		table = originaltable
		# Truncate each geohash to the current precision, then aggregate
		# counts/columns per truncated geohash.
		table['GEOHASH'] = table.GEOHASH.str[:precision]
		table = table[['GEOHASH','COUNT']+columns].groupby(['GEOHASH'],sort=True).sum()
		table = table.sort([sort_by],ascending=[0])
		table = table.reset_index()
		newsquares = [header]
		# iterating through each square here
		for row in df2list(table)[1:]:
			# getting points
			points = get_points_geohash(row[0])
			# making new row
			newrow = [row[0]] + points + row[1:]
			# appending to newsquares
			newsquares.append(newrow)
		# taking newsquares to dataframe
		table = list2df(newsquares)
		if return_squares == True:
			total_list.append(table)
		else:
			# One CSV per precision level, e.g. 'squares5.csv'.
			table.to_csv('squares' + str(precision) + '.csv',index=False)
	if return_squares == True:
		return total_list
	else:
		print 'Wrote output squares tables to csv files.'
[ "def", "make_geohash_tables", "(", "table", ",", "listofprecisions", ",", "*", "*", "kwargs", ")", ":", "return_squares", "=", "False", "sort_by", "=", "'COUNT'", "# logic for accepting kwarg inputs", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", ...
sort_by - field to sort by for each group return_squares - boolean arg if true returns a list of squares instead of writing out to table
[ "sort_by", "-", "field", "to", "sort", "by", "for", "each", "group", "return_squares", "-", "boolean", "arg", "if", "true", "returns", "a", "list", "of", "squares", "instead", "of", "writing", "out", "to", "table" ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L6299-L6305
def PrintCategories():
    """Prints a list of all the error-categories used by error messages.

    These are the categories used to filter messages via --filter.
    """
    lines = [' %s\n' % category for category in _ERROR_CATEGORIES]
    sys.stderr.write(''.join(lines))
    sys.exit(0)
[ "def", "PrintCategories", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "''", ".", "join", "(", "' %s\\n'", "%", "cat", "for", "cat", "in", "_ERROR_CATEGORIES", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter.
[ "Prints", "a", "list", "of", "all", "the", "error", "-", "categories", "used", "by", "error", "messages", "." ]
python
valid
manahl/arctic
arctic/store/_ndarray_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/_ndarray_store.py#L198-L208
def _fw_pointers_convert_append_to_write(previous_version):
    """Decide whether an append should be promoted to a full write to
    avoid data integrity errors.

    Switching from ENABLED --> DISABLED/HYBRID when appending can cause
    integrity errors for subsequent reads: the last ENABLED write leaves
    segments without parent references, later DISABLED/HYBRID appends add
    them, and a DISABLED read then misses the first write's segments.
    A full write force-updates every segment with parent references.
    """
    previous_cfg = get_fwptr_config(previous_version)
    was_enabled = previous_cfg is FwPointersCfg.ENABLED
    still_enabled = ARCTIC_FORWARD_POINTERS_CFG is FwPointersCfg.ENABLED
    return was_enabled and not still_enabled
[ "def", "_fw_pointers_convert_append_to_write", "(", "previous_version", ")", ":", "# Switching from ENABLED --> DISABLED/HYBRID when appending can cause integrity errors for subsequent reads:", "# - Assume the last write was done with ENABLED (segments don't have parent references updated).", "# ...
This method decides whether to convert an append to a full write in order to avoid data integrity errors
[ "This", "method", "decides", "whether", "to", "convert", "an", "append", "to", "a", "full", "write", "in", "order", "to", "avoid", "data", "integrity", "errors" ]
python
train
mgoral/subconvert
src/subconvert/gui/tools/Synchronizer.py
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/tools/Synchronizer.py#L317-L325
def _findRow(subNo, model): """Finds a row in a given model which has a column with a given number.""" items = model.findItems(str(subNo)) if len(items) == 0: return None if len(items) > 1: raise IndexError("Too many items with sub number %s" % subNo) return items[0].row()
[ "def", "_findRow", "(", "subNo", ",", "model", ")", ":", "items", "=", "model", ".", "findItems", "(", "str", "(", "subNo", ")", ")", "if", "len", "(", "items", ")", "==", "0", ":", "return", "None", "if", "len", "(", "items", ")", ">", "1", ":...
Finds a row in a given model which has a column with a given number.
[ "Finds", "a", "row", "in", "a", "given", "model", "which", "has", "a", "column", "with", "a", "given", "number", "." ]
python
train
BetterWorks/django-anonymizer
anonymizer/replacers.py
https://github.com/BetterWorks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L187-L191
def unique_lorem(anon, obj, field, val):
    """Generate a unique paragraph of lorem ipsum text.

    Delegates to the anonymizer's faker; ``obj`` and ``val`` are part of
    the replacer signature but unused here.
    """
    faker = anon.faker
    return faker.unique_lorem(field=field)
[ "def", "unique_lorem", "(", "anon", ",", "obj", ",", "field", ",", "val", ")", ":", "return", "anon", ".", "faker", ".", "unique_lorem", "(", "field", "=", "field", ")" ]
Generates a unique paragraph of lorem ipsum text
[ "Generates", "a", "unique", "paragraph", "of", "lorem", "ipsum", "text" ]
python
train
SoCo/SoCo
soco/core.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L1892-L1919
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos, update_id=0):
    """Relocate a single track inside a Sonos playlist.

    Thin convenience wrapper around :py:meth:`reorder_sonos_playlist`.

    Args:
        sonos_playlist: Sonos playlist object
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`)
            or the item_id (str) of the playlist.
        track (int): 0-based index of the track to move.
        new_pos (int): 0-based index the track should end up at.
        update_id (int): Optional update counter; the default of 0 means
            it will be looked up.

    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`.

    Raises:
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`.
    """
    source_index = int(track)
    target_index = int(new_pos)
    return self.reorder_sonos_playlist(sonos_playlist, source_index,
                                       target_index, update_id)
[ "def", "move_in_sonos_playlist", "(", "self", ",", "sonos_playlist", ",", "track", ",", "new_pos", ",", "update_id", "=", "0", ")", ":", "return", "self", ".", "reorder_sonos_playlist", "(", "sonos_playlist", ",", "int", "(", "track", ")", ",", "int", "(", ...
Move a track to a new position within a Sonos Playlist. This is a convenience method for :py:meth:`reorder_sonos_playlist`. Example:: device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1) Args: sonos_playlist (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): Sonos playlist object or the item_id (str) of the Sonos playlist. track (int): **0**-based position of the track to move. The first track is track 0, just like indexing into a Python list. new_pos (int): **0**-based location to move the track. update_id (int): Optional update counter for the object. If left at the default of 0, it will be looked up. Returns: dict: See :py:meth:`reorder_sonos_playlist` Raises: SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
[ "Move", "a", "track", "to", "a", "new", "position", "within", "a", "Sonos", "Playlist", ".", "This", "is", "a", "convenience", "method", "for", ":", "py", ":", "meth", ":", "reorder_sonos_playlist", "." ]
python
train
materialsproject/pymatgen
pymatgen/core/surface.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/surface.py#L1834-L1880
def center_slab(slab): """ The goal here is to ensure the center of the slab region is centered close to c=0.5. This makes it easier to find the surface sites and apply operations like doping. There are three cases where the slab in not centered: 1. The slab region is completely between two vacuums in the box but not necessarily centered. We simply shift the slab by the difference in its center of mass and 0.5 along the c direction. 2. The slab completely spills outside the box from the bottom and into the top. This makes it incredibly difficult to locate surface sites. We iterate through all sites that spill over (z>c) and shift all sites such that this specific site is now on the other side. Repeat for all sites with z>c. 3. This is a simpler case of scenario 2. Either the top or bottom slab sites are at c=0 or c=1. Treat as scenario 2. Args: slab (Slab): Slab structure to center Returns: Returns a centered slab structure """ # get a reasonable r cutoff to sample neighbors bdists = sorted([nn[1] for nn in slab.get_neighbors(slab[0], 10) if nn[1] > 0]) r = bdists[0] * 3 all_indices = [i for i, site in enumerate(slab)] # check if structure is case 2 or 3, shift all the # sites up to the other side until it is case 1 for site in slab: if any([nn[1] > slab.lattice.c for nn in slab.get_neighbors(site, r)]): shift = 1 - site.frac_coords[2] + 0.05 slab.translate_sites(all_indices, [0, 0, shift]) # now the slab is case 1, shift the center of mass of the slab to 0.5 weights = [s.species.weight for s in slab] center_of_mass = np.average(slab.frac_coords, weights=weights, axis=0) shift = 0.5 - center_of_mass[2] slab.translate_sites(all_indices, [0, 0, shift]) return slab
[ "def", "center_slab", "(", "slab", ")", ":", "# get a reasonable r cutoff to sample neighbors", "bdists", "=", "sorted", "(", "[", "nn", "[", "1", "]", "for", "nn", "in", "slab", ".", "get_neighbors", "(", "slab", "[", "0", "]", ",", "10", ")", "if", "nn...
The goal here is to ensure the center of the slab region is centered close to c=0.5. This makes it easier to find the surface sites and apply operations like doping. There are three cases where the slab in not centered: 1. The slab region is completely between two vacuums in the box but not necessarily centered. We simply shift the slab by the difference in its center of mass and 0.5 along the c direction. 2. The slab completely spills outside the box from the bottom and into the top. This makes it incredibly difficult to locate surface sites. We iterate through all sites that spill over (z>c) and shift all sites such that this specific site is now on the other side. Repeat for all sites with z>c. 3. This is a simpler case of scenario 2. Either the top or bottom slab sites are at c=0 or c=1. Treat as scenario 2. Args: slab (Slab): Slab structure to center Returns: Returns a centered slab structure
[ "The", "goal", "here", "is", "to", "ensure", "the", "center", "of", "the", "slab", "region", "is", "centered", "close", "to", "c", "=", "0", ".", "5", ".", "This", "makes", "it", "easier", "to", "find", "the", "surface", "sites", "and", "apply", "ope...
python
train
ergo/ziggurat_foundations
ziggurat_foundations/migrations/env.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/migrations/env.py#L74-L95
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connection = create_engine(get_url()).connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        version_table="alembic_ziggurat_foundations_version",
        transaction_per_migration=True,
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even when a migration fails.
        connection.close()
[ "def", "run_migrations_online", "(", ")", ":", "engine", "=", "create_engine", "(", "get_url", "(", ")", ")", "connection", "=", "engine", ".", "connect", "(", ")", "context", ".", "configure", "(", "connection", "=", "connection", ",", "target_metadata", "=...
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
[ "Run", "migrations", "in", "online", "mode", "." ]
python
train
apache/airflow
airflow/hooks/webhdfs_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L56-L79
def get_conn(self):
    """
    Establishes a connection depending on the security mode set via
    config or environment variable, trying each configured namenode in
    turn until one answers a read probe.

    :return: a hdfscli InsecureClient or KerberosClient object.
    :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
    """
    connections = self.get_connections(self.webhdfs_conn_id)
    for connection in connections:
        self.log.debug('Trying namenode %s', connection.host)
        try:
            candidate = self._get_client(connection)
            # Probe the root path to verify the namenode is readable.
            candidate.status('/')
        except HdfsError as hdfs_error:
            self.log.debug('Read operation on namenode %s failed with error: %s',
                           connection.host, hdfs_error)
            continue
        self.log.debug('Using namenode %s for hook', connection.host)
        return candidate
    # Every namenode failed the read probe.
    failed_hosts = '\n'.join(connection.host for connection in connections)
    error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format(
        hosts=failed_hosts)
    raise AirflowWebHDFSHookException(error_message)
[ "def", "get_conn", "(", "self", ")", ":", "connections", "=", "self", ".", "get_connections", "(", "self", ".", "webhdfs_conn_id", ")", "for", "connection", "in", "connections", ":", "try", ":", "self", ".", "log", ".", "debug", "(", "'Trying namenode %s'", ...
Establishes a connection depending on the security mode set via config or environment variable. :return: a hdfscli InsecureClient or KerberosClient object. :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
[ "Establishes", "a", "connection", "depending", "on", "the", "security", "mode", "set", "via", "config", "or", "environment", "variable", "." ]
python
test
MKLab-ITI/reveal-graph-embedding
reveal_graph_embedding/datautil/score_rw_util.py
https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/score_rw_util.py#L76-L90
def write_average_score_row(fp, score_name, scores):
    """
    Simple utility function that writes an average score row in a file
    designated by a file pointer: a "--name--" header followed by one
    tab-separated line per score vector.

    Inputs:  - fp: A file pointer.
             - score_name: What it says on the tin.
             - scores: An array of average score values corresponding to
               each of the training set percentages.
    """
    fp.write("--" + score_name + "--")
    for vector in scores:
        cells = [str(score) for score in vector]
        fp.write("\n" + "\t".join(cells))
[ "def", "write_average_score_row", "(", "fp", ",", "score_name", ",", "scores", ")", ":", "row", "=", "\"--\"", "+", "score_name", "+", "\"--\"", "fp", ".", "write", "(", "row", ")", "for", "vector", "in", "scores", ":", "row", "=", "list", "(", "vector...
Simple utility function that writes an average score row in a file designated by a file pointer. Inputs: - fp: A file pointer. - score_name: What it says on the tin. - scores: An array of average score values corresponding to each of the training set percentages.
[ "Simple", "utility", "function", "that", "writes", "an", "average", "score", "row", "in", "a", "file", "designated", "by", "a", "file", "pointer", "." ]
python
train
saltstack/salt
salt/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L3809-L3844
def _handle_iorder(self, state):
    '''
    Take a state and apply the iorder system.

    When the 'state_auto_order' option is set, walk every declaration in
    every state and, unless the declaration already carries an explicit
    'order' argument, append {'order': self.iorder}. self.iorder is a
    running counter on the instance, so declarations end up ordered by
    the sequence in which they are visited here.
    '''
    if self.opts['state_auto_order']:
        for name in state:
            for s_dec in state[name]:
                if not isinstance(s_dec, six.string_types):
                    # PyDSL OrderedDict?
                    continue
                if not isinstance(state[name], dict):
                    # Include's or excludes as lists?
                    continue
                if not isinstance(state[name][s_dec], list):
                    # Bad syntax, let the verify seq pick it up later on
                    continue

                found = False
                if s_dec.startswith('_'):
                    # Private/internal declarations are left untouched.
                    continue

                # Look for an explicit 'order' argument already present
                # (only single-key dict args are considered).
                for arg in state[name][s_dec]:
                    if isinstance(arg, dict):
                        if arg:
                            if next(six.iterkeys(arg)) == 'order':
                                found = True
                if not found:
                    if not isinstance(state[name][s_dec], list):
                        # quite certainly a syntax error, managed elsewhere
                        continue
                    state[name][s_dec].append(
                        {'order': self.iorder}
                    )
                # NOTE(review): the counter advances even when an explicit
                # 'order' was found, so auto-ordered declarations keep
                # globally unique positions — presumably intentional.
                self.iorder += 1
    return state
[ "def", "_handle_iorder", "(", "self", ",", "state", ")", ":", "if", "self", ".", "opts", "[", "'state_auto_order'", "]", ":", "for", "name", "in", "state", ":", "for", "s_dec", "in", "state", "[", "name", "]", ":", "if", "not", "isinstance", "(", "s_...
Take a state and apply the iorder system
[ "Take", "a", "state", "and", "apply", "the", "iorder", "system" ]
python
train
angr/angr
angr/analyses/decompiler/clinic.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/decompiler/clinic.py#L169-L179
def _simplify_block(self, ail_block, stack_pointer_tracker=None): """ Simplify a single AIL block. :param ailment.Block ail_block: The AIL block to simplify. :param stack_pointer_tracker: The RegisterDeltaTracker analysis instance. :return: A simplified AIL block. """ simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker) return simp.result_block
[ "def", "_simplify_block", "(", "self", ",", "ail_block", ",", "stack_pointer_tracker", "=", "None", ")", ":", "simp", "=", "self", ".", "project", ".", "analyses", ".", "AILBlockSimplifier", "(", "ail_block", ",", "stack_pointer_tracker", "=", "stack_pointer_track...
Simplify a single AIL block. :param ailment.Block ail_block: The AIL block to simplify. :param stack_pointer_tracker: The RegisterDeltaTracker analysis instance. :return: A simplified AIL block.
[ "Simplify", "a", "single", "AIL", "block", "." ]
python
train
datastax/python-driver
cassandra/cluster.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L4187-L4206
def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
    """
    Fetch the query trace of the last response, or `None` when tracing
    was not enabled.

    May raise an exception if there are problems retrieving the trace
    details from Cassandra. If the trace is not available after
    `max_wait`, or if this ResponseFuture is not yet done (async
    execution), :exc:`cassandra.query.TraceUnavailable` is raised.

    `query_cl` is the consistency level used to poll the trace tables.
    """
    still_running = (self._final_result is _NOT_SET
                     and self._final_exception is None)
    if still_running:
        raise TraceUnavailable(
            "Trace information was not available. The ResponseFuture is not done.")
    if not self._query_traces:
        return None
    # Return the most recent trace.
    return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl)
[ "def", "get_query_trace", "(", "self", ",", "max_wait", "=", "None", ",", "query_cl", "=", "ConsistencyLevel", ".", "LOCAL_ONE", ")", ":", "if", "self", ".", "_final_result", "is", "_NOT_SET", "and", "self", ".", "_final_exception", "is", "None", ":", "raise...
Fetches and returns the query trace of the last response, or `None` if tracing was not enabled. Note that this may raise an exception if there are problems retrieving the trace details from Cassandra. If the trace is not available after `max_wait`, :exc:`cassandra.query.TraceUnavailable` will be raised. If the ResponseFuture is not done (async execution) and you try to retrieve the trace, :exc:`cassandra.query.TraceUnavailable` will be raised. `query_cl` is the consistency level used to poll the trace tables.
[ "Fetches", "and", "returns", "the", "query", "trace", "of", "the", "last", "response", "or", "None", "if", "tracing", "was", "not", "enabled", "." ]
python
train
intelligenia/modeltranslation
modeltranslation/transcache.py
https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/transcache.py#L34-L37
def _create_key(lang, instance): """Crea la clave única de la caché""" model_name = instance.__class__.__name__ return "{0}__{1}_{2}".format(lang,model_name,instance.id)
[ "def", "_create_key", "(", "lang", ",", "instance", ")", ":", "model_name", "=", "instance", ".", "__class__", ".", "__name__", "return", "\"{0}__{1}_{2}\"", ".", "format", "(", "lang", ",", "model_name", ",", "instance", ".", "id", ")" ]
Crea la clave única de la caché
[ "Crea", "la", "clave", "única", "de", "la", "caché" ]
python
train
mfcloud/python-zvm-sdk
smtLayer/generalUtils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/generalUtils.py#L25-L78
def cvtToBlocks(rh, diskSize):
    """
    Convert a disk storage value to a number of 512-byte blocks.

    Input:
       Request Handle
       Size of disk in bytes (string; optional trailing 'M' or 'G'
       unit suffix, case-insensitive)

    Output:
       Results structure:
          overallRC - Overall return code for the function:
                      0  - Everything went ok
                      4  - Input validation error
          rc        - Return code causing the return. Same as overallRC.
          rs        - Reason code causing the return.
          errno     - Errno value causing the return. Always zero.
       Converted value in blocks (string); on validation errors the
       normalized input string is returned unchanged.
    """
    rh.printSysLog("Enter generalUtils.cvtToBlocks")
    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}

    blocks = diskSize.strip().upper()
    unit = blocks[-1]
    if unit in ('G', 'M'):
        sizePart = blocks[:-1]
        if not sizePart:
            # The size of the disk is not valid (unit with no number).
            msg = msgs.msg['0200'][1] % (modId, blocks)
            rh.printLn("ES", msg)
            results = msgs.msg['0200'][0]
        else:
            bytesPerUnit = 1024 * 1024 if unit == 'M' else 1024 * 1024 * 1024
            try:
                # Round up to a whole number of 512-byte blocks.
                blockCount = (float(sizePart) * bytesPerUnit) / 512
                blocks = str(int(math.ceil(blockCount)))
            except Exception:
                # Failed to convert to a number of blocks.
                msg = msgs.msg['0201'][1] % (modId, sizePart)
                rh.printLn("ES", msg)
                results = msgs.msg['0201'][0]
    elif blocks.strip('1234567890'):
        # No unit suffix and not a plain integer block count.
        msg = msgs.msg['0202'][1] % (modId, blocks)
        rh.printLn("ES", msg)
        results = msgs.msg['0202'][0]

    rh.printSysLog("Exit generalUtils.cvtToBlocks, rc: " +
                   str(results['overallRC']))
    return results, blocks
[ "def", "cvtToBlocks", "(", "rh", ",", "diskSize", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter generalUtils.cvtToBlocks\"", ")", "blocks", "=", "0", "results", "=", "{", "'overallRC'", ":", "0", ",", "'rc'", ":", "0", ",", "'rs'", ":", "0", ",", "...
Convert a disk storage value to a number of blocks. Input: Request Handle Size of disk in bytes Output: Results structure: overallRC - Overall return code for the function: 0 - Everything went ok 4 - Input validation error rc - Return code causing the return. Same as overallRC. rs - Reason code causing the return. errno - Errno value causing the return. Always zero. Converted value in blocks
[ "Convert", "a", "disk", "storage", "value", "to", "a", "number", "of", "blocks", "." ]
python
train
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L99-L111
def _remove_files(files):
    """
    Remove all given files, silently skipping paths that do not exist.

    Args:
        files (list): List of filenames, which will be removed.
    """
    logger.debug("Request for file removal (_remove_files()).")
    for filename in files:
        if not os.path.exists(filename):
            continue
        logger.debug("Removing '%s'." % filename)
        os.remove(filename)
[ "def", "_remove_files", "(", "files", ")", ":", "logger", ".", "debug", "(", "\"Request for file removal (_remove_files()).\"", ")", "for", "fn", "in", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "logger", ".", "debug", "(", ...
Remove all given files. Args: files (list): List of filenames, which will be removed.
[ "Remove", "all", "given", "files", "." ]
python
train
JNRowe/upoints
upoints/nmea.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/nmea.py#L70-L85
def parse_latitude(latitude, hemisphere):
    """Parse a NMEA-formatted latitude pair.

    Args:
        latitude (str): Latitude in DDMM.MMMM
        hemisphere (str): North or South

    Returns:
        float: Decimal representation of latitude

    Raises:
        ValueError: If hemisphere is neither 'N' nor 'S'.
    """
    degrees = int(latitude[:2])
    minutes = float(latitude[2:])
    value = degrees + minutes / 60
    if hemisphere == 'N':
        return value
    if hemisphere == 'S':
        return -value
    raise ValueError('Incorrect North/South value %r' % hemisphere)
[ "def", "parse_latitude", "(", "latitude", ",", "hemisphere", ")", ":", "latitude", "=", "int", "(", "latitude", "[", ":", "2", "]", ")", "+", "float", "(", "latitude", "[", "2", ":", "]", ")", "/", "60", "if", "hemisphere", "==", "'S'", ":", "latit...
Parse a NMEA-formatted latitude pair. Args: latitude (str): Latitude in DDMM.MMMM hemisphere (str): North or South Returns: float: Decimal representation of latitude
[ "Parse", "a", "NMEA", "-", "formatted", "latitude", "pair", "." ]
python
train
pandas-dev/pandas
pandas/plotting/_misc.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L452-L563
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, use_columns=False, xticks=None, colormap=None, axvlines=True, axvlines_kwds=None, sort_labels=False, **kwds): """Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names cols : list, optional A list of column names to use ax : matplotlib.axis, optional matplotlib axis object color : list or tuple, optional Colors to use for the different classes use_columns : bool, optional If true, columns will be used as xticks xticks : list or tuple, optional A list of values to use for xticks colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines sort_labels : bool, False Sort class_column labels, useful when assigning colors .. versionadded:: 0.20.0 kwds : keywords Options to pass to matplotlib plotting method Returns ------- class:`matplotlib.axis.Axes` Examples -------- >>> from matplotlib import pyplot as plt >>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master' '/pandas/tests/data/iris.csv') >>> pd.plotting.parallel_coordinates( df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')) >>> plt.show() """ if axvlines_kwds is None: axvlines_kwds = {'linewidth': 1, 'color': 'black'} import matplotlib.pyplot as plt n = len(frame) classes = frame[class_column].drop_duplicates() class_col = frame[class_column] if cols is None: df = frame.drop(class_column, axis=1) else: df = frame[cols] used_legends = set() ncols = len(df.columns) # determine values to use for xticks if use_columns is True: if not np.all(np.isreal(list(df.columns))): raise ValueError('Columns must be numeric to be used as xticks') x = df.columns elif xticks is not None: if not np.all(np.isreal(xticks)): raise ValueError('xticks specified must be numeric') elif 
len(xticks) != ncols: raise ValueError('Length of xticks must match number of columns') x = xticks else: x = lrange(ncols) if ax is None: ax = plt.gca() color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) if sort_labels: classes = sorted(classes) color_values = sorted(color_values) colors = dict(zip(classes, color_values)) for i in range(n): y = df.iloc[i].values kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(x, y, color=colors[kls], label=label, **kwds) else: ax.plot(x, y, color=colors[kls], **kwds) if axvlines: for i in x: ax.axvline(i, **axvlines_kwds) ax.set_xticks(x) ax.set_xticklabels(df.columns) ax.set_xlim(x[0], x[-1]) ax.legend(loc='upper right') ax.grid() return ax
[ "def", "parallel_coordinates", "(", "frame", ",", "class_column", ",", "cols", "=", "None", ",", "ax", "=", "None", ",", "color", "=", "None", ",", "use_columns", "=", "False", ",", "xticks", "=", "None", ",", "colormap", "=", "None", ",", "axvlines", ...
Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names cols : list, optional A list of column names to use ax : matplotlib.axis, optional matplotlib axis object color : list or tuple, optional Colors to use for the different classes use_columns : bool, optional If true, columns will be used as xticks xticks : list or tuple, optional A list of values to use for xticks colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines sort_labels : bool, False Sort class_column labels, useful when assigning colors .. versionadded:: 0.20.0 kwds : keywords Options to pass to matplotlib plotting method Returns ------- class:`matplotlib.axis.Axes` Examples -------- >>> from matplotlib import pyplot as plt >>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master' '/pandas/tests/data/iris.csv') >>> pd.plotting.parallel_coordinates( df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')) >>> plt.show()
[ "Parallel", "coordinates", "plotting", "." ]
python
train
pydata/numexpr
numexpr/necompiler.py
https://github.com/pydata/numexpr/blob/364bac13d84524e0e01db892301b2959d822dcff/numexpr/necompiler.py#L273-L306
def stringToExpression(s, types, context): """Given a string, convert it to a tree of ExpressionNode's. """ old_ctx = expressions._context.get_current_context() try: expressions._context.set_new_context(context) # first compile to a code object to determine the names if context.get('truediv', False): flags = __future__.division.compiler_flag else: flags = 0 c = compile(s, '<expr>', 'eval', flags) # make VariableNode's for the names names = {} for name in c.co_names: if name == "None": names[name] = None elif name == "True": names[name] = True elif name == "False": names[name] = False else: t = types.get(name, default_type) names[name] = expressions.VariableNode(name, type_to_kind[t]) names.update(expressions.functions) # now build the expression ex = eval(c, names) if expressions.isConstant(ex): ex = expressions.ConstantNode(ex, expressions.getKind(ex)) elif not isinstance(ex, expressions.ExpressionNode): raise TypeError("unsupported expression type: %s" % type(ex)) finally: expressions._context.set_new_context(old_ctx) return ex
[ "def", "stringToExpression", "(", "s", ",", "types", ",", "context", ")", ":", "old_ctx", "=", "expressions", ".", "_context", ".", "get_current_context", "(", ")", "try", ":", "expressions", ".", "_context", ".", "set_new_context", "(", "context", ")", "# f...
Given a string, convert it to a tree of ExpressionNode's.
[ "Given", "a", "string", "convert", "it", "to", "a", "tree", "of", "ExpressionNode", "s", "." ]
python
train
rcsb/mmtf-python
mmtf/codecs/decoders/decoders.py
https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/codecs/decoders/decoders.py#L1-L14
def run_length_decode(in_array): """A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array""" switch=False out_array=[] for item in in_array: if switch==False: this_item = item switch=True else: switch=False out_array.extend([this_item]*int(item)) return out_array
[ "def", "run_length_decode", "(", "in_array", ")", ":", "switch", "=", "False", "out_array", "=", "[", "]", "for", "item", "in", "in_array", ":", "if", "switch", "==", "False", ":", "this_item", "=", "item", "switch", "=", "True", "else", ":", "switch", ...
A function to run length decode an int array. :param in_array: the input array of integers :return the decoded array
[ "A", "function", "to", "run", "length", "decode", "an", "int", "array", ".", ":", "param", "in_array", ":", "the", "input", "array", "of", "integers", ":", "return", "the", "decoded", "array" ]
python
train
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L107-L113
def set_progress(self, scan_id, progress): """ Sets scan_id scan's progress. """ if progress > 0 and progress <= 100: self.scans_table[scan_id]['progress'] = progress if progress == 100: self.scans_table[scan_id]['end_time'] = int(time.time())
[ "def", "set_progress", "(", "self", ",", "scan_id", ",", "progress", ")", ":", "if", "progress", ">", "0", "and", "progress", "<=", "100", ":", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'progress'", "]", "=", "progress", "if", "progress", ...
Sets scan_id scan's progress.
[ "Sets", "scan_id", "scan", "s", "progress", "." ]
python
train
bwohlberg/sporco
sporco/admm/admm.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/admm.py#L578-L600
def display_start(self): """Set up status display if option selected. NB: this method assumes that the first entry is the iteration count and the last is the rho value. """ if self.opt['Verbose']: # If AutoRho option enabled rho is included in iteration status if self.opt['AutoRho', 'Enabled']: hdrtxt = type(self).hdrtxt() else: hdrtxt = type(self).hdrtxt()[0:-1] # Call utility function to construct status display formatting hdrstr, fmtstr, nsep = common.solve_status_str( hdrtxt, fwdth0=type(self).fwiter, fprec=type(self).fpothr) # Print header and separator strings if self.opt['StatusHeader']: print(hdrstr) print("-" * nsep) else: fmtstr, nsep = '', 0 return fmtstr, nsep
[ "def", "display_start", "(", "self", ")", ":", "if", "self", ".", "opt", "[", "'Verbose'", "]", ":", "# If AutoRho option enabled rho is included in iteration status", "if", "self", ".", "opt", "[", "'AutoRho'", ",", "'Enabled'", "]", ":", "hdrtxt", "=", "type",...
Set up status display if option selected. NB: this method assumes that the first entry is the iteration count and the last is the rho value.
[ "Set", "up", "status", "display", "if", "option", "selected", ".", "NB", ":", "this", "method", "assumes", "that", "the", "first", "entry", "is", "the", "iteration", "count", "and", "the", "last", "is", "the", "rho", "value", "." ]
python
train
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L2597-L2627
def makeThumbnail(cls, inputFile, person, format, smaller): """ Make a thumbnail of a mugshot image and store it on disk. @param inputFile: The image to thumbnail. @type inputFile: C{file} @param person: The person this mugshot thumbnail is associated with. @type person: L{Person} @param format: The format of the data in C{inputFile}. @type format: C{str} (e.g. I{jpeg}) @param smaller: Thumbnails are available in two sizes. if C{smaller} is C{True}, then the thumbnail will be in the smaller of the two sizes. @type smaller: C{bool} @return: path to the thumbnail. @rtype: L{twisted.python.filepath.FilePath} """ dirsegs = ['mugshots', str(person.storeID)] if smaller: dirsegs.insert(1, 'smaller') size = cls.smallerSize else: size = cls.size atomicOutputFile = person.store.newFile(*dirsegs) makeThumbnail(inputFile, atomicOutputFile, size, format) atomicOutputFile.close() return atomicOutputFile.finalpath
[ "def", "makeThumbnail", "(", "cls", ",", "inputFile", ",", "person", ",", "format", ",", "smaller", ")", ":", "dirsegs", "=", "[", "'mugshots'", ",", "str", "(", "person", ".", "storeID", ")", "]", "if", "smaller", ":", "dirsegs", ".", "insert", "(", ...
Make a thumbnail of a mugshot image and store it on disk. @param inputFile: The image to thumbnail. @type inputFile: C{file} @param person: The person this mugshot thumbnail is associated with. @type person: L{Person} @param format: The format of the data in C{inputFile}. @type format: C{str} (e.g. I{jpeg}) @param smaller: Thumbnails are available in two sizes. if C{smaller} is C{True}, then the thumbnail will be in the smaller of the two sizes. @type smaller: C{bool} @return: path to the thumbnail. @rtype: L{twisted.python.filepath.FilePath}
[ "Make", "a", "thumbnail", "of", "a", "mugshot", "image", "and", "store", "it", "on", "disk", "." ]
python
train
SheffieldML/GPy
GPy/kern/src/todo/poly.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/poly.py#L102-L109
def gradients_X(self, dL_dK, X, X2, target): """Derivative of the covariance matrix with respect to X""" self._K_computations(X, X2) arg = self._K_poly_arg if X2 is None: target += 2*self.weight_variance*self.degree*self.variance*(((X[None,:, :])) *(arg**(self.degree-1))[:, :, None]*dL_dK[:, :, None]).sum(1) else: target += self.weight_variance*self.degree*self.variance*(((X2[None,:, :])) *(arg**(self.degree-1))[:, :, None]*dL_dK[:, :, None]).sum(1)
[ "def", "gradients_X", "(", "self", ",", "dL_dK", ",", "X", ",", "X2", ",", "target", ")", ":", "self", ".", "_K_computations", "(", "X", ",", "X2", ")", "arg", "=", "self", ".", "_K_poly_arg", "if", "X2", "is", "None", ":", "target", "+=", "2", "...
Derivative of the covariance matrix with respect to X
[ "Derivative", "of", "the", "covariance", "matrix", "with", "respect", "to", "X" ]
python
train
openearth/mmi-python
mmi/runner.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/runner.py#L329-L381
def create_sockets(self): """create zmq sockets""" ports = self.ports context = zmq.Context() poller = zmq.Poller() # Socket to handle init data rep = context.socket(zmq.REP) # this was inconsequent: here REQ is for the client, we reply with REP. # PULL and PUB is seen from here, not from the client. # Is now renamed to PUSH and SUB: everything is seen from outside. if "REQ" in ports: rep.bind( "tcp://*:{port}".format(port=ports["REQ"]) ) else: ports["REQ"] = rep.bind_to_random_port( "tcp://*" ) pull = context.socket(zmq.PULL) if "PUSH" in ports: pull.bind( "tcp://*:{port}".format(port=ports["PUSH"]) ) else: ports["PUSH"] = pull.bind_to_random_port( "tcp://*" ) # for sending model messages pub = context.socket(zmq.PUB) if "SUB" in ports: pub.bind( "tcp://*:{port}".format(port=ports["SUB"]) ) else: ports["SUB"] = pub.bind_to_random_port( "tcp://*" ) poller.register(rep, zmq.POLLIN) poller.register(pull, zmq.POLLIN) sockets = dict( poller=poller, rep=rep, pull=pull, pub=pub ) return sockets
[ "def", "create_sockets", "(", "self", ")", ":", "ports", "=", "self", ".", "ports", "context", "=", "zmq", ".", "Context", "(", ")", "poller", "=", "zmq", ".", "Poller", "(", ")", "# Socket to handle init data", "rep", "=", "context", ".", "socket", "(",...
create zmq sockets
[ "create", "zmq", "sockets" ]
python
train
f3at/feat
src/feat/models/model.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/model.py#L1227-L1238
def initiate(self): """If the returned deferred is fired with None, the item will be disabled as if did not exists.""" if not self.model.officer.is_item_allowed(self.model, self._name): return defer.succeed(None) if not callable(self._enabled): d = defer.succeed(self._enabled) else: context = self.model.make_context(key=self.name) d = self._enabled(None, context) return d.addCallback(lambda f: self if f else None)
[ "def", "initiate", "(", "self", ")", ":", "if", "not", "self", ".", "model", ".", "officer", ".", "is_item_allowed", "(", "self", ".", "model", ",", "self", ".", "_name", ")", ":", "return", "defer", ".", "succeed", "(", "None", ")", "if", "not", "...
If the returned deferred is fired with None, the item will be disabled as if did not exists.
[ "If", "the", "returned", "deferred", "is", "fired", "with", "None", "the", "item", "will", "be", "disabled", "as", "if", "did", "not", "exists", "." ]
python
train
oanda/v20-python
src/v20/account.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/account.py#L1684-L1828
def configure( self, accountID, **kwargs ): """ Set the client-configurable portions of an Account. Args: accountID: Account Identifier alias: Client-defined alias (name) for the Account marginRate: The string representation of a decimal number. Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'PATCH', '/v3/accounts/{accountID}/configuration' ) request.set_path_param( 'accountID', accountID ) body = EntityDict() if 'alias' in kwargs: body.set('alias', kwargs['alias']) if 'marginRate' in kwargs: body.set('marginRate', kwargs['marginRate']) request.set_body_dict(body.dict) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('clientConfigureTransaction') is not None: parsed_body['clientConfigureTransaction'] = \ self.ctx.transaction.ClientConfigureTransaction.from_dict( jbody['clientConfigureTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ 
self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
[ "def", "configure", "(", "self", ",", "accountID", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'PATCH'", ",", "'/v3/accounts/{accountID}/configuration'", ")", "request", ".", "set_path_param", "(", "'accountID'", ",", "accountID", ")", ...
Set the client-configurable portions of an Account. Args: accountID: Account Identifier alias: Client-defined alias (name) for the Account marginRate: The string representation of a decimal number. Returns: v20.response.Response containing the results from submitting the request
[ "Set", "the", "client", "-", "configurable", "portions", "of", "an", "Account", "." ]
python
train
log2timeline/plaso
plaso/analysis/tagging.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/tagging.py#L35-L56
def _AttemptAutoDetectTagFile(self, analysis_mediator): """Detects which tag file is most appropriate. Args: analysis_mediator (AnalysisMediator): analysis mediator. Returns: bool: True if a tag file is autodetected. """ self._autodetect_tag_file_attempt = True if not analysis_mediator.data_location: return False operating_system = analysis_mediator.operating_system.lower() filename = self._OS_TAG_FILES.get(operating_system, None) if not filename: return False logger.info('Using auto detected tag file: {0:s}'.format(filename)) tag_file_path = os.path.join(analysis_mediator.data_location, filename) self.SetAndLoadTagFile(tag_file_path) return True
[ "def", "_AttemptAutoDetectTagFile", "(", "self", ",", "analysis_mediator", ")", ":", "self", ".", "_autodetect_tag_file_attempt", "=", "True", "if", "not", "analysis_mediator", ".", "data_location", ":", "return", "False", "operating_system", "=", "analysis_mediator", ...
Detects which tag file is most appropriate. Args: analysis_mediator (AnalysisMediator): analysis mediator. Returns: bool: True if a tag file is autodetected.
[ "Detects", "which", "tag", "file", "is", "most", "appropriate", "." ]
python
train
asweigart/pytweening
pytweening/__init__.py
https://github.com/asweigart/pytweening/blob/20d74368e53dc7d0f77c810b624b2c90994f099d/pytweening/__init__.py#L498-L508
def easeInBack(n, s=1.70158): """A tween function that backs up first at the start and then goes to the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """ _checkRange(n) return n * n * ((s + 1) * n - s)
[ "def", "easeInBack", "(", "n", ",", "s", "=", "1.70158", ")", ":", "_checkRange", "(", "n", ")", "return", "n", "*", "n", "*", "(", "(", "s", "+", "1", ")", "*", "n", "-", "s", ")" ]
A tween function that backs up first at the start and then goes to the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
[ "A", "tween", "function", "that", "backs", "up", "first", "at", "the", "start", "and", "then", "goes", "to", "the", "destination", "." ]
python
train
bwengals/ccsnmultivar
ccsnmultivar/designmatrix.py
https://github.com/bwengals/ccsnmultivar/blob/dbadf52e728e0ce922cbc147864e693c2c2d305c/ccsnmultivar/designmatrix.py#L273-L339
def _parse_formula(formula): """ Parse formula into a dictionary formula_dict[variable_name] = [encoding, dropped_name] Parse interactions into a list inter_list = [[A,B], [A,C], [A,B,C]] formula = "A + beta + A*beta | Dev(A,drop=1), Poly(beta,degree=3)" """ #TODO: DEAL WITH BSPLINE HAS MULTIPLE ARGUEMENTS (NOT JUST ONE) # break formula apart from encoding instructions formula,instr = formula.replace(' ','').split('|') # break formula apart at + sign formula_terms = formula.split('+') # examine the instructions term, first split by ), instr = instr.split('),') # elements in the instr list will match the number of non interaction # elements in formula # go through each formula term, making a dictionary whose key is variable name formula_dict = {} encoding = [] other_arg = [] inter_list = [] for term in formula_terms: if "*" in term: # then this is an interaction term, make 'list of lists' term = term.split('*') inter_list.append(term) else: # then this is not an interaction term, make blank dictionary formula_dict[term] = [encoding, other_arg] # loop through instructions, parse each term for term in instr: # remove punctuation in term term = re.sub('[()]','',term) # check for each encoding type if "Dev" in term: # remove first three letters (Dev) term = term[3:] # split on comma var_name,arg = term.split(',') # split on equals sign in arg, take part after drop_name = arg.split('=')[1] # put var_name and drop_name into proper key in formula_dict formula_dict[var_name] = ["Dev",drop_name] elif "Dum" in term: # remove first three letters (Dum) term = term[3:] # split on comma var_name,arg = term.split(',') # split on equals sign in arg, take part after ref_name = arg.split('=')[1] # put var_name and drop_name into proper key in formula_dict formula_dict[var_name] = ["Dum",ref_name] elif "Poly" in term: # remove first four letters (Poly) term = term[4:] # split on comma var_name,arg = term.split(',') # split on equals sign in arg, take part after degree = 
arg.split('=')[1] # put var_name and drop_name into proper key in formula_dict formula_dict[var_name] = ["Poly",degree] else: raise Exception("Unknown Encoding") return formula_dict,inter_list
[ "def", "_parse_formula", "(", "formula", ")", ":", "#TODO: DEAL WITH BSPLINE HAS MULTIPLE ARGUEMENTS (NOT JUST ONE)", "# break formula apart from encoding instructions", "formula", ",", "instr", "=", "formula", ".", "replace", "(", "' '", ",", "''", ")", ".", "split", "("...
Parse formula into a dictionary formula_dict[variable_name] = [encoding, dropped_name] Parse interactions into a list inter_list = [[A,B], [A,C], [A,B,C]] formula = "A + beta + A*beta | Dev(A,drop=1), Poly(beta,degree=3)"
[ "Parse", "formula", "into", "a", "dictionary", "formula_dict", "[", "variable_name", "]", "=", "[", "encoding", "dropped_name", "]", "Parse", "interactions", "into", "a", "list", "inter_list", "=", "[[", "A", "B", "]", "[", "A", "C", "]", "[", "A", "B", ...
python
train
cimatosa/progression
progression/terminal.py
https://github.com/cimatosa/progression/blob/82cf74a25a47f9bda96157cc2c88e5975c20b41d/progression/terminal.py#L95-L131
def terminal_reserve(progress_obj, terminal_obj=None, identifier=None): """ Registers the terminal (stdout) for printing. Useful to prevent multiple processes from writing progress bars to stdout. One process (server) prints to stdout and a couple of subprocesses do not print to the same stdout, because the server has reserved it. Of course, the clients have to be nice and check with terminal_reserve first if they should (not) print. Nothing is locked. Returns ------- True if reservation was successful (or if we have already reserved this tty), False if there already is a reservation from another instance. """ if terminal_obj is None: terminal_obj = sys.stdout if identifier is None: identifier = '' if terminal_obj in TERMINAL_RESERVATION: # terminal was already registered log.debug("this terminal %s has already been added to reservation list", terminal_obj) if TERMINAL_RESERVATION[terminal_obj] is progress_obj: log.debug("we %s have already reserved this terminal %s", progress_obj, terminal_obj) return True else: log.debug("someone else %s has already reserved this terminal %s", TERMINAL_RESERVATION[terminal_obj], terminal_obj) return False else: # terminal not yet registered log.debug("terminal %s was reserved for us %s", terminal_obj, progress_obj) TERMINAL_RESERVATION[terminal_obj] = progress_obj return True
[ "def", "terminal_reserve", "(", "progress_obj", ",", "terminal_obj", "=", "None", ",", "identifier", "=", "None", ")", ":", "if", "terminal_obj", "is", "None", ":", "terminal_obj", "=", "sys", ".", "stdout", "if", "identifier", "is", "None", ":", "identifier...
Registers the terminal (stdout) for printing. Useful to prevent multiple processes from writing progress bars to stdout. One process (server) prints to stdout and a couple of subprocesses do not print to the same stdout, because the server has reserved it. Of course, the clients have to be nice and check with terminal_reserve first if they should (not) print. Nothing is locked. Returns ------- True if reservation was successful (or if we have already reserved this tty), False if there already is a reservation from another instance.
[ "Registers", "the", "terminal", "(", "stdout", ")", "for", "printing", "." ]
python
train
nickoala/telepot
telepot/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L890-L901
def editMessageCaption(self, msg_identifier, caption=None, parse_mode=None, reply_markup=None): """ See: https://core.telegram.org/bots/api#editmessagecaption :param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText` """ p = _strip(locals(), more=['msg_identifier']) p.update(_dismantle_message_identifier(msg_identifier)) return self._api_request('editMessageCaption', _rectify(p))
[ "def", "editMessageCaption", "(", "self", ",", "msg_identifier", ",", "caption", "=", "None", ",", "parse_mode", "=", "None", ",", "reply_markup", "=", "None", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'msg_identifier...
See: https://core.telegram.org/bots/api#editmessagecaption :param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#editmessagecaption" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_machine_tree.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machine_tree.py#L300-L336
def update(self, changed_state_model=None, with_expand=False): """Checks if all states are in tree and if tree has states which were deleted :param changed_state_model: Model that row has to be updated :param with_expand: The expand flag for the tree """ if not self.view_is_registered: return # define initial state-model for update if changed_state_model is None: # reset all parent_row_iter = None self.state_row_iter_dict_by_state_path.clear() self.tree_store.clear() if self._selected_sm_model: changed_state_model = self._selected_sm_model.root_state else: return else: # pick if changed_state_model.state.is_root_state: parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()] else: if changed_state_model.state.is_root_state_of_library: # because either lib-state or lib-state-root is in tree the next higher hierarchy state is updated changed_upper_state_m = changed_state_model.parent.parent else: changed_upper_state_m = changed_state_model.parent # TODO check the work around of the next 2 lines while refactoring -> it is a check to be more robust while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path: # show Warning because because avoided method states_update logger.warning("Take a parent state because this is not in.") changed_upper_state_m = changed_upper_state_m.parent parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()] # do recursive update self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand)
[ "def", "update", "(", "self", ",", "changed_state_model", "=", "None", ",", "with_expand", "=", "False", ")", ":", "if", "not", "self", ".", "view_is_registered", ":", "return", "# define initial state-model for update", "if", "changed_state_model", "is", "None", ...
Checks if all states are in tree and if tree has states which were deleted :param changed_state_model: Model that row has to be updated :param with_expand: The expand flag for the tree
[ "Checks", "if", "all", "states", "are", "in", "tree", "and", "if", "tree", "has", "states", "which", "were", "deleted" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L599-L603
def channels_voice_agent_ticket_display_create(self, agent_id, ticket_id, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/voice-api/partner_edition#open-ticket-in-agents-browser" api_path = "/api/v2/channels/voice/agents/{agent_id}/tickets/{ticket_id}/display.json" api_path = api_path.format(agent_id=agent_id, ticket_id=ticket_id) return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "channels_voice_agent_ticket_display_create", "(", "self", ",", "agent_id", ",", "ticket_id", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/channels/voice/agents/{agent_id}/tickets/{ticket_id}/display.json\"", "api_path", "=", "api_path"...
https://developer.zendesk.com/rest_api/docs/voice-api/partner_edition#open-ticket-in-agents-browser
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "voice", "-", "api", "/", "partner_edition#open", "-", "ticket", "-", "in", "-", "agents", "-", "browser" ]
python
train
box/rotunicode
rotunicode/rotunicode.py
https://github.com/box/rotunicode/blob/6149b6bb5bb50d322db248acfdb910dc3cb1bcc2/rotunicode/rotunicode.py#L50-L73
def encode(cls, string, errors='strict'): """Return the encoded version of a string. :param string: The input string to encode. :type string: `basestring` :param errors: The error handling scheme. Only 'strict' is supported. :type errors: `basestring` :return: Tuple of encoded string and number of input bytes consumed. :rtype: `tuple` (`unicode`, `int`) """ if errors != 'strict': raise UnicodeError('Unsupported error handling {0}'.format(errors)) unicode_string = cls._ensure_unicode_string(string) encoded = unicode_string.translate(cls._encoding_table) return encoded, len(string)
[ "def", "encode", "(", "cls", ",", "string", ",", "errors", "=", "'strict'", ")", ":", "if", "errors", "!=", "'strict'", ":", "raise", "UnicodeError", "(", "'Unsupported error handling {0}'", ".", "format", "(", "errors", ")", ")", "unicode_string", "=", "cls...
Return the encoded version of a string. :param string: The input string to encode. :type string: `basestring` :param errors: The error handling scheme. Only 'strict' is supported. :type errors: `basestring` :return: Tuple of encoded string and number of input bytes consumed. :rtype: `tuple` (`unicode`, `int`)
[ "Return", "the", "encoded", "version", "of", "a", "string", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/Plots/output_plots.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Plots/output_plots.py#L639-L655
def plot_main(self, with_caustics=False, image_names=False): """ print the main plots together in a joint frame :return: """ f, axes = plt.subplots(2, 3, figsize=(16, 8)) self.data_plot(ax=axes[0, 0]) self.model_plot(ax=axes[0, 1], image_names=True) self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6) self.source_plot(ax=axes[1, 0], deltaPix_source=0.01, numPix=100, with_caustics=with_caustics) self.convergence_plot(ax=axes[1, 1], v_max=1) self.magnification_plot(ax=axes[1, 2]) f.tight_layout() f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) return f, axes
[ "def", "plot_main", "(", "self", ",", "with_caustics", "=", "False", ",", "image_names", "=", "False", ")", ":", "f", ",", "axes", "=", "plt", ".", "subplots", "(", "2", ",", "3", ",", "figsize", "=", "(", "16", ",", "8", ")", ")", "self", ".", ...
print the main plots together in a joint frame :return:
[ "print", "the", "main", "plots", "together", "in", "a", "joint", "frame" ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/openephys.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/openephys.py#L194-L219
def _read_date(settings_file): """Get the data from the settings.xml file Parameters ---------- settings_file : Path path to settings.xml inside open-ephys folder Returns ------- datetime start time of the recordings Notes ----- The start time is present in the header of each file. This might be useful if 'settings.xml' is not present. """ root = ElementTree.parse(settings_file).getroot() for e0 in root: if e0.tag == 'INFO': for e1 in e0: if e1.tag == 'DATE': break return datetime.strptime(e1.text, '%d %b %Y %H:%M:%S')
[ "def", "_read_date", "(", "settings_file", ")", ":", "root", "=", "ElementTree", ".", "parse", "(", "settings_file", ")", ".", "getroot", "(", ")", "for", "e0", "in", "root", ":", "if", "e0", ".", "tag", "==", "'INFO'", ":", "for", "e1", "in", "e0", ...
Get the data from the settings.xml file Parameters ---------- settings_file : Path path to settings.xml inside open-ephys folder Returns ------- datetime start time of the recordings Notes ----- The start time is present in the header of each file. This might be useful if 'settings.xml' is not present.
[ "Get", "the", "data", "from", "the", "settings", ".", "xml", "file" ]
python
train
devricks/soft_drf
soft_drf/auth/utilities.py
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/auth/utilities.py#L13-L27
def jwt_get_secret_key(payload=None): """ For enchanced security you may use secret key on user itself. This way you have an option to logout only this user if: - token is compromised - password is changed - etc. """ User = get_user_model() # noqa if api_settings.JWT_GET_USER_SECRET_KEY: user = User.objects.get(pk=payload.get('user_id')) key = str(api_settings.JWT_GET_USER_SECRET_KEY(user)) return key return api_settings.JWT_SECRET_KEY
[ "def", "jwt_get_secret_key", "(", "payload", "=", "None", ")", ":", "User", "=", "get_user_model", "(", ")", "# noqa", "if", "api_settings", ".", "JWT_GET_USER_SECRET_KEY", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "pk", "=", "payload", "....
For enchanced security you may use secret key on user itself. This way you have an option to logout only this user if: - token is compromised - password is changed - etc.
[ "For", "enchanced", "security", "you", "may", "use", "secret", "key", "on", "user", "itself", ".", "This", "way", "you", "have", "an", "option", "to", "logout", "only", "this", "user", "if", ":", "-", "token", "is", "compromised", "-", "password", "is", ...
python
train
zhanglab/psamm
psamm/datasource/native.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/native.py#L547-L614
def create_metabolic_model(self): """Create a :class:`psamm.metabolicmodel.MetabolicModel`.""" def _translate_compartments(reaction, compartment): """Translate compound with missing compartments. These compounds will have the specified compartment in the output. """ left = (((c.in_compartment(compartment), v) if c.compartment is None else (c, v)) for c, v in reaction.left) right = (((c.in_compartment(compartment), v) if c.compartment is None else (c, v)) for c, v in reaction.right) return Reaction(reaction.direction, left, right) # Create metabolic model database = DictDatabase() for reaction in self.reactions: if reaction.equation is not None: equation = _translate_compartments( reaction.equation, self.default_compartment) database.set_reaction(reaction.id, equation) undefined_compartments = set() undefined_compounds = set() extracellular_compounds = set() extracellular = self.extracellular_compartment for reaction in database.reactions: for compound, _ in database.get_reaction_values(reaction): if compound.name not in self.compounds: undefined_compounds.add(compound.name) if compound.compartment == extracellular: extracellular_compounds.add(compound.name) if compound.compartment not in self.compartments: undefined_compartments.add(compound.compartment) for compartment in sorted(undefined_compartments): logger.warning( 'The compartment {} was not defined in the list' ' of compartments'.format(compartment)) for compound in sorted(undefined_compounds): logger.warning( 'The compound {} was not defined in the list' ' of compounds'.format(compound)) exchange_compounds = set() for exchange_compound in self.exchange: if exchange_compound.compartment == extracellular: exchange_compounds.add(exchange_compound.name) for compound in sorted(extracellular_compounds - exchange_compounds): logger.warning( 'The compound {} was in the extracellular compartment' ' but not defined in the exchange compounds'.format(compound)) for compound in sorted(exchange_compounds - 
extracellular_compounds): logger.warning( 'The compound {} was defined in the exchange compounds but' ' is not in the extracellular compartment'.format(compound)) model_definition = None if len(self.model) > 0: model_definition = self.model return MetabolicModel.load_model( database, model_definition, itervalues(self.exchange), itervalues(self.limits), v_max=self.default_flux_limit)
[ "def", "create_metabolic_model", "(", "self", ")", ":", "def", "_translate_compartments", "(", "reaction", ",", "compartment", ")", ":", "\"\"\"Translate compound with missing compartments.\n\n These compounds will have the specified compartment in the output.\n \"\...
Create a :class:`psamm.metabolicmodel.MetabolicModel`.
[ "Create", "a", ":", "class", ":", "psamm", ".", "metabolicmodel", ".", "MetabolicModel", "." ]
python
train
pytorch/text
torchtext/datasets/imdb.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/datasets/imdb.py#L40-L55
def splits(cls, text_field, label_field, root='.data', train='train', test='test', **kwargs): """Create dataset objects for splits of the IMDB dataset. Arguments: text_field: The field that will be used for the sentence. label_field: The field that will be used for label data. root: Root dataset storage directory. Default is '.data'. train: The directory that contains the training examples test: The directory that contains the test examples Remaining keyword arguments: Passed to the splits method of Dataset. """ return super(IMDB, cls).splits( root=root, text_field=text_field, label_field=label_field, train=train, validation=None, test=test, **kwargs)
[ "def", "splits", "(", "cls", ",", "text_field", ",", "label_field", ",", "root", "=", "'.data'", ",", "train", "=", "'train'", ",", "test", "=", "'test'", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "IMDB", ",", "cls", ")", ".", "spl...
Create dataset objects for splits of the IMDB dataset. Arguments: text_field: The field that will be used for the sentence. label_field: The field that will be used for label data. root: Root dataset storage directory. Default is '.data'. train: The directory that contains the training examples test: The directory that contains the test examples Remaining keyword arguments: Passed to the splits method of Dataset.
[ "Create", "dataset", "objects", "for", "splits", "of", "the", "IMDB", "dataset", "." ]
python
train
spyder-ide/spyder
spyder/preferences/layoutdialog.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/layoutdialog.py#L156-L161
def check_text(self, text): """Disable empty layout name possibility""" if to_text_string(text) == u'': self.button_ok.setEnabled(False) else: self.button_ok.setEnabled(True)
[ "def", "check_text", "(", "self", ",", "text", ")", ":", "if", "to_text_string", "(", "text", ")", "==", "u''", ":", "self", ".", "button_ok", ".", "setEnabled", "(", "False", ")", "else", ":", "self", ".", "button_ok", ".", "setEnabled", "(", "True", ...
Disable empty layout name possibility
[ "Disable", "empty", "layout", "name", "possibility" ]
python
train
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L4328-L4352
def next_available_name(self, fmt_str='arr{0}', counter=None): """Create a new array out of the given format string Parameters ---------- format_str: str The base string to use. ``'{0}'`` will be replaced by a counter counter: iterable An iterable where the numbers should be drawn from. If None, ``range(100)`` is used Returns ------- str A possible name that is not in the current project""" names = self.arr_names counter = counter or iter(range(1000)) try: new_name = next( filter(lambda n: n not in names, map(fmt_str.format, counter))) except StopIteration: raise ValueError( "{0} already in the list".format(fmt_str)) return new_name
[ "def", "next_available_name", "(", "self", ",", "fmt_str", "=", "'arr{0}'", ",", "counter", "=", "None", ")", ":", "names", "=", "self", ".", "arr_names", "counter", "=", "counter", "or", "iter", "(", "range", "(", "1000", ")", ")", "try", ":", "new_na...
Create a new array out of the given format string Parameters ---------- format_str: str The base string to use. ``'{0}'`` will be replaced by a counter counter: iterable An iterable where the numbers should be drawn from. If None, ``range(100)`` is used Returns ------- str A possible name that is not in the current project
[ "Create", "a", "new", "array", "out", "of", "the", "given", "format", "string" ]
python
train
opennode/waldur-core
waldur_core/cost_tracking/handlers.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/cost_tracking/handlers.py#L16-L35
def scope_deletion(sender, instance, **kwargs): """ Run different actions on price estimate scope deletion. If scope is a customer - delete all customer estimates and their children. If scope is a deleted resource - redefine consumption details, recalculate ancestors estimates and update estimate details. If scope is a unlinked resource - delete all resource price estimates and update ancestors. In all other cases - update price estimate details. """ is_resource = isinstance(instance, structure_models.ResourceMixin) if is_resource and getattr(instance, 'PERFORM_UNLINK', False): _resource_unlink(resource=instance) elif is_resource and not getattr(instance, 'PERFORM_UNLINK', False): _resource_deletion(resource=instance) elif isinstance(instance, structure_models.Customer): _customer_deletion(customer=instance) else: for price_estimate in models.PriceEstimate.objects.filter(scope=instance): price_estimate.init_details()
[ "def", "scope_deletion", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "is_resource", "=", "isinstance", "(", "instance", ",", "structure_models", ".", "ResourceMixin", ")", "if", "is_resource", "and", "getattr", "(", "instance", ",", "'...
Run different actions on price estimate scope deletion. If scope is a customer - delete all customer estimates and their children. If scope is a deleted resource - redefine consumption details, recalculate ancestors estimates and update estimate details. If scope is a unlinked resource - delete all resource price estimates and update ancestors. In all other cases - update price estimate details.
[ "Run", "different", "actions", "on", "price", "estimate", "scope", "deletion", "." ]
python
train
odrling/peony-twitter
examples/birthday.py
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/examples/birthday.py#L23-L33
async def set_tz(self): """ set the environment timezone to the timezone set in your twitter settings """ settings = await self.api.account.settings.get() tz = settings.time_zone.tzinfo_name os.environ['TZ'] = tz time.tzset()
[ "async", "def", "set_tz", "(", "self", ")", ":", "settings", "=", "await", "self", ".", "api", ".", "account", ".", "settings", ".", "get", "(", ")", "tz", "=", "settings", ".", "time_zone", ".", "tzinfo_name", "os", ".", "environ", "[", "'TZ'", "]",...
set the environment timezone to the timezone set in your twitter settings
[ "set", "the", "environment", "timezone", "to", "the", "timezone", "set", "in", "your", "twitter", "settings" ]
python
valid
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3612-L3626
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_ITEMS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
[ "def", "get_items_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_ITEMS", ",", "per_page", "=", ...
Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "items", "of", "delivery", "note", "per", "page" ]
python
train
ic-labs/django-icekit
icekit/templatetags/icekit_tags.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/templatetags/icekit_tags.py#L250-L271
def admin_link(obj): """ Returns a link to the admin URL of an object. No permissions checking is involved, so use with caution to avoid exposing the link to unauthorised users. Example:: {{ foo_obj|admin_link }} renders as:: <a href='/admin/foo/123'>Foo</a> :param obj: A Django model instance. :return: A safe string expressing an HTML link to the admin page for an object. """ if hasattr(obj, 'get_admin_link'): return mark_safe(obj.get_admin_link()) return mark_safe(admin_link_fn(obj))
[ "def", "admin_link", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'get_admin_link'", ")", ":", "return", "mark_safe", "(", "obj", ".", "get_admin_link", "(", ")", ")", "return", "mark_safe", "(", "admin_link_fn", "(", "obj", ")", ")" ]
Returns a link to the admin URL of an object. No permissions checking is involved, so use with caution to avoid exposing the link to unauthorised users. Example:: {{ foo_obj|admin_link }} renders as:: <a href='/admin/foo/123'>Foo</a> :param obj: A Django model instance. :return: A safe string expressing an HTML link to the admin page for an object.
[ "Returns", "a", "link", "to", "the", "admin", "URL", "of", "an", "object", "." ]
python
train
walchko/pyrk
pyrk/pyrk.py
https://github.com/walchko/pyrk/blob/f75dce843e795343d37cfe20d780989f56f0c418/pyrk/pyrk.py#L44-L58
def step(self, y, u, t, h): """ This is called by solve, but can be called by the user who wants to run through an integration with a control force. y - state at t u - control inputs at t t - time h - step size """ k1 = h * self.func(t, y, u) k2 = h * self.func(t + .5*h, y + .5*h*k1, u) k3 = h * self.func(t + .5*h, y + .5*h*k2, u) k4 = h * self.func(t + h, y + h*k3, u) return y + (k1 + 2*k2 + 2*k3 + k4) / 6.0
[ "def", "step", "(", "self", ",", "y", ",", "u", ",", "t", ",", "h", ")", ":", "k1", "=", "h", "*", "self", ".", "func", "(", "t", ",", "y", ",", "u", ")", "k2", "=", "h", "*", "self", ".", "func", "(", "t", "+", ".5", "*", "h", ",", ...
This is called by solve, but can be called by the user who wants to run through an integration with a control force. y - state at t u - control inputs at t t - time h - step size
[ "This", "is", "called", "by", "solve", "but", "can", "be", "called", "by", "the", "user", "who", "wants", "to", "run", "through", "an", "integration", "with", "a", "control", "force", "." ]
python
train
bapakode/OmMongo
ommongo/query.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/query.py#L381-L394
def query_bypass(self, query, raw_output=True): ''' Bypass query meaning that field check and validation is skipped, then query object directly executed by pymongo. :param raw_output: Skip OmMongo ORM layer (default: True) ''' if not isinstance(query, dict): raise BadQueryException('Query must be dict.') self.__query = query if raw_output: self._raw_output = True return self.__get_query_result().cursor else: return self
[ "def", "query_bypass", "(", "self", ",", "query", ",", "raw_output", "=", "True", ")", ":", "if", "not", "isinstance", "(", "query", ",", "dict", ")", ":", "raise", "BadQueryException", "(", "'Query must be dict.'", ")", "self", ".", "__query", "=", "query...
Bypass query meaning that field check and validation is skipped, then query object directly executed by pymongo. :param raw_output: Skip OmMongo ORM layer (default: True)
[ "Bypass", "query", "meaning", "that", "field", "check", "and", "validation", "is", "skipped", "then", "query", "object", "directly", "executed", "by", "pymongo", ".", ":", "param", "raw_output", ":", "Skip", "OmMongo", "ORM", "layer", "(", "default", ":", "T...
python
train
portfoliome/foil
foil/compose.py
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/compose.py#L53-L58
def flip_iterable_dict(d: dict) -> dict: """Transform dictionary to unpack values to map to respective key.""" value_keys = disjoint_union((cartesian_product((v, k)) for k, v in d.items())) return dict(value_keys)
[ "def", "flip_iterable_dict", "(", "d", ":", "dict", ")", "->", "dict", ":", "value_keys", "=", "disjoint_union", "(", "(", "cartesian_product", "(", "(", "v", ",", "k", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", ")", ...
Transform dictionary to unpack values to map to respective key.
[ "Transform", "dictionary", "to", "unpack", "values", "to", "map", "to", "respective", "key", "." ]
python
train
alerta/python-alerta-client
alertaclient/commands/cmd_heartbeats.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_heartbeats.py#L16-L93
def cli(obj, alert, severity, timeout, purge): """List heartbeats.""" client = obj['client'] if obj['output'] == 'json': r = client.http.get('/heartbeats') click.echo(json.dumps(r['heartbeats'], sort_keys=True, indent=4, ensure_ascii=False)) else: timezone = obj['timezone'] headers = { 'id': 'ID', 'origin': 'ORIGIN', 'customer': 'CUSTOMER', 'tags': 'TAGS', 'createTime': 'CREATED', 'receiveTime': 'RECEIVED', 'latency': 'LATENCY', 'timeout': 'TIMEOUT', 'since': 'SINCE', 'status': 'STATUS' } heartbeats = client.get_heartbeats() click.echo(tabulate([h.tabular(timezone) for h in heartbeats], headers=headers, tablefmt=obj['output'])) not_ok = [hb for hb in heartbeats if hb.status != 'ok'] if purge: with click.progressbar(not_ok, label='Purging {} heartbeats'.format(len(not_ok))) as bar: for b in bar: client.delete_heartbeat(b.id) elif alert: with click.progressbar(heartbeats, label='Alerting {} heartbeats'.format(len(heartbeats))) as bar: for b in bar: params = dict(filter(lambda a: len(a) == 2, map(lambda a: a.split(':'), b.tags))) environment = params.get('environment', 'Production') group = params.get('group', 'System') tags = list(filter(lambda a: not a.startswith('environment:') and not a.startswith('group:'), b.tags)) if b.status == 'expired': # aka. 
"stale" client.send_alert( resource=b.origin, event='HeartbeatFail', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity=severity, value='{}'.format(b.since), text='Heartbeat not received in {} seconds'.format(b.timeout), tags=tags, type='heartbeatAlert', timeout=timeout, customer=b.customer ) elif b.status == 'slow': client.send_alert( resource=b.origin, event='HeartbeatSlow', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity=severity, value='{}ms'.format(b.latency), text='Heartbeat took more than {}ms to be processed'.format(MAX_LATENCY), tags=tags, type='heartbeatAlert', timeout=timeout, customer=b.customer ) else: client.send_alert( resource=b.origin, event='HeartbeatOK', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity='normal', value='', text='Heartbeat OK', tags=tags, type='heartbeatAlert', customer=b.customer )
[ "def", "cli", "(", "obj", ",", "alert", ",", "severity", ",", "timeout", ",", "purge", ")", ":", "client", "=", "obj", "[", "'client'", "]", "if", "obj", "[", "'output'", "]", "==", "'json'", ":", "r", "=", "client", ".", "http", ".", "get", "(",...
List heartbeats.
[ "List", "heartbeats", "." ]
python
train
blackecho/Deep-Learning-TensorFlow
yadlt/utils/utilities.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/utils/utilities.py#L46-L57
def xavier_init(fan_in, fan_out, const=1): """Xavier initialization of network weights. https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow :param fan_in: fan in of the network (n_features) :param fan_out: fan out of the network (n_components) :param const: multiplicative constant """ low = -const * np.sqrt(6.0 / (fan_in + fan_out)) high = const * np.sqrt(6.0 / (fan_in + fan_out)) return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high)
[ "def", "xavier_init", "(", "fan_in", ",", "fan_out", ",", "const", "=", "1", ")", ":", "low", "=", "-", "const", "*", "np", ".", "sqrt", "(", "6.0", "/", "(", "fan_in", "+", "fan_out", ")", ")", "high", "=", "const", "*", "np", ".", "sqrt", "("...
Xavier initialization of network weights. https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow :param fan_in: fan in of the network (n_features) :param fan_out: fan out of the network (n_components) :param const: multiplicative constant
[ "Xavier", "initialization", "of", "network", "weights", "." ]
python
train
saltstack/salt
salt/states/pcs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L900-L936
def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=None): ''' Ensure a resource operation default in the cluster is set to a given value Should be run on one cluster node only (there may be races) Can only be run on a node with a functional pacemaker/corosync name Irrelevant, not used (recommended: pcs_properties__resource_op_defaults_to_{{op_default}}) op_default name of the operation default resource property value value of the operation default resource property extra_args additional options for the pcs command cibname use a cached CIB-file named like cibname instead of the live CIB Example: .. code-block:: yaml pcs_properties__resource_op_defaults_to_monitor-interval: pcs.resource_op_defaults_to: - op_default: monitor-interval - value: 60s - cibname: cib_for_cluster_settings ''' return _item_present(name=name, item='resource', item_id='{0}={1}'.format(op_default, value), item_type=None, show=['op', 'defaults'], create=['op', 'defaults'], extra_args=extra_args, cibname=cibname)
[ "def", "resource_op_defaults_to", "(", "name", ",", "op_default", ",", "value", ",", "extra_args", "=", "None", ",", "cibname", "=", "None", ")", ":", "return", "_item_present", "(", "name", "=", "name", ",", "item", "=", "'resource'", ",", "item_id", "=",...
Ensure a resource operation default in the cluster is set to a given value Should be run on one cluster node only (there may be races) Can only be run on a node with a functional pacemaker/corosync name Irrelevant, not used (recommended: pcs_properties__resource_op_defaults_to_{{op_default}}) op_default name of the operation default resource property value value of the operation default resource property extra_args additional options for the pcs command cibname use a cached CIB-file named like cibname instead of the live CIB Example: .. code-block:: yaml pcs_properties__resource_op_defaults_to_monitor-interval: pcs.resource_op_defaults_to: - op_default: monitor-interval - value: 60s - cibname: cib_for_cluster_settings
[ "Ensure", "a", "resource", "operation", "default", "in", "the", "cluster", "is", "set", "to", "a", "given", "value" ]
python
train
lehins/python-wepay
wepay/calls/user.py
https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/user.py#L59-L89
def __register(self, client_id, client_secret, email, scope, first_name, last_name, original_ip, original_device, **kwargs): """Call documentation: `/user/register <https://www.wepay.com/developer/reference/user#register>`_, plus extra keyword parameter: :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` .. note :: This call is NOT supported by API versions older then '2014-01-08'. """ params = { 'client_id': client_id, 'client_secret': client_secret, 'email': email, 'scope': scope, 'first_name': first_name, 'last_name': last_name, 'original_ip': original_ip, 'original_device': original_device } return self.make_call(self.__register, params, kwargs)
[ "def", "__register", "(", "self", ",", "client_id", ",", "client_secret", ",", "email", ",", "scope", ",", "first_name", ",", "last_name", ",", "original_ip", ",", "original_device", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'client_id'", ":"...
Call documentation: `/user/register <https://www.wepay.com/developer/reference/user#register>`_, plus extra keyword parameter: :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` .. note :: This call is NOT supported by API versions older then '2014-01-08'.
[ "Call", "documentation", ":", "/", "user", "/", "register", "<https", ":", "//", "www", ".", "wepay", ".", "com", "/", "developer", "/", "reference", "/", "user#register", ">", "_", "plus", "extra", "keyword", "parameter", ":", ":", "keyword", "bool", "b...
python
train
boriel/zxbasic
zxblex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxblex.py#L643-L670
def is_label(token): """ Return whether the token is a label (an integer number or id at the beginning of a line. To do so, we compute find_column() and moves back to the beginning of the line if previous chars are spaces or tabs. If column 0 is reached, it's a label. """ if not LABELS_ALLOWED: return False c = i = token.lexpos input = token.lexer.lexdata c -= 1 while c > 0 and input[c] in (' ', '\t'): c -= 1 while i > 0: if input[i] == '\n': break i -= 1 column = c - i if column == 0: column += 1 return column == 1
[ "def", "is_label", "(", "token", ")", ":", "if", "not", "LABELS_ALLOWED", ":", "return", "False", "c", "=", "i", "=", "token", ".", "lexpos", "input", "=", "token", ".", "lexer", ".", "lexdata", "c", "-=", "1", "while", "c", ">", "0", "and", "input...
Return whether the token is a label (an integer number or id at the beginning of a line. To do so, we compute find_column() and moves back to the beginning of the line if previous chars are spaces or tabs. If column 0 is reached, it's a label.
[ "Return", "whether", "the", "token", "is", "a", "label", "(", "an", "integer", "number", "or", "id", "at", "the", "beginning", "of", "a", "line", "." ]
python
train
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L216-L245
def get_index_from_coord(coord, base_index): """Function to return the coordinate as integer, integer array or slice If `coord` is zero-dimensional, the corresponding integer in `base_index` will be supplied. Otherwise it is first tried to return a slice, if that does not work an integer array with the corresponding indices is returned. Parameters ---------- coord: xarray.Coordinate or xarray.Variable Coordinate to convert base_index: pandas.Index The base index from which the `coord` was extracted Returns ------- int, array of ints or slice The indexer that can be used to access the `coord` in the `base_index` """ try: values = coord.values except AttributeError: values = coord if values.ndim == 0: return base_index.get_loc(values[()]) if len(values) == len(base_index) and (values == base_index).all(): return slice(None) values = np.array(list(map(lambda i: base_index.get_loc(i), values))) return to_slice(values) or values
[ "def", "get_index_from_coord", "(", "coord", ",", "base_index", ")", ":", "try", ":", "values", "=", "coord", ".", "values", "except", "AttributeError", ":", "values", "=", "coord", "if", "values", ".", "ndim", "==", "0", ":", "return", "base_index", ".", ...
Function to return the coordinate as integer, integer array or slice If `coord` is zero-dimensional, the corresponding integer in `base_index` will be supplied. Otherwise it is first tried to return a slice, if that does not work an integer array with the corresponding indices is returned. Parameters ---------- coord: xarray.Coordinate or xarray.Variable Coordinate to convert base_index: pandas.Index The base index from which the `coord` was extracted Returns ------- int, array of ints or slice The indexer that can be used to access the `coord` in the `base_index`
[ "Function", "to", "return", "the", "coordinate", "as", "integer", "integer", "array", "or", "slice" ]
python
train
learningequality/ricecooker
examples/sample_program.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/examples/sample_program.py#L317-L415
def _build_tree(node, sourcetree): """ Parse nodes given in `sourcetree` and add as children of `node`. """ for child_source_node in sourcetree: try: main_file = child_source_node['files'][0] if 'files' in child_source_node else {} kind = guess_content_kind(path=main_file.get('path'), web_video_data=main_file.get('youtube_id') or main_file.get('web_url'), questions=child_source_node.get("questions")) except UnknownContentKindError: continue if kind == content_kinds.TOPIC: child_node = nodes.TopicNode( source_id=child_source_node["id"], title=child_source_node["title"], author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), ) node.add_child(child_node) source_tree_children = child_source_node.get("children", []) _build_tree(child_node, source_tree_children) elif kind == content_kinds.VIDEO: child_node = nodes.VideoNode( source_id=child_source_node["id"], title=child_source_node["title"], license=get_license(child_source_node.get("license"), description="Description of license", copyright_holder=child_source_node.get('copyright_holder')), author=child_source_node.get("author"), description=child_source_node.get("description"), derive_thumbnail=True, # video-specific data thumbnail=child_source_node.get('thumbnail'), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.AUDIO: child_node = nodes.AudioNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.DOCUMENT: child_node = nodes.DocumentNode( source_id=child_source_node["id"], 
title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) elif kind == content_kinds.EXERCISE: mastery_model = (child_source_node.get('mastery_model') and {"mastery_model": child_source_node['mastery_model']}) or {} child_node = nodes.ExerciseNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), exercise_data=mastery_model, thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) for q in child_source_node.get("questions"): question = create_question(q) child_node.add_question(question) node.add_child(child_node) elif kind == content_kinds.HTML5: child_node = nodes.HTML5AppNode( source_id=child_source_node["id"], title=child_source_node["title"], license=child_source_node.get("license"), author=child_source_node.get("author"), description=child_source_node.get("description"), thumbnail=child_source_node.get("thumbnail"), copyright_holder=child_source_node.get("copyright_holder"), ) add_files(child_node, child_source_node.get("files") or []) node.add_child(child_node) else: # unknown content file format continue return node
[ "def", "_build_tree", "(", "node", ",", "sourcetree", ")", ":", "for", "child_source_node", "in", "sourcetree", ":", "try", ":", "main_file", "=", "child_source_node", "[", "'files'", "]", "[", "0", "]", "if", "'files'", "in", "child_source_node", "else", "{...
Parse nodes given in `sourcetree` and add as children of `node`.
[ "Parse", "nodes", "given", "in", "sourcetree", "and", "add", "as", "children", "of", "node", "." ]
python
train
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L993-L1051
def plot2d(self, c_poly='default', alpha=1, cmap='default', ret=False, title=' ', colorbar=False, cbar_label=''): """ Generates a 2D plot for the z=0 Surface projection. :param c_poly: Polygons color. :type c_poly: matplotlib color :param alpha: Opacity. :type alpha: float :param cmap: colormap :type cmap: matplotlib.cm :param ret: If True, returns the figure. It can be used to add more elements to the plot or to modify it. :type ret: bool :param title: Figure title. :type title: str :param colorbar: If True, inserts a colorbar in the figure. :type colorbar: bool :param cbar_label: Colorbar right label. :type cbar_label: str :returns: None, axes :rtype: None, matplotlib axes """ import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.cm as cm paths = [polygon.get_path() for polygon in self] domain = self.get_domain()[:, :2] # Color if type(c_poly) == str: # Unicolor if c_poly is 'default': c_poly = 'b' color_vector = c_poly*len(paths) colorbar = False else: # Colormap if cmap is 'default': cmap = cm.YlOrRd import matplotlib.colors as mcolors normalize = mcolors.Normalize(vmin=c_poly.min(), vmax=c_poly.max()) color_vector = cmap(normalize(c_poly)) # Plot fig = plt.figure(title) ax = fig.add_subplot(111) for p, c in zip(paths, color_vector): ax.add_patch(patches.PathPatch(p, facecolor=c, lw=1, edgecolor='k', alpha=alpha)) ax.set_xlim(domain[0,0],domain[1,0]) ax.set_ylim(domain[0,1], domain[1,1]) # Colorbar if colorbar: scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap) scalarmappaple.set_array(c_poly) cbar = plt.colorbar(scalarmappaple, shrink=0.8, aspect=10) cbar.ax.set_ylabel(cbar_label, rotation=0) if ret: return ax
[ "def", "plot2d", "(", "self", ",", "c_poly", "=", "'default'", ",", "alpha", "=", "1", ",", "cmap", "=", "'default'", ",", "ret", "=", "False", ",", "title", "=", "' '", ",", "colorbar", "=", "False", ",", "cbar_label", "=", "''", ")", ":", "import...
Generates a 2D plot for the z=0 Surface projection. :param c_poly: Polygons color. :type c_poly: matplotlib color :param alpha: Opacity. :type alpha: float :param cmap: colormap :type cmap: matplotlib.cm :param ret: If True, returns the figure. It can be used to add more elements to the plot or to modify it. :type ret: bool :param title: Figure title. :type title: str :param colorbar: If True, inserts a colorbar in the figure. :type colorbar: bool :param cbar_label: Colorbar right label. :type cbar_label: str :returns: None, axes :rtype: None, matplotlib axes
[ "Generates", "a", "2D", "plot", "for", "the", "z", "=", "0", "Surface", "projection", ".", ":", "param", "c_poly", ":", "Polygons", "color", ".", ":", "type", "c_poly", ":", "matplotlib", "color", ":", "param", "alpha", ":", "Opacity", ".", ":", "type"...
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/bases.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L424-L445
def from_dir(cls, ID, datadir, parser, pattern='*.fcs', recursive=False, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs): """ Create a Collection of measurements from data files contained in a directory. Parameters ---------- ID : hashable Collection ID datadir : str Path of directory containing the data files. pattern : str Only files matching the pattern will be used to create measurements. recursive : bool Recursively look for files matching pattern in subdirectories. {_bases_filename_parser} {_bases_ID_kwargs} """ datafiles = get_files(datadir, pattern, recursive) return cls.from_files(ID, datafiles, parser, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs, **ID_kwargs)
[ "def", "from_dir", "(", "cls", ",", "ID", ",", "datadir", ",", "parser", ",", "pattern", "=", "'*.fcs'", ",", "recursive", "=", "False", ",", "readdata_kwargs", "=", "{", "}", ",", "readmeta_kwargs", "=", "{", "}", ",", "*", "*", "ID_kwargs", ")", ":...
Create a Collection of measurements from data files contained in a directory. Parameters ---------- ID : hashable Collection ID datadir : str Path of directory containing the data files. pattern : str Only files matching the pattern will be used to create measurements. recursive : bool Recursively look for files matching pattern in subdirectories. {_bases_filename_parser} {_bases_ID_kwargs}
[ "Create", "a", "Collection", "of", "measurements", "from", "data", "files", "contained", "in", "a", "directory", "." ]
python
train
partofthething/ace
ace/smoother.py
https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L249-L254
def _remove_observation_from_means(self, xj, yj): """Update the means without recalculating for the deletion of one observation.""" self._mean_x_in_window = ((self.window_size * self._mean_x_in_window - xj) / (self.window_size - 1.0)) self._mean_y_in_window = ((self.window_size * self._mean_y_in_window - yj) / (self.window_size - 1.0))
[ "def", "_remove_observation_from_means", "(", "self", ",", "xj", ",", "yj", ")", ":", "self", ".", "_mean_x_in_window", "=", "(", "(", "self", ".", "window_size", "*", "self", ".", "_mean_x_in_window", "-", "xj", ")", "/", "(", "self", ".", "window_size", ...
Update the means without recalculating for the deletion of one observation.
[ "Update", "the", "means", "without", "recalculating", "for", "the", "deletion", "of", "one", "observation", "." ]
python
train
polyaxon/polyaxon-cli
polyaxon_cli/cli/project.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/project.py#L47-L51
def project(ctx, project): # pylint:disable=redefined-outer-name """Commands for projects.""" if ctx.invoked_subcommand not in ['create', 'list']: ctx.obj = ctx.obj or {} ctx.obj['project'] = project
[ "def", "project", "(", "ctx", ",", "project", ")", ":", "# pylint:disable=redefined-outer-name", "if", "ctx", ".", "invoked_subcommand", "not", "in", "[", "'create'", ",", "'list'", "]", ":", "ctx", ".", "obj", "=", "ctx", ".", "obj", "or", "{", "}", "ct...
Commands for projects.
[ "Commands", "for", "projects", "." ]
python
valid
apache/incubator-heron
heronpy/api/bolt/window_bolt.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/bolt/window_bolt.py#L86-L94
def process(self, tup): """Process a single tuple of input We add the (time, tuple) pair into our current_tuples. And then look for expiring elemnents """ curtime = int(time.time()) self.current_tuples.append((tup, curtime)) self._expire(curtime)
[ "def", "process", "(", "self", ",", "tup", ")", ":", "curtime", "=", "int", "(", "time", ".", "time", "(", ")", ")", "self", ".", "current_tuples", ".", "append", "(", "(", "tup", ",", "curtime", ")", ")", "self", ".", "_expire", "(", "curtime", ...
Process a single tuple of input We add the (time, tuple) pair into our current_tuples. And then look for expiring elemnents
[ "Process", "a", "single", "tuple", "of", "input" ]
python
valid
MrYsLab/pymata-aio
pymata_aio/pymata_iot.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L577-L586
def analog_latch_callback(self, data): """ This method handles analog_latch data received from pymata_core :param data: analog latch callback message :returns:{"method": "analog_latch_data_reply", "params": [ANALOG_PIN, VALUE_AT_TRIGGER, TIME_STAMP_STRING]} """ ts = data[2] st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') reply = json.dumps({"method": "analog_latch_data_reply", "params": [data[0], data[1], st]}) asyncio.ensure_future(self.websocket.send(reply))
[ "def", "analog_latch_callback", "(", "self", ",", "data", ")", ":", "ts", "=", "data", "[", "2", "]", "st", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "ts", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "reply", "=", "json", ...
This method handles analog_latch data received from pymata_core :param data: analog latch callback message :returns:{"method": "analog_latch_data_reply", "params": [ANALOG_PIN, VALUE_AT_TRIGGER, TIME_STAMP_STRING]}
[ "This", "method", "handles", "analog_latch", "data", "received", "from", "pymata_core", ":", "param", "data", ":", "analog", "latch", "callback", "message", ":", "returns", ":", "{", "method", ":", "analog_latch_data_reply", "params", ":", "[", "ANALOG_PIN", "VA...
python
train
Peter-Slump/python-keycloak-client
src/keycloak/admin/users.py
https://github.com/Peter-Slump/python-keycloak-client/blob/379ae58f3c65892327b0c98c06d4982aa83f357e/src/keycloak/admin/users.py#L60-L70
def all(self): """ Return all registered users http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource """ return self._client.get( url=self._client.get_full_url( self.get_path('collection', realm=self._realm_name) ) )
[ "def", "all", "(", "self", ")", ":", "return", "self", ".", "_client", ".", "get", "(", "url", "=", "self", ".", "_client", ".", "get_full_url", "(", "self", ".", "get_path", "(", "'collection'", ",", "realm", "=", "self", ".", "_realm_name", ")", ")...
Return all registered users http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource
[ "Return", "all", "registered", "users" ]
python
train
viralogic/py-enumerable
py_linq/py_linq.py
https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq.py#L212-L220
def where(self, predicate): """ Returns new Enumerable where elements matching predicate are selected :param predicate: predicate as a lambda expression :return: new Enumerable object """ if predicate is None: raise NullArgumentError("No predicate given for where clause") return Enumerable(itertools.ifilter(predicate, self))
[ "def", "where", "(", "self", ",", "predicate", ")", ":", "if", "predicate", "is", "None", ":", "raise", "NullArgumentError", "(", "\"No predicate given for where clause\"", ")", "return", "Enumerable", "(", "itertools", ".", "ifilter", "(", "predicate", ",", "se...
Returns new Enumerable where elements matching predicate are selected :param predicate: predicate as a lambda expression :return: new Enumerable object
[ "Returns", "new", "Enumerable", "where", "elements", "matching", "predicate", "are", "selected", ":", "param", "predicate", ":", "predicate", "as", "a", "lambda", "expression", ":", "return", ":", "new", "Enumerable", "object" ]
python
train
DarkEnergySurvey/ugali
ugali/utils/bayesian_efficiency.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/bayesian_efficiency.py#L24-L76
def confidenceInterval(n, k, alpha = 0.68, errorbar=False): """ Given n tests and k successes, return efficiency and confidence interval. """ try: e = float(k) / float(n) except ZeroDivisionError: return np.nan, [np.nan, np.nan] bins = 1000001 dx = 1. / bins efficiency = np.linspace(0, 1, bins) # MODIFIED FOR LARGE NUMBERS if n + 2 > 1000: a = gammalnStirling(n + 2) else: a = scipy.special.gammaln(n + 2) if k + 1 > 1000: b = gammalnStirling(k + 1) else: b = scipy.special.gammaln(k + 1) if n - k + 1 > 1000: c = gammalnStirling(n - k + 1) else: c = scipy.special.gammaln(n - k + 1) if k == 0: p = np.concatenate([[np.exp(a - b - c)], np.exp(a - b - c + (k * np.log(efficiency[1: -1])) + (n - k) * np.log(1. - efficiency[1: -1])), [0.]]) elif k == n: p = np.concatenate([[0.], np.exp(a - b - c + (k * np.log(efficiency[1: -1])) + (n - k) * np.log(1. - efficiency[1: -1])), [np.exp(a - b - c)]]) else: p = np.concatenate([[0.], np.exp(a - b - c + (k * np.log(efficiency[1: -1])) + (n - k) * np.log(1. - efficiency[1: -1])), [0.]]) i = np.argsort(p)[::-1] p_i = np.take(p, i) s = i[np.cumsum(p_i * dx) < alpha] low = min(np.min(s) * dx, e) high = max(np.max(s) * dx, e) if not errorbar: return e, [low, high] else: return e, [e - low, high - e]
[ "def", "confidenceInterval", "(", "n", ",", "k", ",", "alpha", "=", "0.68", ",", "errorbar", "=", "False", ")", ":", "try", ":", "e", "=", "float", "(", "k", ")", "/", "float", "(", "n", ")", "except", "ZeroDivisionError", ":", "return", "np", ".",...
Given n tests and k successes, return efficiency and confidence interval.
[ "Given", "n", "tests", "and", "k", "successes", "return", "efficiency", "and", "confidence", "interval", "." ]
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L3894-L3915
def depth_soil_conductivity(self, value=None): """Corresponds to IDD Field `depth_soil_conductivity` Args: value (float): value for IDD Field `depth_soil_conductivity` Unit: W/m-K, if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `depth_soil_conductivity`'.format(value)) self._depth_soil_conductivity = value
[ "def", "depth_soil_conductivity", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to b...
Corresponds to IDD Field `depth_soil_conductivity` Args: value (float): value for IDD Field `depth_soil_conductivity` Unit: W/m-K, if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "depth_soil_conductivity" ]
python
train
Kortemme-Lab/klab
klab/db/sqlalchemy_interface.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/sqlalchemy_interface.py#L48-L61
def row_to_dict(r, keep_relationships = False): '''Converts an SQLAlchemy record to a Python dict. We assume that _sa_instance_state exists and is the only value we do not care about. If DeclarativeBase is passed then all DeclarativeBase objects (e.g. those created by relationships) are also removed. ''' d = {} if not keep_relationships: # only returns the table columns t = r.__table__ for c in [c.name for c in list(sqlalchemy_inspect(t).columns)]: d[c] = getattr(r, c) return d else: # keeps all objects including those of type DeclarativeBase or InstrumentedList and the _sa_instance_state object return copy.deepcopy(r.__dict__)
[ "def", "row_to_dict", "(", "r", ",", "keep_relationships", "=", "False", ")", ":", "d", "=", "{", "}", "if", "not", "keep_relationships", ":", "# only returns the table columns", "t", "=", "r", ".", "__table__", "for", "c", "in", "[", "c", ".", "name", "...
Converts an SQLAlchemy record to a Python dict. We assume that _sa_instance_state exists and is the only value we do not care about. If DeclarativeBase is passed then all DeclarativeBase objects (e.g. those created by relationships) are also removed.
[ "Converts", "an", "SQLAlchemy", "record", "to", "a", "Python", "dict", ".", "We", "assume", "that", "_sa_instance_state", "exists", "and", "is", "the", "only", "value", "we", "do", "not", "care", "about", ".", "If", "DeclarativeBase", "is", "passed", "then",...
python
train
CivicSpleen/ambry
ambry/library/search_backends/postgres_backend.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/postgres_backend.py#L623-L634
def _delete(self, identifier=None): """ Deletes given identifier from index. Args: identifier (str): identifier of the document to delete. """ query = text(""" DELETE FROM identifier_index WHERE identifier = :identifier; """) self.execute(query, identifier=identifier)
[ "def", "_delete", "(", "self", ",", "identifier", "=", "None", ")", ":", "query", "=", "text", "(", "\"\"\"\n DELETE FROM identifier_index\n WHERE identifier = :identifier;\n \"\"\"", ")", "self", ".", "execute", "(", "query", ",", "identifier...
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
[ "Deletes", "given", "identifier", "from", "index", "." ]
python
train
raamana/pyradigm
pyradigm/multiple.py
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/multiple.py#L195-L217
def _get_data(self, id_list, format='MLDataset'): """Returns the data, from all modalities, for a given list of IDs""" format = format.lower() features = list() # returning a dict would be better if AutoMKL() can handle it for modality, data in self._modalities.items(): if format in ('ndarray', 'data_matrix'): # turning dict of arrays into a data matrix # this is arguably worse, as labels are difficult to pass subset = np.array(itemgetter(*id_list)(data)) elif format in ('mldataset', 'pyradigm'): # getting container with fake data subset = self._dataset.get_subset(id_list) # injecting actual features subset.data = { id_: data[id_] for id_ in id_list } else: raise ValueError('Invalid output format - choose only one of ' 'MLDataset or data_matrix') features.append(subset) return features
[ "def", "_get_data", "(", "self", ",", "id_list", ",", "format", "=", "'MLDataset'", ")", ":", "format", "=", "format", ".", "lower", "(", ")", "features", "=", "list", "(", ")", "# returning a dict would be better if AutoMKL() can handle it", "for", "modality", ...
Returns the data, from all modalities, for a given list of IDs
[ "Returns", "the", "data", "from", "all", "modalities", "for", "a", "given", "list", "of", "IDs" ]
python
train
robotools/fontParts
Lib/fontParts/base/contour.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/contour.py#L639-L678
def _setStartSegment(self, segmentIndex, **kwargs): """ Subclasses may override this method. """ segments = self.segments oldStart = segments[-1] oldLast = segments[0] # If the contour ends with a curve on top of a move, # delete the move. if oldLast.type == "curve" or oldLast.type == "qcurve": startOn = oldStart.onCurve lastOn = oldLast.onCurve if startOn.x == lastOn.x and startOn.y == lastOn.y: self.removeSegment(0) # Shift new the start index. segmentIndex = segmentIndex - 1 segments = self.segments # If the first point is a move, convert it to a line. if segments[0].type == "move": segments[0].type = "line" # Reorder the points internally. segments = segments[segmentIndex - 1:] + segments[:segmentIndex - 1] points = [] for segment in segments: for point in segment: points.append(((point.x, point.y), point.type, point.smooth, point.name, point.identifier)) # Clear the points. for point in self.points: self.removePoint(point) # Add the points. for point in points: position, type, smooth, name, identifier = point self.appendPoint( position, type=type, smooth=smooth, name=name, identifier=identifier )
[ "def", "_setStartSegment", "(", "self", ",", "segmentIndex", ",", "*", "*", "kwargs", ")", ":", "segments", "=", "self", ".", "segments", "oldStart", "=", "segments", "[", "-", "1", "]", "oldLast", "=", "segments", "[", "0", "]", "# If the contour ends wit...
Subclasses may override this method.
[ "Subclasses", "may", "override", "this", "method", "." ]
python
train
saltstack/salt
salt/utils/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cloud.py#L956-L973
def run_psexec_command(cmd, args, host, username, password, port=445): ''' Run a command remotly using the psexec protocol ''' if has_winexe() and not HAS_PSEXEC: ret_code = run_winexe_command(cmd, args, host, username, password, port) return None, None, ret_code service_name = 'PS-Exec-{0}'.format(uuid.uuid4()) stdout, stderr, ret_code = '', '', None client = Client(host, username, password, port=port, encrypt=False, service_name=service_name) client.connect() try: client.create_service() stdout, stderr, ret_code = client.run_executable(cmd, args) finally: client.remove_service() client.disconnect() return stdout, stderr, ret_code
[ "def", "run_psexec_command", "(", "cmd", ",", "args", ",", "host", ",", "username", ",", "password", ",", "port", "=", "445", ")", ":", "if", "has_winexe", "(", ")", "and", "not", "HAS_PSEXEC", ":", "ret_code", "=", "run_winexe_command", "(", "cmd", ",",...
Run a command remotly using the psexec protocol
[ "Run", "a", "command", "remotly", "using", "the", "psexec", "protocol" ]
python
train
erocarrera/pefile
pefile.py
https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/pefile.py#L1089-L1120
def contains_rva(self, rva): """Check whether the section contains the address provided.""" # Check if the SizeOfRawData is realistic. If it's bigger than the size of # the whole PE file minus the start address of the section it could be # either truncated or the SizeOfRawData contains a misleading value. # In either of those cases we take the VirtualSize # if len(self.pe.__data__) - self.pe.adjust_FileAlignment( self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment ) < self.SizeOfRawData: # PECOFF documentation v8 says: # VirtualSize: The total size of the section when loaded into memory. # If this value is greater than SizeOfRawData, the section is zero-padded. # This field is valid only for executable images and should be set to zero # for object files. # size = self.Misc_VirtualSize else: size = max(self.SizeOfRawData, self.Misc_VirtualSize) VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment ) # Check whether there's any section after the current one that starts before the # calculated end for the current one. If so, cut the current section's size # to fit in the range up to where the next section starts. if (self.next_section_virtual_address is not None and self.next_section_virtual_address > self.VirtualAddress and VirtualAddress_adj + size > self.next_section_virtual_address): size = self.next_section_virtual_address - VirtualAddress_adj return VirtualAddress_adj <= rva < VirtualAddress_adj + size
[ "def", "contains_rva", "(", "self", ",", "rva", ")", ":", "# Check if the SizeOfRawData is realistic. If it's bigger than the size of", "# the whole PE file minus the start address of the section it could be", "# either truncated or the SizeOfRawData contains a misleading value.", "# In either...
Check whether the section contains the address provided.
[ "Check", "whether", "the", "section", "contains", "the", "address", "provided", "." ]
python
train
peterservice-rnd/robotframework-jsonvalidator
src/JsonValidator.py
https://github.com/peterservice-rnd/robotframework-jsonvalidator/blob/acde5045c04d0a7b9079f22707c3e71a5d3fa724/src/JsonValidator.py#L329-L345
def element_should_not_exist(self, json_string, expr): """ Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression;\n *Raises:*\n JsonValidatorError """ value = self.select_elements(json_string, expr) if value is not None: raise JsonValidatorError('Elements %s exist but should not' % expr)
[ "def", "element_should_not_exist", "(", "self", ",", "json_string", ",", "expr", ")", ":", "value", "=", "self", ".", "select_elements", "(", "json_string", ",", "expr", ")", "if", "value", "is", "not", "None", ":", "raise", "JsonValidatorError", "(", "'Elem...
Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression;\n *Raises:*\n JsonValidatorError
[ "Check", "that", "one", "or", "more", "elements", "matching", "[", "http", ":", "//", "jsonselect", ".", "org", "/", "|", "JSONSelect", "]", "expression", "don", "t", "exist", "." ]
python
train
mitsei/dlkit
dlkit/json_/resource/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/managers.py#L809-L824
def get_bin_hierarchy_design_session(self): """Gets the bin hierarchy design session. return: (osid.resource.BinHierarchyDesignSession) - a ``BinHierarchyDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_bin_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_bin_hierarchy_design()`` is ``true``.* """ if not self.supports_bin_hierarchy_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.BinHierarchyDesignSession(runtime=self._runtime)
[ "def", "get_bin_hierarchy_design_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_bin_hierarchy_design", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "BinHierarchyDesign...
Gets the bin hierarchy design session. return: (osid.resource.BinHierarchyDesignSession) - a ``BinHierarchyDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_bin_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_bin_hierarchy_design()`` is ``true``.*
[ "Gets", "the", "bin", "hierarchy", "design", "session", "." ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1433-L1452
def children(self, as_resources=False): ''' method to return hierarchical children of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources ''' children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))] # if as_resources, issue GET requests for children and return if as_resources: logger.debug('retrieving children as resources') children = [ self.repo.get_resource(child) for child in children ] return children
[ "def", "children", "(", "self", ",", "as_resources", "=", "False", ")", ":", "children", "=", "[", "o", "for", "s", ",", "p", ",", "o", "in", "self", ".", "rdf", ".", "graph", ".", "triples", "(", "(", "None", ",", "self", ".", "rdf", ".", "pre...
method to return hierarchical children of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
[ "method", "to", "return", "hierarchical", "children", "of", "this", "resource" ]
python
train
fraoustin/flaskserver
flaskserver/main.py
https://github.com/fraoustin/flaskserver/blob/27ce6ead523ae42286993cab04406d17a92c6535/flaskserver/main.py#L250-L270
def log(host=None, port=None, limit=0): """view log of web server""" app.config['HOST'] = first_value(host, app.config.get('HOST',None), '0.0.0.0') app.config['PORT'] = int(first_value(port, app.config.get('PORT',None), 5001)) if app.config['HOST'] == "0.0.0.0": host="127.0.0.1" else: host = app.config['HOST'] port = app.config['PORT'] try: res = requests.get('http://%s:%s/api/log?limit=%s' % (host, port, limit)) if res.status_code == 200: for record in json.loads(res.text): if record['level'] >= 30: print(record['msg'], file=sys.stderr) else: print(record['msg'], file=sys.stdinfo) else: print('web server is not flaskserver', file=sys.stderr) except: print('web server is not flaskserver or not start', file=sys.stderr)
[ "def", "log", "(", "host", "=", "None", ",", "port", "=", "None", ",", "limit", "=", "0", ")", ":", "app", ".", "config", "[", "'HOST'", "]", "=", "first_value", "(", "host", ",", "app", ".", "config", ".", "get", "(", "'HOST'", ",", "None", ")...
view log of web server
[ "view", "log", "of", "web", "server" ]
python
train
openego/ding0
ding0/grid/mv_grid/models/models.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/models/models.py#L208-L231
def insert(self, nodes, pos): # TODO: check docstring """Inserts all nodes from `nodes` list into this route at position `pos` Parameters ---------- nodes : type Desc pos : type Desc """ node_list = [] nodes_demand = 0 for node in [node for node in nodes]: if node._allocation: node._allocation.deallocate([node]) node_list.append(node) node._allocation = self nodes_demand = nodes_demand + node.demand() self._nodes = self._nodes[:pos] + node_list + self._nodes[pos:] self._demand += nodes_demand
[ "def", "insert", "(", "self", ",", "nodes", ",", "pos", ")", ":", "# TODO: check docstring", "node_list", "=", "[", "]", "nodes_demand", "=", "0", "for", "node", "in", "[", "node", "for", "node", "in", "nodes", "]", ":", "if", "node", ".", "_allocation...
Inserts all nodes from `nodes` list into this route at position `pos` Parameters ---------- nodes : type Desc pos : type Desc
[ "Inserts", "all", "nodes", "from", "nodes", "list", "into", "this", "route", "at", "position", "pos", "Parameters", "----------", "nodes", ":", "type", "Desc", "pos", ":", "type", "Desc" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_traildb/c7n_traildb/trailes.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_traildb/c7n_traildb/trailes.py#L214-L268
def index( config, date=None, directory=None, concurrency=5, accounts=None, tag=None, verbose=False): """index traildbs directly from s3 for multiple accounts. context: assumes a daily traildb file in s3 with dated key path """ logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO)) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('elasticsearch').setLevel(logging.WARNING) logging.getLogger('urllib3').setLevel(logging.WARNING) logging.getLogger('requests').setLevel(logging.WARNING) logging.getLogger('c7n.worker').setLevel(logging.INFO) with open(config) as fh: config = yaml.safe_load(fh.read()) jsonschema.validate(config, CONFIG_SCHEMA) date = get_date_path(date, delta=24) directory = directory or "/tmp" with ProcessPoolExecutor(max_workers=concurrency) as w: futures = {} jobs = [] for account in config.get('accounts'): if accounts and account['name'] not in accounts: continue if tag: found = False for t in account['tags'].values(): if tag == t: found = True break if not found: continue for region in account.get('regions'): p = (config, account, region, date, directory) jobs.append(p) for j in jobs: log.debug("submit account:{} region:{} date:{}".format( j[1]['name'], j[2], j[3])) futures[w.submit(index_account_trails, *j)] = j # Process completed for f in as_completed(futures): config, account, region, date, directory = futures[f] if f.exception(): log.warning("error account:{} region:{} error:{}".format( account['name'], region, f.exception())) continue log.info("complete account:{} region:{}".format( account['name'], region))
[ "def", "index", "(", "config", ",", "date", "=", "None", ",", "directory", "=", "None", ",", "concurrency", "=", "5", ",", "accounts", "=", "None", ",", "tag", "=", "None", ",", "verbose", "=", "False", ")", ":", "logging", ".", "basicConfig", "(", ...
index traildbs directly from s3 for multiple accounts. context: assumes a daily traildb file in s3 with dated key path
[ "index", "traildbs", "directly", "from", "s3", "for", "multiple", "accounts", "." ]
python
train
adafruit/Adafruit_Python_PN532
Adafruit_PN532/PN532.py
https://github.com/adafruit/Adafruit_Python_PN532/blob/343521a8ec842ea82f680a5ed868fee16e9609bd/Adafruit_PN532/PN532.py#L363-L381
def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A, timeout_sec=1): """Wait for a MiFare card to be available and return its UID when found. Will wait up to timeout_sec seconds and return None if no card is found, otherwise a bytearray with the UID of the found card is returned. """ # Send passive read command for 1 card. Expect at most a 7 byte UUID. response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET, params=[0x01, card_baud], response_length=17) # If no response is available return None to indicate no card is present. if response is None: return None # Check only 1 card with up to a 7 byte UID is present. if response[0] != 0x01: raise RuntimeError('More than one card detected!') if response[5] > 7: raise RuntimeError('Found card with unexpectedly long UID!') # Return UID of card. return response[6:6+response[5]]
[ "def", "read_passive_target", "(", "self", ",", "card_baud", "=", "PN532_MIFARE_ISO14443A", ",", "timeout_sec", "=", "1", ")", ":", "# Send passive read command for 1 card. Expect at most a 7 byte UUID.", "response", "=", "self", ".", "call_function", "(", "PN532_COMMAND_I...
Wait for a MiFare card to be available and return its UID when found. Will wait up to timeout_sec seconds and return None if no card is found, otherwise a bytearray with the UID of the found card is returned.
[ "Wait", "for", "a", "MiFare", "card", "to", "be", "available", "and", "return", "its", "UID", "when", "found", ".", "Will", "wait", "up", "to", "timeout_sec", "seconds", "and", "return", "None", "if", "no", "card", "is", "found", "otherwise", "a", "bytea...
python
train
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L87-L105
def _package_to_staging(staging_package_url): """Repackage this package from local installed location and copy it to GCS. Args: staging_package_url: GCS path. """ import google.datalab.ml as ml # Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file package_root = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../')) setup_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'master_setup.py')) tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz') print('Building package and uploading to %s' % tar_gz_path) ml.package_and_copy(package_root, setup_path, tar_gz_path) return tar_gz_path
[ "def", "_package_to_staging", "(", "staging_package_url", ")", ":", "import", "google", ".", "datalab", ".", "ml", "as", "ml", "# Find the package root. __file__ is under [package_root]/mltoolbox/_structured_data/this_file", "package_root", "=", "os", ".", "path", ".", "abs...
Repackage this package from local installed location and copy it to GCS. Args: staging_package_url: GCS path.
[ "Repackage", "this", "package", "from", "local", "installed", "location", "and", "copy", "it", "to", "GCS", "." ]
python
train
emin63/eyap
eyap/core/comments.py
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/comments.py#L398-L402
def lookup_thread_id(self): "Lookup the thread id as path to comment file." path = os.path.join(self.realm, self.topic + '.csv') return path
[ "def", "lookup_thread_id", "(", "self", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "realm", ",", "self", ".", "topic", "+", "'.csv'", ")", "return", "path" ]
Lookup the thread id as path to comment file.
[ "Lookup", "the", "thread", "id", "as", "path", "to", "comment", "file", "." ]
python
train
pyroscope/pyrocore
src/pyrocore/util/matching.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/util/matching.py#L281-L302
def validate(self): """ Validate filter condition (template method). """ from pyrocore.torrent import formatting super(PatternFilter, self).validate() self._value = self._value.lower() self._template = None self._is_regex = self._value.startswith('/') and self._value.endswith('/') if self._is_regex: self._matcher = re.compile(self._value[1:-1]).search elif self._value.startswith('{{') or self._value.endswith('}}'): def _template_globber(val, item): """Helper.""" pattern = formatting.format_item(self._template, item).replace('[', '[[]') ##print('!!!', val, '~~~', pattern, '???') return fnmatch.fnmatchcase(val, pattern.lower()) self._template = formatting.preparse(self._value) self._matcher = _template_globber else: self._matcher = lambda val, _: fnmatch.fnmatchcase(val, self._value)
[ "def", "validate", "(", "self", ")", ":", "from", "pyrocore", ".", "torrent", "import", "formatting", "super", "(", "PatternFilter", ",", "self", ")", ".", "validate", "(", ")", "self", ".", "_value", "=", "self", ".", "_value", ".", "lower", "(", ")",...
Validate filter condition (template method).
[ "Validate", "filter", "condition", "(", "template", "method", ")", "." ]
python
train
JoelBender/bacpypes
py25/bacpypes/local/schedule.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/local/schedule.py#L447-L558
def eval(self, edate, etime): """Evaluate the schedule according to the provided date and time and return the appropriate present value, or None if not in the effective period.""" if _debug: LocalScheduleInterpreter._debug("eval %r %r", edate, etime) # reference the schedule object sched_obj = self.sched_obj if _debug: LocalScheduleInterpreter._debug(" sched_obj: %r", sched_obj) # verify the date falls in the effective period if not match_date_range(edate, sched_obj.effectivePeriod): return None # the event priority is a list of values that are in effect for # exception schedules with the special event priority, see 135.1-2013 # clause 7.3.2.23.10.3.8, Revision 4 Event Priority Test event_priority = [None] * 16 next_day = (24, 0, 0, 0) next_transition_time = [None] * 16 # check the exception schedule values if sched_obj.exceptionSchedule: for special_event in sched_obj.exceptionSchedule: if _debug: LocalScheduleInterpreter._debug(" - special_event: %r", special_event) # check the special event period special_event_period = special_event.period if special_event_period is None: raise RuntimeError("special event period required") match = False calendar_entry = special_event_period.calendarEntry if calendar_entry: if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry) match = date_in_calendar_entry(edate, calendar_entry) else: # get the calendar object from the application calendar_object = sched_obj._app.get_object_id(special_event_period.calendarReference) if not calendar_object: raise RuntimeError("invalid calendar object reference") if _debug: LocalScheduleInterpreter._debug(" - calendar_object: %r", calendar_object) for calendar_entry in calendar_object.dateList: if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry) match = date_in_calendar_entry(edate, calendar_entry) if match: break # didn't match the period, try the next special event if not match: if _debug: LocalScheduleInterpreter._debug(" - no 
matching calendar entry") continue # event priority array index priority = special_event.eventPriority - 1 if _debug: LocalScheduleInterpreter._debug(" - priority: %r", priority) # look for all of the possible times for time_value in special_event.listOfTimeValues: tval = time_value.time if tval <= etime: if isinstance(time_value.value, Null): if _debug: LocalScheduleInterpreter._debug(" - relinquish exception @ %r", tval) event_priority[priority] = None next_transition_time[priority] = None else: if _debug: LocalScheduleInterpreter._debug(" - consider exception @ %r", tval) event_priority[priority] = time_value.value next_transition_time[priority] = next_day else: next_transition_time[priority] = tval break # assume the next transition will be at the start of the next day earliest_transition = next_day # check if any of the special events came up with something for priority_value, next_transition in zip(event_priority, next_transition_time): if next_transition is not None: earliest_transition = min(earliest_transition, next_transition) if priority_value is not None: if _debug: LocalScheduleInterpreter._debug(" - priority_value: %r", priority_value) return priority_value, earliest_transition # start out with the default daily_value = sched_obj.scheduleDefault # check the daily schedule if sched_obj.weeklySchedule: daily_schedule = sched_obj.weeklySchedule[edate[3]] if _debug: LocalScheduleInterpreter._debug(" - daily_schedule: %r", daily_schedule) # look for all of the possible times for time_value in daily_schedule.daySchedule: if _debug: LocalScheduleInterpreter._debug(" - time_value: %r", time_value) tval = time_value.time if tval <= etime: if isinstance(time_value.value, Null): if _debug: LocalScheduleInterpreter._debug(" - back to normal @ %r", tval) daily_value = sched_obj.scheduleDefault else: if _debug: LocalScheduleInterpreter._debug(" - new value @ %r", tval) daily_value = time_value.value else: earliest_transition = min(earliest_transition, tval) break # 
return what was matched, if anything return daily_value, earliest_transition
[ "def", "eval", "(", "self", ",", "edate", ",", "etime", ")", ":", "if", "_debug", ":", "LocalScheduleInterpreter", ".", "_debug", "(", "\"eval %r %r\"", ",", "edate", ",", "etime", ")", "# reference the schedule object", "sched_obj", "=", "self", ".", "sched_o...
Evaluate the schedule according to the provided date and time and return the appropriate present value, or None if not in the effective period.
[ "Evaluate", "the", "schedule", "according", "to", "the", "provided", "date", "and", "time", "and", "return", "the", "appropriate", "present", "value", "or", "None", "if", "not", "in", "the", "effective", "period", "." ]
python
train
andersinno/python-database-sanitizer
database_sanitizer/utils/mysql.py
https://github.com/andersinno/python-database-sanitizer/blob/742bc1f43526b60f322a48f18c900f94fd446ed4/database_sanitizer/utils/mysql.py#L110-L131
def decode_mysql_string_literal(text): """ Removes quotes and decodes escape sequences from given MySQL string literal returning the result. :param text: MySQL string literal, with the quotes still included. :type text: str :return: Given string literal with quotes removed and escape sequences decoded. :rtype: str """ assert text.startswith("'") assert text.endswith("'") # Ditch quotes from the string literal. text = text[1:-1] return MYSQL_STRING_ESCAPE_SEQUENCE_PATTERN.sub( unescape_single_character, text, )
[ "def", "decode_mysql_string_literal", "(", "text", ")", ":", "assert", "text", ".", "startswith", "(", "\"'\"", ")", "assert", "text", ".", "endswith", "(", "\"'\"", ")", "# Ditch quotes from the string literal.", "text", "=", "text", "[", "1", ":", "-", "1", ...
Removes quotes and decodes escape sequences from given MySQL string literal returning the result. :param text: MySQL string literal, with the quotes still included. :type text: str :return: Given string literal with quotes removed and escape sequences decoded. :rtype: str
[ "Removes", "quotes", "and", "decodes", "escape", "sequences", "from", "given", "MySQL", "string", "literal", "returning", "the", "result", "." ]
python
train
albahnsen/CostSensitiveClassification
costcla/models/regression.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/regression.py#L20-L40
def _intercept_dot(w, X): """Computes y * np.dot(X, w). It takes into consideration if the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. """ c = 0. if w.size == X.shape[1] + 1: c = w[-1] w = w[:-1] z = np.dot(X, w) + c return w, c, z
[ "def", "_intercept_dot", "(", "w", ",", "X", ")", ":", "c", "=", "0.", "if", "w", ".", "size", "==", "X", ".", "shape", "[", "1", "]", "+", "1", ":", "c", "=", "w", "[", "-", "1", "]", "w", "=", "w", "[", ":", "-", "1", "]", "z", "=",...
Computes y * np.dot(X, w). It takes into consideration if the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data.
[ "Computes", "y", "*", "np", ".", "dot", "(", "X", "w", ")", "." ]
python
train
tomnor/channelpack
channelpack/pulltxt.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pulltxt.py#L139-L173
def rows2skip(self, decdel): """ Return the number of rows to skip based on the decimal delimiter decdel. When each record start to have the same number of matches, this is where the data starts. This is the idea. And the number of consecutive records to have the same number of matches is to be EQUAL_CNT_REQ. """ if decdel == '.': ms = self.matches_p elif decdel == ',': ms = self.matches_c # else make error... cnt = row = 0 for val1, val2 in zip(ms, ms[1:]): # val2 is one element ahead. row += 1 if val2 == val1 != 0: # 0 is no matches, so it doesn't count. cnt += 1 else: cnt = 0 if cnt == EQUAL_CNT_REQ: break else: # print 'No break-out for', decdel, 'cnt:', cnt pass self.cnt = cnt return row - EQUAL_CNT_REQ
[ "def", "rows2skip", "(", "self", ",", "decdel", ")", ":", "if", "decdel", "==", "'.'", ":", "ms", "=", "self", ".", "matches_p", "elif", "decdel", "==", "','", ":", "ms", "=", "self", ".", "matches_c", "# else make error...", "cnt", "=", "row", "=", ...
Return the number of rows to skip based on the decimal delimiter decdel. When each record start to have the same number of matches, this is where the data starts. This is the idea. And the number of consecutive records to have the same number of matches is to be EQUAL_CNT_REQ.
[ "Return", "the", "number", "of", "rows", "to", "skip", "based", "on", "the", "decimal", "delimiter", "decdel", "." ]
python
train
fp12/achallonge
challonge/match.py
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/match.py#L205-L222
async def attach_url(self, url: str, description: str = None) -> Attachment: """ add an url as an attachment |methcoro| Args: url: url you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: url must not be None APIException """ return await self._attach(url=url, description=description)
[ "async", "def", "attach_url", "(", "self", ",", "url", ":", "str", ",", "description", ":", "str", "=", "None", ")", "->", "Attachment", ":", "return", "await", "self", ".", "_attach", "(", "url", "=", "url", ",", "description", "=", "description", ")"...
add an url as an attachment |methcoro| Args: url: url you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: url must not be None APIException
[ "add", "an", "url", "as", "an", "attachment" ]
python
train
pschmitt/python-opsview
opsview/opsview.py
https://github.com/pschmitt/python-opsview/blob/720acc06c491db32d18c79d20f04cae16e57a7fb/opsview/opsview.py#L441-L497
def get_args(): ''' Parse CLI args ''' parser = argparse.ArgumentParser(description='Process args') parser.Add_argument( '-H', '--host', required=True, action='store', help='Remote host to connect to' ) parser.add_argument( '-P', '--port', type=int, default=443, action='store', help='Port to connect on' ) parser.add_argument( '-u', '--user', required=True, action='store', help='User name to use when connecting to host' ) parser.add_argument( '-p', '--password', required=False, action='store', help='Password to use when connecting to host' ) parser.add_argument( '-s', '--ssl', required=False, action='store_true', help='Use SSL' ) parser.add_argument( '-k', '--skip-ssl-verification', required=False, default=False, action='store_true', help='Skip SSL certificate validation' ) parser.add_argument( '-n', '--dryrun', required=False, action='store_true', default=False, help='Dry run. Don\'t annotate any VM' ) parser.add_argument( '-v', '--verbose', action='store_true', default=False, help='Verbose output' ) return parser.parse_args()
[ "def", "get_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Process args'", ")", "parser", ".", "Add_argument", "(", "'-H'", ",", "'--host'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",...
Parse CLI args
[ "Parse", "CLI", "args" ]
python
train
PythonCharmers/python-future
src/future/standard_library/__init__.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/standard_library/__init__.py#L701-L736
def from_import(module_name, *symbol_names, **kwargs): """ Example use: >>> HTTPConnection = from_import('http.client', 'HTTPConnection') >>> HTTPServer = from_import('http.server', 'HTTPServer') >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse') Equivalent to this on Py3: >>> from module_name import symbol_names[0], symbol_names[1], ... and this on Py2: >>> from future.moves.module_name import symbol_names[0], ... or: >>> from future.backports.module_name import symbol_names[0], ... except that it also handles dotted module names such as ``http.client``. """ if PY3: return __import__(module_name) else: if 'backport' in kwargs and bool(kwargs['backport']): prefix = 'future.backports' else: prefix = 'future.moves' parts = prefix.split('.') + module_name.split('.') module = importlib.import_module(prefix + '.' + module_name) output = [getattr(module, name) for name in symbol_names] if len(output) == 1: return output[0] else: return output
[ "def", "from_import", "(", "module_name", ",", "*", "symbol_names", ",", "*", "*", "kwargs", ")", ":", "if", "PY3", ":", "return", "__import__", "(", "module_name", ")", "else", ":", "if", "'backport'", "in", "kwargs", "and", "bool", "(", "kwargs", "[", ...
Example use: >>> HTTPConnection = from_import('http.client', 'HTTPConnection') >>> HTTPServer = from_import('http.server', 'HTTPServer') >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse') Equivalent to this on Py3: >>> from module_name import symbol_names[0], symbol_names[1], ... and this on Py2: >>> from future.moves.module_name import symbol_names[0], ... or: >>> from future.backports.module_name import symbol_names[0], ... except that it also handles dotted module names such as ``http.client``.
[ "Example", "use", ":", ">>>", "HTTPConnection", "=", "from_import", "(", "http", ".", "client", "HTTPConnection", ")", ">>>", "HTTPServer", "=", "from_import", "(", "http", ".", "server", "HTTPServer", ")", ">>>", "urlopen", "urlparse", "=", "from_import", "("...
python
train
SBRG/ssbio
ssbio/pipeline/gempro.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L278-L280
def genes_with_a_representative_sequence(self): """DictList: All genes with a representative sequence.""" return DictList(x for x in self.genes if x.protein.representative_sequence)
[ "def", "genes_with_a_representative_sequence", "(", "self", ")", ":", "return", "DictList", "(", "x", "for", "x", "in", "self", ".", "genes", "if", "x", ".", "protein", ".", "representative_sequence", ")" ]
DictList: All genes with a representative sequence.
[ "DictList", ":", "All", "genes", "with", "a", "representative", "sequence", "." ]
python
train
mrcagney/kml2geojson
kml2geojson/main.py
https://github.com/mrcagney/kml2geojson/blob/6c4720f2b1327d636e15ce397dd808c9df8580a5/kml2geojson/main.py#L539-L589
def convert(kml_path, output_dir, separate_folders=False, style_type=None, style_filename='style.json'): """ Given a path to a KML file, convert it to one or several GeoJSON FeatureCollection files and save the result(s) to the given output directory. If not ``separate_folders`` (the default), then create one GeoJSON file. Otherwise, create several GeoJSON files, one for each folder in the KML file that contains geodata or that has a descendant node that contains geodata. Warning: this can produce GeoJSON files with the same geodata in case the KML file has nested folders with geodata. If a ``style_type`` is given, then also build a JSON style file of the given style type and save it to the output directory under the name given by ``style_filename``. """ # Create absolute paths kml_path = Path(kml_path).resolve() output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() output_dir = output_dir.resolve() # Parse KML with kml_path.open(encoding='utf-8', errors='ignore') as src: kml_str = src.read() root = md.parseString(kml_str) # Build GeoJSON layers if separate_folders: layers = build_layers(root) else: layers = [build_feature_collection(root, name=kml_path.stem)] # Create filenames for layers filenames = disambiguate( [to_filename(layer['name']) for layer in layers]) filenames = [name + '.geojson' for name in filenames] # Write layers to files for i in range(len(layers)): path = output_dir/filenames[i] with path.open('w') as tgt: json.dump(layers[i], tgt) # Build and export style file if desired if style_type is not None: if style_type not in STYLE_TYPES: raise ValueError('style type must be one of {!s}'.format( STYLE_TYPES)) builder_name = 'build_{!s}_style'.format(style_type) style_dict = globals()[builder_name](root) path = output_dir/style_filename with path.open('w') as tgt: json.dump(style_dict, tgt)
[ "def", "convert", "(", "kml_path", ",", "output_dir", ",", "separate_folders", "=", "False", ",", "style_type", "=", "None", ",", "style_filename", "=", "'style.json'", ")", ":", "# Create absolute paths", "kml_path", "=", "Path", "(", "kml_path", ")", ".", "r...
Given a path to a KML file, convert it to one or several GeoJSON FeatureCollection files and save the result(s) to the given output directory. If not ``separate_folders`` (the default), then create one GeoJSON file. Otherwise, create several GeoJSON files, one for each folder in the KML file that contains geodata or that has a descendant node that contains geodata. Warning: this can produce GeoJSON files with the same geodata in case the KML file has nested folders with geodata. If a ``style_type`` is given, then also build a JSON style file of the given style type and save it to the output directory under the name given by ``style_filename``.
[ "Given", "a", "path", "to", "a", "KML", "file", "convert", "it", "to", "one", "or", "several", "GeoJSON", "FeatureCollection", "files", "and", "save", "the", "result", "(", "s", ")", "to", "the", "given", "output", "directory", "." ]
python
train
i3visio/osrframework
osrframework/utils/configuration.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/configuration.py#L106-L195
def returnListOfConfigurationValues(util): """ Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration. """ VALUES = {} # If a api_keys.cfg has not been found, creating it by copying from default configPath = os.path.join(getConfigPath()["appPath"], "general.cfg") # Checking if the configuration file exists if not os.path.exists(configPath): # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); # Reading the configuration file config = ConfigParser.ConfigParser() config.read(configPath) LISTS = ["tlds", "domains", "platforms", "extension", "exclude_platforms", "exclude_domains"] # Iterating through all the sections, which contain the platforms for section in config.sections(): incomplete = False if section.lower() == util.lower(): # Iterating through parameters for (param, value) in config.items(section): if value == '': # Manually setting an empty value if param in LISTS: value = [] else: value = "" # Splitting the parameters to create the arrays when needed elif param in LISTS: value = value.split(' ') # Converting threads to int elif param == "threads": try: value = int(value) except Exception as err: raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) elif param == "debug": try: if int(value) == 0: value = False else: value = True except Exception as err: print("Something happened when 
processing this debug option. Resetting to default.") # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); #raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) VALUES[param] = value break return VALUES
[ "def", "returnListOfConfigurationValues", "(", "util", ")", ":", "VALUES", "=", "{", "}", "# If a api_keys.cfg has not been found, creating it by copying from default", "configPath", "=", "os", ".", "path", ".", "join", "(", "getConfigPath", "(", ")", "[", "\"appPath\""...
Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration.
[ "Method", "that", "recovers", "the", "configuration", "information", "about", "each", "program" ]
python
train
pkgw/pwkit
pwkit/synphot.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/synphot.py#L485-L493
def register_bpass(self, telescope, klass): """Register a Bandpass class.""" if telescope in self._bpass_classes: raise AlreadyDefinedError('bandpass class for %s already ' 'defined', telescope) self._note(telescope, None) self._bpass_classes[telescope] = klass return self
[ "def", "register_bpass", "(", "self", ",", "telescope", ",", "klass", ")", ":", "if", "telescope", "in", "self", ".", "_bpass_classes", ":", "raise", "AlreadyDefinedError", "(", "'bandpass class for %s already '", "'defined'", ",", "telescope", ")", "self", ".", ...
Register a Bandpass class.
[ "Register", "a", "Bandpass", "class", "." ]
python
train
quintusdias/glymur
glymur/codestream.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/codestream.py#L244-L268
def _parse_reserved_segment(self, fptr): """Parse valid marker segment, segment description is unknown. Parameters ---------- fptr : file object The file to parse. Returns ------- Segment The current segment. """ offset = fptr.tell() - 2 read_buffer = fptr.read(2) length, = struct.unpack('>H', read_buffer) if length > 0: data = fptr.read(length - 2) else: data = None segment = Segment(marker_id='0x{0:x}'.format(self._marker_id), offset=offset, length=length, data=data) return segment
[ "def", "_parse_reserved_segment", "(", "self", ",", "fptr", ")", ":", "offset", "=", "fptr", ".", "tell", "(", ")", "-", "2", "read_buffer", "=", "fptr", ".", "read", "(", "2", ")", "length", ",", "=", "struct", ".", "unpack", "(", "'>H'", ",", "re...
Parse valid marker segment, segment description is unknown. Parameters ---------- fptr : file object The file to parse. Returns ------- Segment The current segment.
[ "Parse", "valid", "marker", "segment", "segment", "description", "is", "unknown", "." ]
python
train
spyder-ide/spyder
spyder/utils/syntaxhighlighters.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/syntaxhighlighters.py#L1220-L1230
def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
[ "def", "highlightBlock", "(", "self", ",", "text", ")", ":", "# Note that an undefined blockstate is equal to -1, so the first block\r", "# will have the correct behaviour of starting at 0.\r", "if", "self", ".", "_allow_highlight", ":", "start", "=", "self", ".", "previousBloc...
Actually highlight the block
[ "Actually", "highlight", "the", "block" ]
python
train
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L703-L740
def remove_entry(self, pathname_name, recursive=True): """Removes the specified child file or directory. Args: pathname_name: Basename of the child object to remove. recursive: If True (default), the entries in contained directories are deleted first. Used to propagate removal errors (e.g. permission problems) from contained entries. Raises: KeyError: if no child exists by the specified name. OSError: if user lacks permission to delete the file, or (Windows only) the file is open. """ pathname_name = self._normalized_entryname(pathname_name) entry = self.get_entry(pathname_name) if self.filesystem.is_windows_fs: if entry.st_mode & PERM_WRITE == 0: self.filesystem.raise_os_error(errno.EACCES, pathname_name) if self.filesystem.has_open_file(entry): self.filesystem.raise_os_error(errno.EACCES, pathname_name) else: if (not is_root() and (self.st_mode & (PERM_WRITE | PERM_EXE) != PERM_WRITE | PERM_EXE)): self.filesystem.raise_os_error(errno.EACCES, pathname_name) if recursive and isinstance(entry, FakeDirectory): while entry.contents: entry.remove_entry(list(entry.contents)[0]) elif entry.st_nlink == 1: self.filesystem.change_disk_usage( -entry.size, pathname_name, entry.st_dev) self.st_nlink -= 1 entry.st_nlink -= 1 assert entry.st_nlink >= 0 del self.contents[pathname_name]
[ "def", "remove_entry", "(", "self", ",", "pathname_name", ",", "recursive", "=", "True", ")", ":", "pathname_name", "=", "self", ".", "_normalized_entryname", "(", "pathname_name", ")", "entry", "=", "self", ".", "get_entry", "(", "pathname_name", ")", "if", ...
Removes the specified child file or directory. Args: pathname_name: Basename of the child object to remove. recursive: If True (default), the entries in contained directories are deleted first. Used to propagate removal errors (e.g. permission problems) from contained entries. Raises: KeyError: if no child exists by the specified name. OSError: if user lacks permission to delete the file, or (Windows only) the file is open.
[ "Removes", "the", "specified", "child", "file", "or", "directory", "." ]
python
train
sbjorn/vici
vici/session.py
https://github.com/sbjorn/vici/blob/147135905b68892734b09ec8a569c71733648090/vici/session.py#L357-L373
def _read(self): """Get next packet from transport. :return: parsed packet in a tuple with message type and payload :rtype: :py:class:`collections.namedtuple` """ raw_response = self.transport.receive() response = Packet.parse(raw_response) # FIXME if response.response_type == Packet.EVENT and response.event_type == "log": # queue up any debug log messages, and get next self.log_events.append(response) # do something? self._read() else: return response
[ "def", "_read", "(", "self", ")", ":", "raw_response", "=", "self", ".", "transport", ".", "receive", "(", ")", "response", "=", "Packet", ".", "parse", "(", "raw_response", ")", "# FIXME", "if", "response", ".", "response_type", "==", "Packet", ".", "EV...
Get next packet from transport. :return: parsed packet in a tuple with message type and payload :rtype: :py:class:`collections.namedtuple`
[ "Get", "next", "packet", "from", "transport", "." ]
python
train
intake/intake
intake/config.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/config.py#L79-L90
def intake_path_dirs(path): """Return a list of directories from the intake path. If a string, perhaps taken from an environment variable, then the list of paths will be split on the character ":" for posix of ";" for windows. Protocol indicators ("protocol://") will be ignored. """ if isinstance(path, (list, tuple)): return path import re pattern = re.compile(";" if os.name == 'nt' else r"(?<!:):(?![:/])") return pattern.split(path)
[ "def", "intake_path_dirs", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "path", "import", "re", "pattern", "=", "re", ".", "compile", "(", "\";\"", "if", "os", ".", "name", "==", "...
Return a list of directories from the intake path. If a string, perhaps taken from an environment variable, then the list of paths will be split on the character ":" for posix of ";" for windows. Protocol indicators ("protocol://") will be ignored.
[ "Return", "a", "list", "of", "directories", "from", "the", "intake", "path", "." ]
python
train