repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
mikedh/trimesh
trimesh/visual/color.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/visual/color.py#L703-L737
def interpolate(values, color_map=None, dtype=np.uint8): """ Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors """ # get a color interpolation function if color_map is None: cmap = linear_color_map else: from matplotlib.pyplot import get_cmap cmap = get_cmap(color_map) # make input always float values = np.asanyarray(values, dtype=np.float64).ravel() # scale values to 0.0 - 1.0 and get colors colors = cmap((values - values.min()) / values.ptp()) # convert to 0-255 RGBA rgba = to_rgba(colors, dtype=dtype) return rgba
[ "def", "interpolate", "(", "values", ",", "color_map", "=", "None", ",", "dtype", "=", "np", ".", "uint8", ")", ":", "# get a color interpolation function", "if", "color_map", "is", "None", ":", "cmap", "=", "linear_color_map", "else", ":", "from", "matplotlib...
Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors
[ "Given", "a", "1D", "list", "of", "values", "return", "interpolated", "colors", "for", "the", "range", "." ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L2732-L2775
def _tree_load_link(self, new_traj_node, load_data, traj, as_new, hdf5_soft_link): """ Loads a link :param new_traj_node: Node in traj containing link :param load_data: How to load data in the linked node :param traj: The trajectory :param as_new: If data in linked node should be loaded as new :param hdf5_soft_link: The hdf5 soft link """ try: linked_group = hdf5_soft_link() link_name = hdf5_soft_link._v_name if (not link_name in new_traj_node._links or load_data==pypetconstants.OVERWRITE_DATA): link_location = linked_group._v_pathname full_name = '.'.join(link_location.split('/')[2:]) if not full_name in traj: self._tree_load_sub_branch(traj, full_name, load_data=pypetconstants.LOAD_SKELETON, with_links=False, recursive=False, _trajectory=traj, _as_new=as_new, _hdf5_group=self._trajectory_group) if (load_data == pypetconstants.OVERWRITE_DATA and link_name in new_traj_node._links): new_traj_node.f_remove_link(link_name) if not link_name in new_traj_node._links: new_traj_node._nn_interface._add_generic(new_traj_node, type_name=nn.LINK, group_type_name=nn.GROUP, args=(link_name, traj.f_get(full_name)), kwargs={}, add_prefix=False, check_naming=False) else: raise RuntimeError('You shall not pass!') except pt.NoSuchNodeError: self._logger.error('Link `%s` under `%s` is broken, cannot load it, ' 'I will ignore it, you have to ' 'manually delete it!' % (hdf5_soft_link._v_name, new_traj_node.v_full_name))
[ "def", "_tree_load_link", "(", "self", ",", "new_traj_node", ",", "load_data", ",", "traj", ",", "as_new", ",", "hdf5_soft_link", ")", ":", "try", ":", "linked_group", "=", "hdf5_soft_link", "(", ")", "link_name", "=", "hdf5_soft_link", ".", "_v_name", "if", ...
Loads a link :param new_traj_node: Node in traj containing link :param load_data: How to load data in the linked node :param traj: The trajectory :param as_new: If data in linked node should be loaded as new :param hdf5_soft_link: The hdf5 soft link
[ "Loads", "a", "link", ":", "param", "new_traj_node", ":", "Node", "in", "traj", "containing", "link", ":", "param", "load_data", ":", "How", "to", "load", "data", "in", "the", "linked", "node", ":", "param", "traj", ":", "The", "trajectory", ":", "param"...
python
test
apache/spark
python/pyspark/broadcast.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L135-L148
def value(self): """ Return the broadcasted value """ if not hasattr(self, "_value") and self._path is not None: # we only need to decrypt it here when encryption is enabled and # if its on the driver, since executor decryption is handled already if self._sc is not None and self._sc._encryption_enabled: port, auth_secret = self._python_broadcast.setupDecryptionServer() (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret) self._python_broadcast.waitTillBroadcastDataSent() return self.load(decrypted_sock_file) else: self._value = self.load_from_path(self._path) return self._value
[ "def", "value", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_value\"", ")", "and", "self", ".", "_path", "is", "not", "None", ":", "# we only need to decrypt it here when encryption is enabled and", "# if its on the driver, since executor decrypt...
Return the broadcasted value
[ "Return", "the", "broadcasted", "value" ]
python
train
SCIP-Interfaces/PySCIPOpt
examples/finished/piecewise.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/piecewise.py#L178-L217
def convex_comb_agg_log(model,a,b): """convex_comb_agg_log -- add piecewise relation with a logarithmic number of binary variables using the convex combination formulation -- non-disaggregated. Parameters: - model: a model where to include the piecewise linear relation - a[k]: x-coordinate of the k-th point in the piecewise linear relation - b[k]: y-coordinate of the k-th point in the piecewise linear relation Returns the model with the piecewise linear relation on added variables X, Y, and z. """ K = len(a)-1 G = int(math.ceil((math.log(K)/math.log(2)))) # number of required bits w,g = {},{} for k in range(K+1): w[k] = model.addVar(lb=0, ub=1, vtype="C") for j in range(G): g[j] = model.addVar(vtype="B") X = model.addVar(lb=a[0], ub=a[K], vtype="C") Y = model.addVar(lb=-model.infinity(), vtype="C") model.addCons(X == quicksum(a[k]*w[k] for k in range(K+1))) model.addCons(Y == quicksum(b[k]*w[k] for k in range(K+1))) model.addCons(quicksum(w[k] for k in range(K+1)) == 1) # binary variables setup for j in range(G): zeros,ones = [0],[] # print(j,"\tinit zeros:",zeros,"ones:",ones for k in range(1,K+1): # print(j,k,"\t>zeros:",zeros,"ones:",ones if (1 & gray(k)>>j) == 1 and (1 & gray(k-1)>>j) == 1: ones.append(k) if (1 & gray(k)>>j) == 0 and (1 & gray(k-1)>>j) == 0: zeros.append(k) # print(j,k,"\tzeros>:",zeros,"ones:",ones # print(j,"\tzeros:",zeros,"ones:",ones model.addCons(quicksum(w[k] for k in ones) <= g[j]) model.addCons(quicksum(w[k] for k in zeros) <= 1-g[j]) return X,Y,w
[ "def", "convex_comb_agg_log", "(", "model", ",", "a", ",", "b", ")", ":", "K", "=", "len", "(", "a", ")", "-", "1", "G", "=", "int", "(", "math", ".", "ceil", "(", "(", "math", ".", "log", "(", "K", ")", "/", "math", ".", "log", "(", "2", ...
convex_comb_agg_log -- add piecewise relation with a logarithmic number of binary variables using the convex combination formulation -- non-disaggregated. Parameters: - model: a model where to include the piecewise linear relation - a[k]: x-coordinate of the k-th point in the piecewise linear relation - b[k]: y-coordinate of the k-th point in the piecewise linear relation Returns the model with the piecewise linear relation on added variables X, Y, and z.
[ "convex_comb_agg_log", "--", "add", "piecewise", "relation", "with", "a", "logarithmic", "number", "of", "binary", "variables", "using", "the", "convex", "combination", "formulation", "--", "non", "-", "disaggregated", ".", "Parameters", ":", "-", "model", ":", ...
python
train
python-openxml/python-docx
docx/image/png.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/png.py#L251-L258
def from_offset(cls, chunk_type, stream_rdr, offset): """ Return an _IHDRChunk instance containing the image dimensions extracted from the IHDR chunk in *stream* at *offset*. """ px_width = stream_rdr.read_long(offset) px_height = stream_rdr.read_long(offset, 4) return cls(chunk_type, px_width, px_height)
[ "def", "from_offset", "(", "cls", ",", "chunk_type", ",", "stream_rdr", ",", "offset", ")", ":", "px_width", "=", "stream_rdr", ".", "read_long", "(", "offset", ")", "px_height", "=", "stream_rdr", ".", "read_long", "(", "offset", ",", "4", ")", "return", ...
Return an _IHDRChunk instance containing the image dimensions extracted from the IHDR chunk in *stream* at *offset*.
[ "Return", "an", "_IHDRChunk", "instance", "containing", "the", "image", "dimensions", "extracted", "from", "the", "IHDR", "chunk", "in", "*", "stream", "*", "at", "*", "offset", "*", "." ]
python
train
symphonyoss/python-symphony
symphony/Pod/users.py
https://github.com/symphonyoss/python-symphony/blob/b939f35fbda461183ec0c01790c754f89a295be0/symphony/Pod/users.py#L28-L35
def get_user_id_by_user(self, username): ''' get user id by username ''' response, status_code = self.__pod__.Users.get_v2_user( sessionToken=self.__session__, username=username ).result() self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
[ "def", "get_user_id_by_user", "(", "self", ",", "username", ")", ":", "response", ",", "status_code", "=", "self", ".", "__pod__", ".", "Users", ".", "get_v2_user", "(", "sessionToken", "=", "self", ".", "__session__", ",", "username", "=", "username", ")", ...
get user id by username
[ "get", "user", "id", "by", "username" ]
python
train
openfisca/openfisca-core
openfisca_core/periods.py
https://github.com/openfisca/openfisca-core/blob/92ce9396e29ae5d9bac5ea604cfce88517c6b35c/openfisca_core/periods.py#L569-L575
def contains(self, other): """ Returns ``True`` if the period contains ``other``. For instance, ``period(2015)`` contains ``period(2015-01)`` """ if not isinstance(other, Period): other = period(other) return self.start <= other.start and self.stop >= other.stop
[ "def", "contains", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Period", ")", ":", "other", "=", "period", "(", "other", ")", "return", "self", ".", "start", "<=", "other", ".", "start", "and", "self", ".", "s...
Returns ``True`` if the period contains ``other``. For instance, ``period(2015)`` contains ``period(2015-01)``
[ "Returns", "True", "if", "the", "period", "contains", "other", ".", "For", "instance", "period", "(", "2015", ")", "contains", "period", "(", "2015", "-", "01", ")" ]
python
train
rochacbruno/flasgger
flasgger/base.py
https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/flasgger/base.py#L620-L673
def validate( self, schema_id, validation_function=None, validation_error_handler=None): """ A decorator that is used to validate incoming requests data against a schema swagger = Swagger(app) @app.route('/pets', methods=['POST']) @swagger.validate('Pet') @swag_from("pet_post_endpoint.yml") def post(): return db.insert(request.data) This annotation only works if the endpoint is already swagged, i.e. placing @swag_from above @validate or not declaring the swagger specifications in the method's docstring *won't work* Naturally, if you use @app.route annotation it still needs to be the outermost annotation :param schema_id: the id of the schema with which the data will be validated :param validation_function: custom validation function which takes the positional arguments: data to be validated at first and schema to validate against at second :param validation_error_handler: custom function to handle exceptions thrown when validating which takes the exception thrown as the first, the data being validated as the second and the schema being used to validate as the third argument """ if validation_function is None: validation_function = self.validation_function if validation_error_handler is None: validation_error_handler = self.validation_error_handler def decorator(func): @wraps(func) def wrapper(*args, **kwargs): specs = get_schema_specs(schema_id, self) validate( schema_id=schema_id, specs=specs, validation_function=validation_function, validation_error_handler=validation_error_handler) return func(*args, **kwargs) return wrapper return decorator
[ "def", "validate", "(", "self", ",", "schema_id", ",", "validation_function", "=", "None", ",", "validation_error_handler", "=", "None", ")", ":", "if", "validation_function", "is", "None", ":", "validation_function", "=", "self", ".", "validation_function", "if",...
A decorator that is used to validate incoming requests data against a schema swagger = Swagger(app) @app.route('/pets', methods=['POST']) @swagger.validate('Pet') @swag_from("pet_post_endpoint.yml") def post(): return db.insert(request.data) This annotation only works if the endpoint is already swagged, i.e. placing @swag_from above @validate or not declaring the swagger specifications in the method's docstring *won't work* Naturally, if you use @app.route annotation it still needs to be the outermost annotation :param schema_id: the id of the schema with which the data will be validated :param validation_function: custom validation function which takes the positional arguments: data to be validated at first and schema to validate against at second :param validation_error_handler: custom function to handle exceptions thrown when validating which takes the exception thrown as the first, the data being validated as the second and the schema being used to validate as the third argument
[ "A", "decorator", "that", "is", "used", "to", "validate", "incoming", "requests", "data", "against", "a", "schema" ]
python
train
jrderuiter/pybiomart
src/pybiomart/server.py
https://github.com/jrderuiter/pybiomart/blob/7802d45fe88549ab0512d6f37f815fc43b172b39/src/pybiomart/server.py#L64-L76
def list_marts(self): """Lists available marts in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available marts. """ def _row_gen(attributes): for attr in attributes.values(): yield (attr.name, attr.display_name) return pd.DataFrame.from_records( _row_gen(self.marts), columns=['name', 'display_name'])
[ "def", "list_marts", "(", "self", ")", ":", "def", "_row_gen", "(", "attributes", ")", ":", "for", "attr", "in", "attributes", ".", "values", "(", ")", ":", "yield", "(", "attr", ".", "name", ",", "attr", ".", "display_name", ")", "return", "pd", "."...
Lists available marts in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available marts.
[ "Lists", "available", "marts", "in", "a", "readable", "DataFrame", "format", "." ]
python
train
kyper-data/python-highcharts
highcharts/highmaps/highmaps.py
https://github.com/kyper-data/python-highcharts/blob/a4c488ae5c2e125616efad5a722f3dfd8a9bc450/highcharts/highmaps/highmaps.py#L201-L223
def add_data_set(self, data, series_type="map", name=None, is_coordinate = False, **kwargs): """set data for series option in highmaps """ self.data_set_count += 1 if not name: name = "Series %d" % self.data_set_count kwargs.update({'name':name}) if is_coordinate: self.data_is_coordinate = True self.add_JSsource('https://cdnjs.cloudflare.com/ajax/libs/proj4js/2.3.6/proj4.js') if self.map and not self.data_temp: series_data = Series([], series_type='map', **{'mapData': self.map}) series_data.__options__().update(SeriesOptions(series_type='map', **{'mapData': self.map}).__options__()) self.data_temp.append(series_data) if self.map and 'mapData' in kwargs.keys(): kwargs.update({'mapData': self.map}) series_data = Series(data, series_type=series_type, **kwargs) series_data.__options__().update(SeriesOptions(series_type=series_type, **kwargs).__options__()) self.data_temp.append(series_data)
[ "def", "add_data_set", "(", "self", ",", "data", ",", "series_type", "=", "\"map\"", ",", "name", "=", "None", ",", "is_coordinate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "data_set_count", "+=", "1", "if", "not", "name", ":", ...
set data for series option in highmaps
[ "set", "data", "for", "series", "option", "in", "highmaps" ]
python
train
jbrudvik/yahooscraper
yahooscraper/login.py
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/login.py#L18-L37
def authenticated_session(username, password): """ Given username and password, return an authenticated Yahoo `requests` session that can be used for further scraping requests. Throw an AuthencationError if authentication fails. """ session = requests.Session() session.headers.update(headers()) response = session.get(url()) login_path = path(response.text) login_url = urljoin(response.url, login_path) login_post_data = post_data(response.text, username, password) response = session.post(login_url, data=login_post_data) if response.headers['connection'] == 'close': raise Exception('Authencation failed') return session
[ "def", "authenticated_session", "(", "username", ",", "password", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "headers", ".", "update", "(", "headers", "(", ")", ")", "response", "=", "session", ".", "get", "(", "url",...
Given username and password, return an authenticated Yahoo `requests` session that can be used for further scraping requests. Throw an AuthencationError if authentication fails.
[ "Given", "username", "and", "password", "return", "an", "authenticated", "Yahoo", "requests", "session", "that", "can", "be", "used", "for", "further", "scraping", "requests", "." ]
python
train
ttroy50/pyephember
pyephember/pyephember.py
https://github.com/ttroy50/pyephember/blob/3ee159ee82b926b957dae8dcbc7a4bfb6807a9b4/pyephember/pyephember.py#L319-L332
def activate_boost_by_name(self, zone_name, target_temperature, num_hours=1): """ Activate boost by the name of the zone """ zone = self.get_zone(zone_name) if zone is None: raise RuntimeError("Unknown zone") return self.activate_boost_by_id(zone["zoneId"], target_temperature, num_hours)
[ "def", "activate_boost_by_name", "(", "self", ",", "zone_name", ",", "target_temperature", ",", "num_hours", "=", "1", ")", ":", "zone", "=", "self", ".", "get_zone", "(", "zone_name", ")", "if", "zone", "is", "None", ":", "raise", "RuntimeError", "(", "\"...
Activate boost by the name of the zone
[ "Activate", "boost", "by", "the", "name", "of", "the", "zone" ]
python
train
bootphon/h5features
h5features/h5features.py
https://github.com/bootphon/h5features/blob/d5f95db0f1cee58ac1ba4575d1212e796c39e1f9/h5features/h5features.py#L161-L165
def simple_write(filename, group, times, features, properties=None, item='item', mode='a'): """Simplified version of `write()` when there is only one item.""" write(filename, group, [item], [times], [features], mode=mode, properties=[properties] if properties is not None else None)
[ "def", "simple_write", "(", "filename", ",", "group", ",", "times", ",", "features", ",", "properties", "=", "None", ",", "item", "=", "'item'", ",", "mode", "=", "'a'", ")", ":", "write", "(", "filename", ",", "group", ",", "[", "item", "]", ",", ...
Simplified version of `write()` when there is only one item.
[ "Simplified", "version", "of", "write", "()", "when", "there", "is", "only", "one", "item", "." ]
python
train
grycap/cpyutils
evaluate.py
https://github.com/grycap/cpyutils/blob/fa966fc6d2ae1e1e799e19941561aa79b617f1b1/evaluate.py#L616-L630
def p_term_var(self, p): ''' term : VAR ''' _LOGGER.debug("term -> VAR") # TODO: determine the type of the var if p[1] not in self._VAR_VALUES: if self._autodefine_vars: self._VAR_VALUES[p[1]] = TypedClass(None, TypedClass.UNKNOWN) if p[1] in self._VAR_VALUES: _LOGGER.debug("term -> VAR") p[0] = self._VAR_VALUES[p[1]] else: raise UndefinedVar()
[ "def", "p_term_var", "(", "self", ",", "p", ")", ":", "_LOGGER", ".", "debug", "(", "\"term -> VAR\"", ")", "# TODO: determine the type of the var", "if", "p", "[", "1", "]", "not", "in", "self", ".", "_VAR_VALUES", ":", "if", "self", ".", "_autodefine_vars"...
term : VAR
[ "term", ":", "VAR" ]
python
train
scanny/python-pptx
pptx/dml/fill.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/dml/fill.py#L67-L78
def gradient(self): """Sets the fill type to gradient. If the fill is not already a gradient, a default gradient is added. The default gradient corresponds to the default in the built-in PowerPoint "White" template. This gradient is linear at angle 90-degrees (upward), with two stops. The first stop is Accent-1 with tint 100%, shade 100%, and satMod 130%. The second stop is Accent-1 with tint 50%, shade 100%, and satMod 350%. """ gradFill = self._xPr.get_or_change_to_gradFill() self._fill = _GradFill(gradFill)
[ "def", "gradient", "(", "self", ")", ":", "gradFill", "=", "self", ".", "_xPr", ".", "get_or_change_to_gradFill", "(", ")", "self", ".", "_fill", "=", "_GradFill", "(", "gradFill", ")" ]
Sets the fill type to gradient. If the fill is not already a gradient, a default gradient is added. The default gradient corresponds to the default in the built-in PowerPoint "White" template. This gradient is linear at angle 90-degrees (upward), with two stops. The first stop is Accent-1 with tint 100%, shade 100%, and satMod 130%. The second stop is Accent-1 with tint 50%, shade 100%, and satMod 350%.
[ "Sets", "the", "fill", "type", "to", "gradient", "." ]
python
train
jaraco/wolframalpha
wolframalpha/__init__.py
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/__init__.py#L206-L215
def results(self): """ The pods that hold the response to a simple, discrete query. """ return ( pod for pod in self.pods if pod.primary or pod.title == 'Result' )
[ "def", "results", "(", "self", ")", ":", "return", "(", "pod", "for", "pod", "in", "self", ".", "pods", "if", "pod", ".", "primary", "or", "pod", ".", "title", "==", "'Result'", ")" ]
The pods that hold the response to a simple, discrete query.
[ "The", "pods", "that", "hold", "the", "response", "to", "a", "simple", "discrete", "query", "." ]
python
test
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L172-L180
def is_expired(self): """ ``True`` if the signature has an expiration date, and is expired. Otherwise, ``False`` """ expires_at = self.expires_at if expires_at is not None and expires_at != self.created: return expires_at < datetime.utcnow() return False
[ "def", "is_expired", "(", "self", ")", ":", "expires_at", "=", "self", ".", "expires_at", "if", "expires_at", "is", "not", "None", "and", "expires_at", "!=", "self", ".", "created", ":", "return", "expires_at", "<", "datetime", ".", "utcnow", "(", ")", "...
``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
[ "True", "if", "the", "signature", "has", "an", "expiration", "date", "and", "is", "expired", ".", "Otherwise", "False" ]
python
train
smdabdoub/phylotoast
bin/filter_rep_set.py
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/filter_rep_set.py#L32-L47
def filter_rep_set(inF, otuSet): """ Parse the rep set file and remove all sequences not associated with unique OTUs. :@type inF: file :@param inF: The representative sequence set :@rtype: list :@return: The set of sequences associated with unique OTUs """ seqs = [] for record in SeqIO.parse(inF, "fasta"): if record.id in otuSet: seqs.append(record) return seqs
[ "def", "filter_rep_set", "(", "inF", ",", "otuSet", ")", ":", "seqs", "=", "[", "]", "for", "record", "in", "SeqIO", ".", "parse", "(", "inF", ",", "\"fasta\"", ")", ":", "if", "record", ".", "id", "in", "otuSet", ":", "seqs", ".", "append", "(", ...
Parse the rep set file and remove all sequences not associated with unique OTUs. :@type inF: file :@param inF: The representative sequence set :@rtype: list :@return: The set of sequences associated with unique OTUs
[ "Parse", "the", "rep", "set", "file", "and", "remove", "all", "sequences", "not", "associated", "with", "unique", "OTUs", "." ]
python
train
nitely/django-hooks
hooks/formhook.py
https://github.com/nitely/django-hooks/blob/26ea2150c9be110e90b9ee60fbfd1065ac30ab1d/hooks/formhook.py#L41-L53
def save(self, *args, **kwargs): """ Save all the forms :param \*args: Positional arguments passed to the forms :param \*\*kwargs: Keyword arguments passed to the forms :return: Sequence of returned values by all the forms as tuples of (instance, result) :rtype: list """ return [ (form, form.save(*args, **kwargs)) for form in self.instances ]
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "(", "form", ",", "form", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "for", "form", "in", "self", ".", "instances", "]" ]
Save all the forms :param \*args: Positional arguments passed to the forms :param \*\*kwargs: Keyword arguments passed to the forms :return: Sequence of returned values by all the forms as tuples of (instance, result) :rtype: list
[ "Save", "all", "the", "forms" ]
python
train
d0c-s4vage/pfp
pfp/interp.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L366-L373
def get_local(self, name, recurse=True): """Get the local field (search for it) from the scope stack. An alias for ``get_var`` :name: The name of the local field """ self._dlog("getting local '{}'".format(name)) return self._search("vars", name, recurse)
[ "def", "get_local", "(", "self", ",", "name", ",", "recurse", "=", "True", ")", ":", "self", ".", "_dlog", "(", "\"getting local '{}'\"", ".", "format", "(", "name", ")", ")", "return", "self", ".", "_search", "(", "\"vars\"", ",", "name", ",", "recurs...
Get the local field (search for it) from the scope stack. An alias for ``get_var`` :name: The name of the local field
[ "Get", "the", "local", "field", "(", "search", "for", "it", ")", "from", "the", "scope", "stack", ".", "An", "alias", "for", "get_var" ]
python
train
python-diamond/Diamond
src/collectors/nvidia_gpu/nvidia_gpu.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/nvidia_gpu/nvidia_gpu.py#L81-L117
def collect_via_pynvml(self, stats_config): """ Use pynvml python binding to collect metrics :param stats_config: :return: """ try: NVML_TEMPERATURE_GPU = 0 pynvml.nvmlInit() device_count = pynvml.nvmlDeviceGetCount() for device_index in xrange(device_count): handle = pynvml.nvmlDeviceGetHandleByIndex(device_index) memoryInfo = pynvml.nvmlDeviceGetMemoryInfo(handle) utilizationRates = pynvml.nvmlDeviceGetUtilizationRates(handle) metrics = { 'memory.total': memoryInfo.total / 1024 / 1024, 'memory.used': memoryInfo.total / 1024 / 1024, 'memory.free': memoryInfo.free / 1024 / 1024, 'utilization.gpu': utilizationRates.gpu, 'utilization.memory': utilizationRates.memory, 'temperature.gpu': pynvml.nvmlDeviceGetTemperature(handle, NVML_TEMPERATURE_GPU) } for stat_name in stats_config[1:]: metric = metrics.get(stat_name) if metric: metric_name = 'gpu_{index}.{stat_name}'.format( index=str(device_index), stat_name=stat_name ) self.publish(metric_name, metric) finally: pynvml.nvmlShutdown()
[ "def", "collect_via_pynvml", "(", "self", ",", "stats_config", ")", ":", "try", ":", "NVML_TEMPERATURE_GPU", "=", "0", "pynvml", ".", "nvmlInit", "(", ")", "device_count", "=", "pynvml", ".", "nvmlDeviceGetCount", "(", ")", "for", "device_index", "in", "xrange...
Use pynvml python binding to collect metrics :param stats_config: :return:
[ "Use", "pynvml", "python", "binding", "to", "collect", "metrics", ":", "param", "stats_config", ":", ":", "return", ":" ]
python
train
cjdrake/pyeda
pyeda/parsing/pla.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/pla.py#L50-L151
def parse(s): """ Parse an input string in PLA format, and return an intermediate representation dict. Parameters ---------- s : str String containing a PLA. Returns ------- A dict with all PLA information: =============== ============ ================================= Key Value type Value description =============== ============ ================================= ninputs int Number of inputs noutputs int Number of outputs input_labels list Input variable names output_labels list Output function names intype int Cover type: {F, R, FD, FR, DR, FDR} cover set Implicant table =============== ============ ================================= """ d = dict(ninputs=None, noutputs=None, input_labels=None, output_labels=None, intype=None, cover=set()) lines = [line.strip() for line in s.splitlines()] for i, line in enumerate(lines, start=1): # skip comments if not line or _COMMENT.match(line): continue # .i m_in = _NINS.match(line) if m_in: if d['ninputs'] is None: d['ninputs'] = int(m_in.group(1)) continue else: raise Error(".i declared more than once") # .o m_out = _NOUTS.match(line) if m_out: if d['noutputs'] is None: d['noutputs'] = int(m_out.group(1)) continue else: raise Error(".o declared more than once") # ignore .p m_prod = _PROD.match(line) if m_prod: continue # .ilb m_ilb = _ILB.match(line) if m_ilb: if d['input_labels'] is None: d['input_labels'] = m_ilb.group(1).split() continue else: raise Error(".ilb declared more than once") # .ob m_ob = _OB.match(line) if m_ob: if d['output_labels'] is None: d['output_labels'] = m_ob.group(1).split() continue else: raise Error(".ob declared more than once") # .type m_type = _TYPE.match(line) if m_type: if d['intype'] is None: d['intype'] = _TYPES[m_type.group(1)] continue else: raise Error(".type declared more tha once") # cube m_cube = _CUBE.match(line) if m_cube: inputs, outputs = m_cube.groups() invec = tuple(_INCODE[c] for c in inputs) outvec = tuple(_OUTCODE[c] for c in outputs) d['cover'].add((invec, outvec)) continue # 
ignore .e m_end = _END.match(line) if m_end: continue raise Error("syntax error on line {}: {}".format(i, line)) return d
[ "def", "parse", "(", "s", ")", ":", "d", "=", "dict", "(", "ninputs", "=", "None", ",", "noutputs", "=", "None", ",", "input_labels", "=", "None", ",", "output_labels", "=", "None", ",", "intype", "=", "None", ",", "cover", "=", "set", "(", ")", ...
Parse an input string in PLA format, and return an intermediate representation dict. Parameters ---------- s : str String containing a PLA. Returns ------- A dict with all PLA information: =============== ============ ================================= Key Value type Value description =============== ============ ================================= ninputs int Number of inputs noutputs int Number of outputs input_labels list Input variable names output_labels list Output function names intype int Cover type: {F, R, FD, FR, DR, FDR} cover set Implicant table =============== ============ =================================
[ "Parse", "an", "input", "string", "in", "PLA", "format", "and", "return", "an", "intermediate", "representation", "dict", "." ]
python
train
pandas-dev/pandas
scripts/validate_docstrings.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L599-L785
def get_validation_data(doc): """ Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function. 
""" errs = [] wrns = [] if not doc.raw_doc: errs.append(error('GL08')) return errs, wrns, '' if doc.start_blank_lines != 1: errs.append(error('GL01')) if doc.end_blank_lines != 1: errs.append(error('GL02')) if doc.double_blank_lines: errs.append(error('GL03')) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs))) for line in doc.raw_doc.splitlines(): if re.match("^ *\t", line): errs.append(error('GL05', line_with_tabs=line.lstrip())) unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS] for section in unexpected_sections: errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS))) correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles] if correct_order != doc.section_titles: errs.append(error('GL07', correct_sections=', '.join(correct_order))) if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. 
deprecated:: ')): errs.append(error('GL09')) if not doc.summary: errs.append(error('SS01')) else: if not doc.summary[0].isupper(): errs.append(error('SS02')) if doc.summary[-1] != '.': errs.append(error('SS03')) if doc.summary != doc.summary.lstrip(): errs.append(error('SS04')) elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append(error('SS05')) if doc.num_summary_lines > 1: errs.append(error('SS06')) if not doc.extended_summary: wrns.append(('ES01', 'No extended summary found')) # PR01: Parameters not documented # PR02: Unknown parameters # PR03: Wrong parameters order errs += doc.parameter_mismatches for param in doc.doc_parameters: if not param.startswith("*"): # Check can ignore var / kwargs if not doc.parameter_type(param): if ':' in param: errs.append(error('PR10', param_name=param.split(':')[0])) else: errs.append(error('PR04', param_name=param)) else: if doc.parameter_type(param)[-1] == '.': errs.append(error('PR05', param_name=param)) common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')] for wrong_type, right_type in common_type_errors: if wrong_type in doc.parameter_type(param): errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type)) if not doc.parameter_desc(param): errs.append(error('PR07', param_name=param)) else: if not doc.parameter_desc(param)[0].isupper(): errs.append(error('PR08', param_name=param)) if doc.parameter_desc(param)[-1] != '.': errs.append(error('PR09', param_name=param)) if doc.is_function_or_method: if not doc.returns: if doc.method_returns_something: errs.append(error('RT01')) else: if len(doc.returns) == 1 and doc.returns[0].name: errs.append(error('RT02')) for name_or_type, type_, desc in doc.returns: if not desc: errs.append(error('RT03')) else: desc = ' '.join(desc) if not desc[0].isupper(): errs.append(error('RT04')) if not desc.endswith('.'): errs.append(error('RT05')) if not doc.yields and 'yield' in doc.method_source: 
errs.append(error('YD01')) if not doc.see_also: wrns.append(error('SA01')) else: for rel_name, rel_desc in doc.see_also.items(): if rel_desc: if not rel_desc.endswith('.'): errs.append(error('SA02', reference_name=rel_name)) if not rel_desc[0].isupper(): errs.append(error('SA03', reference_name=rel_name)) else: errs.append(error('SA04', reference_name=rel_name)) if rel_name.startswith('pandas.'): errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):])) examples_errs = '' if not doc.examples: wrns.append(error('EX01')) else: examples_errs = doc.examples_errors if examples_errs: errs.append(error('EX02', doctest_log=examples_errs)) for err in doc.validate_pep8(): errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else '')) examples_source_code = ''.join(doc.examples_source_code) for wrong_import in ('numpy', 'pandas'): if 'import {}'.format(wrong_import) in examples_source_code: errs.append(error('EX04', imported_library=wrong_import)) return errs, wrns, examples_errs
[ "def", "get_validation_data", "(", "doc", ")", ":", "errs", "=", "[", "]", "wrns", "=", "[", "]", "if", "not", "doc", ".", "raw_doc", ":", "errs", ".", "append", "(", "error", "(", "'GL08'", ")", ")", "return", "errs", ",", "wrns", ",", "''", "if...
Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function.
[ "Validate", "the", "docstring", "." ]
python
train
merll/docker-map
dockermap/client/docker_util.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/client/docker_util.py#L57-L77
def primary_container_name(names, default=None, strip_trailing_slash=True): """ From the list of names, finds the primary name of the container. Returns the defined default value (e.g. the container id or ``None``) in case it cannot find any. :param names: List with name and aliases of the container. :type names: list[unicode | str] :param default: Default value. :param strip_trailing_slash: As read directly from the Docker service, every container name includes a trailing slash. Set this to ``False`` if it is already removed. :type strip_trailing_slash: bool :return: Primary name of the container. :rtype: unicode | str """ if strip_trailing_slash: ex_names = [name[1:] for name in names if name.find('/', 2) == -1] else: ex_names = [name for name in names if name.find('/', 2) == -1] if ex_names: return ex_names[0] return default
[ "def", "primary_container_name", "(", "names", ",", "default", "=", "None", ",", "strip_trailing_slash", "=", "True", ")", ":", "if", "strip_trailing_slash", ":", "ex_names", "=", "[", "name", "[", "1", ":", "]", "for", "name", "in", "names", "if", "name",...
From the list of names, finds the primary name of the container. Returns the defined default value (e.g. the container id or ``None``) in case it cannot find any. :param names: List with name and aliases of the container. :type names: list[unicode | str] :param default: Default value. :param strip_trailing_slash: As read directly from the Docker service, every container name includes a trailing slash. Set this to ``False`` if it is already removed. :type strip_trailing_slash: bool :return: Primary name of the container. :rtype: unicode | str
[ "From", "the", "list", "of", "names", "finds", "the", "primary", "name", "of", "the", "container", ".", "Returns", "the", "defined", "default", "value", "(", "e", ".", "g", ".", "the", "container", "id", "or", "None", ")", "in", "case", "it", "cannot",...
python
train
Ouranosinc/xclim
xclim/run_length.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/run_length.py#L44-L63
def longest_run(da, dim='time'): """Return the length of the longest consecutive run of True values. Parameters ---------- arr : N-dimensional array (boolean) Input array dim : Xarray dimension (default = 'time') Dimension along which to calculate consecutive run Returns ------- N-dimensional array (int) Length of longest run of True values along dimension """ d = rle(da, dim=dim) rl_long = d.max(dim=dim) return rl_long
[ "def", "longest_run", "(", "da", ",", "dim", "=", "'time'", ")", ":", "d", "=", "rle", "(", "da", ",", "dim", "=", "dim", ")", "rl_long", "=", "d", ".", "max", "(", "dim", "=", "dim", ")", "return", "rl_long" ]
Return the length of the longest consecutive run of True values. Parameters ---------- arr : N-dimensional array (boolean) Input array dim : Xarray dimension (default = 'time') Dimension along which to calculate consecutive run Returns ------- N-dimensional array (int) Length of longest run of True values along dimension
[ "Return", "the", "length", "of", "the", "longest", "consecutive", "run", "of", "True", "values", "." ]
python
train
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L2772-L2797
def handle(): # pragma: no cover """ Main program execution handler. """ try: cli = ZappaCLI() sys.exit(cli.handle()) except SystemExit as e: # pragma: no cover cli.on_exit() sys.exit(e.code) except KeyboardInterrupt: # pragma: no cover cli.on_exit() sys.exit(130) except Exception as e: cli.on_exit() click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(") click.echo("\n==============\n") import traceback traceback.print_exc() click.echo("\n==============\n") shamelessly_promote() sys.exit(-1)
[ "def", "handle", "(", ")", ":", "# pragma: no cover", "try", ":", "cli", "=", "ZappaCLI", "(", ")", "sys", ".", "exit", "(", "cli", ".", "handle", "(", ")", ")", "except", "SystemExit", "as", "e", ":", "# pragma: no cover", "cli", ".", "on_exit", "(", ...
Main program execution handler.
[ "Main", "program", "execution", "handler", "." ]
python
train
bmcfee/muda
muda/core.py
https://github.com/bmcfee/muda/blob/ff82efdfaeb98da0a9f9124845826eb20536a9ba/muda/core.py#L18-L65
def jam_pack(jam, **kwargs): '''Pack data into a jams sandbox. If not already present, this creates a `muda` field within `jam.sandbox`, along with `history`, `state`, and version arrays which are populated by deformation objects. Any additional fields can be added to the `muda` sandbox by supplying keyword arguments. Parameters ---------- jam : jams.JAMS A JAMS object Returns ------- jam : jams.JAMS The updated JAMS object Examples -------- >>> jam = jams.JAMS() >>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None)) >>> jam.sandbox <Sandbox: muda> >>> jam.sandbox.muda <Sandbox: state, version, my_data, history> >>> jam.sandbox.muda.my_data {'foo': 5, 'bar': None} ''' if not hasattr(jam.sandbox, 'muda'): # If there's no mudabox, create one jam.sandbox.muda = jams.Sandbox(history=[], state=[], version=dict(muda=version, librosa=librosa.__version__, jams=jams.__version__, pysoundfile=psf.__version__)) elif not isinstance(jam.sandbox.muda, jams.Sandbox): # If there is a muda entry, but it's not a sandbox, coerce it jam.sandbox.muda = jams.Sandbox(**jam.sandbox.muda) jam.sandbox.muda.update(**kwargs) return jam
[ "def", "jam_pack", "(", "jam", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "jam", ".", "sandbox", ",", "'muda'", ")", ":", "# If there's no mudabox, create one", "jam", ".", "sandbox", ".", "muda", "=", "jams", ".", "Sandbox", "(", ...
Pack data into a jams sandbox. If not already present, this creates a `muda` field within `jam.sandbox`, along with `history`, `state`, and version arrays which are populated by deformation objects. Any additional fields can be added to the `muda` sandbox by supplying keyword arguments. Parameters ---------- jam : jams.JAMS A JAMS object Returns ------- jam : jams.JAMS The updated JAMS object Examples -------- >>> jam = jams.JAMS() >>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None)) >>> jam.sandbox <Sandbox: muda> >>> jam.sandbox.muda <Sandbox: state, version, my_data, history> >>> jam.sandbox.muda.my_data {'foo': 5, 'bar': None}
[ "Pack", "data", "into", "a", "jams", "sandbox", "." ]
python
valid
voronind/vk
vk/session.py
https://github.com/voronind/vk/blob/37f41c7634f67149d4dab8017be0adca5ea3dc24/vk/session.py#L77-L84
def on_api_error_14(self, request): """ 14. Captcha needed """ request.method_params['captcha_key'] = self.get_captcha_key(request) request.method_params['captcha_sid'] = request.api_error.captcha_sid return self.send(request)
[ "def", "on_api_error_14", "(", "self", ",", "request", ")", ":", "request", ".", "method_params", "[", "'captcha_key'", "]", "=", "self", ".", "get_captcha_key", "(", "request", ")", "request", ".", "method_params", "[", "'captcha_sid'", "]", "=", "request", ...
14. Captcha needed
[ "14", ".", "Captcha", "needed" ]
python
train
crossbario/txaio-etcd
txaioetcd/_client_tx.py
https://github.com/crossbario/txaio-etcd/blob/c9aebff7f288a0b219bffc9d2579d22cf543baa5/txaioetcd/_client_tx.py#L256-L291
def set(self, key, value, lease=None, return_previous=None, timeout=None): """ Set the value for the key in the key-value store. Setting a value on a key increments the revision of the key-value store and generates one event in the event history. :param key: key is the key, in bytes, to put into the key-value store. :type key: bytes :param value: value is the value, in bytes, to associate with the key in the key-value store. :key value: bytes :param lease: Lease to associate the key in the key-value store with. :type lease: instance of :class:`txaioetcd.Lease` or None :param return_previous: If set, return the previous key-value. :type return_previous: bool or None :param timeout: Request timeout in seconds. :type timeout: int :returns: Revision info :rtype: instance of :class:`txaioetcd.Revision` """ assembler = commons.PutRequestAssembler(self._url, key, value, lease, return_previous) obj = yield self._post(assembler.url, assembler.data, timeout) revision = Revision._parse(obj) returnValue(revision)
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "lease", "=", "None", ",", "return_previous", "=", "None", ",", "timeout", "=", "None", ")", ":", "assembler", "=", "commons", ".", "PutRequestAssembler", "(", "self", ".", "_url", ",", "key", ...
Set the value for the key in the key-value store. Setting a value on a key increments the revision of the key-value store and generates one event in the event history. :param key: key is the key, in bytes, to put into the key-value store. :type key: bytes :param value: value is the value, in bytes, to associate with the key in the key-value store. :key value: bytes :param lease: Lease to associate the key in the key-value store with. :type lease: instance of :class:`txaioetcd.Lease` or None :param return_previous: If set, return the previous key-value. :type return_previous: bool or None :param timeout: Request timeout in seconds. :type timeout: int :returns: Revision info :rtype: instance of :class:`txaioetcd.Revision`
[ "Set", "the", "value", "for", "the", "key", "in", "the", "key", "-", "value", "store", "." ]
python
train
pytest-dev/pluggy
scripts/release.py
https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/scripts/release.py#L36-L44
def pre_release(version): """Generates new docs, release announcements and creates a local tag.""" create_branch(version) changelog(version, write_out=True) check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"]) print() print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
[ "def", "pre_release", "(", "version", ")", ":", "create_branch", "(", "version", ")", "changelog", "(", "version", ",", "write_out", "=", "True", ")", "check_call", "(", "[", "\"git\"", ",", "\"commit\"", ",", "\"-a\"", ",", "\"-m\"", ",", "f\"Preparing rele...
Generates new docs, release announcements and creates a local tag.
[ "Generates", "new", "docs", "release", "announcements", "and", "creates", "a", "local", "tag", "." ]
python
train
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L515-L530
def get_uncompleted_tasks(self): """Return all of a user's uncompleted tasks. .. warning:: Requires Todoist premium. :return: A list of uncompleted tasks. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> uncompleted_tasks = user.get_uncompleted_tasks() >>> for task in uncompleted_tasks: ... task.complete() """ tasks = (p.get_uncompleted_tasks() for p in self.get_projects()) return list(itertools.chain.from_iterable(tasks))
[ "def", "get_uncompleted_tasks", "(", "self", ")", ":", "tasks", "=", "(", "p", ".", "get_uncompleted_tasks", "(", ")", "for", "p", "in", "self", ".", "get_projects", "(", ")", ")", "return", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "...
Return all of a user's uncompleted tasks. .. warning:: Requires Todoist premium. :return: A list of uncompleted tasks. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> uncompleted_tasks = user.get_uncompleted_tasks() >>> for task in uncompleted_tasks: ... task.complete()
[ "Return", "all", "of", "a", "user", "s", "uncompleted", "tasks", "." ]
python
train
cackharot/suds-py3
suds/servicedefinition.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/servicedefinition.py#L158-L170
def nextprefix(self): """ Get the next available prefix. This means a prefix starting with 'ns' with a number appended as (ns0, ns1, ..) that is not already defined on the wsdl document. """ used = [ns[0] for ns in self.prefixes] used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()] for n in range(0, 1024): p = 'ns%d' % n if p not in used: return p raise Exception('prefixes exhausted')
[ "def", "nextprefix", "(", "self", ")", ":", "used", "=", "[", "ns", "[", "0", "]", "for", "ns", "in", "self", ".", "prefixes", "]", "used", "+=", "[", "ns", "[", "0", "]", "for", "ns", "in", "self", ".", "wsdl", ".", "root", ".", "nsprefixes", ...
Get the next available prefix. This means a prefix starting with 'ns' with a number appended as (ns0, ns1, ..) that is not already defined on the wsdl document.
[ "Get", "the", "next", "available", "prefix", ".", "This", "means", "a", "prefix", "starting", "with", "ns", "with", "a", "number", "appended", "as", "(", "ns0", "ns1", "..", ")", "that", "is", "not", "already", "defined", "on", "the", "wsdl", "document",...
python
train
mjirik/io3d
io3d/misc.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/misc.py#L30-L58
def suggest_filename(file_path, exists=None): """ Try if exist path and append number to its end. For debug you can set as input if file exists or not. """ import os.path import re if not isinstance(exists, bool): exists = os.path.exists(file_path) if exists: file_path, file_extension = os.path.splitext(file_path) # print(file_path) m = re.search(r"_\d+$", file_path) if m is None: # cislo = 2 new_cislo_str = "_2" else: cislostr = (m.group()) cislo = int(cislostr[1:]) + 1 # it is normal number file_path = file_path[:-len(cislostr)] new_cislo_str = "_" + str(cislo) file_path = file_path + new_cislo_str + file_extension # .zfill(2) # trorcha rekurze file_path = suggest_filename(file_path) return file_path
[ "def", "suggest_filename", "(", "file_path", ",", "exists", "=", "None", ")", ":", "import", "os", ".", "path", "import", "re", "if", "not", "isinstance", "(", "exists", ",", "bool", ")", ":", "exists", "=", "os", ".", "path", ".", "exists", "(", "fi...
Try if exist path and append number to its end. For debug you can set as input if file exists or not.
[ "Try", "if", "exist", "path", "and", "append", "number", "to", "its", "end", ".", "For", "debug", "you", "can", "set", "as", "input", "if", "file", "exists", "or", "not", "." ]
python
train
ChristianTremblay/BAC0
BAC0/core/utils/notes.py
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/utils/notes.py#L44-L85
def update_log_level(level=None, *, file=None, stderr=None, stdout=None): """ Typical usage : Normal BAC0.log_level(file='warning', stdout='warning', stderr='error') Info on console....but not in file BAC0.log_level(file='warning', stdout='info', stderr='error') Debug BAC0.log_level(file='debug', stdout='info', stderr='error') """ if level: file = level stderr = level stdout = level file = convert_level(file) stderr = convert_level(stderr) stdout = convert_level(stdout) BAC0_logger = logging.getLogger("BAC0") # if console: # BAC0_logger.setLevel(console) # BAC0_logger.warning('Changed log level of console to {}'.format(logging.getLevelName(level))) for handler in BAC0_logger.handlers: if file and handler.get_name() == "file_handler": handler.setLevel(file) BAC0_logger.info( "Changed log level of file to {}".format(logging.getLevelName(file)) ) elif stdout and handler.get_name() == "stdout": handler.setLevel(stdout) BAC0_logger.info( "Changed log level of console stdout to {}".format( logging.getLevelName(stdout) ) ) elif stderr and handler.get_name() == "stderr": handler.setLevel(stderr) BAC0_logger.info( "Changed log level of console stderr to {}".format( logging.getLevelName(stderr) ) )
[ "def", "update_log_level", "(", "level", "=", "None", ",", "*", ",", "file", "=", "None", ",", "stderr", "=", "None", ",", "stdout", "=", "None", ")", ":", "if", "level", ":", "file", "=", "level", "stderr", "=", "level", "stdout", "=", "level", "f...
Typical usage : Normal BAC0.log_level(file='warning', stdout='warning', stderr='error') Info on console....but not in file BAC0.log_level(file='warning', stdout='info', stderr='error') Debug BAC0.log_level(file='debug', stdout='info', stderr='error')
[ "Typical", "usage", ":", "Normal", "BAC0", ".", "log_level", "(", "file", "=", "warning", "stdout", "=", "warning", "stderr", "=", "error", ")", "Info", "on", "console", "....", "but", "not", "in", "file", "BAC0", ".", "log_level", "(", "file", "=", "w...
python
train
openego/eTraGo
etrago/tools/utilities.py
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L1652-L1755
def crossborder_capacity(network, method, capacity_factor): """ Adjust interconnector capacties. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA method : string Method of correction. Options are 'ntc_acer' and 'thermal_acer'. 'ntc_acer' corrects all capacities according to values published by the ACER in 2016. 'thermal_acer' corrects certain capacities where our dataset most likely overestimates the thermal capacity. capacity_factor : float branch capacity factor. Reduction by branch-capacity factor is applied afterwards and shouln't effect ntc-values, which already include (n-1)-security. To exclude the ntc-capacities from the capacity factor, the crossborder-capacities are diveded by the factor in this function. For thermal-acer this is excluded by setting branch capacity factors to one. """ if method == 'ntc_acer': cap_per_country = {'AT': 4900, 'CH': 2695, 'CZ': 1301, 'DK': 913, 'FR': 3593, 'LU': 2912, 'NL': 2811, 'PL': 280, 'SE': 217, 'CZAT': 574, 'ATCZ': 574, 'CZPL': 312, 'PLCZ': 312, 'ATCH': 979, 'CHAT': 979, 'CHFR': 2087, 'FRCH': 2087, 'FRLU': 364, 'LUFR': 364, 'SEDK': 1928, 'DKSE': 1928} elif method == 'thermal_acer': cap_per_country = {'CH': 12000, 'DK': 4000, 'SEDK': 3500, 'DKSE': 3500} capacity_factor = {'HV': 1, 'eHV':1} if not network.lines[network.lines.country != 'DE'].empty: weighting = network.lines.loc[network.lines.country!='DE', 's_nom'].\ groupby(network.lines.country).transform(lambda x: x/x.sum()) weighting_links = network.links.loc[network.links.country!='DE', 'p_nom'].\ groupby(network.links.country).transform(lambda x: x/x.sum()) network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) for country in cap_per_country: index_HV = network.lines[(network.lines.country == country) &( network.lines.v_nom == 110)].index index_eHV = network.lines[(network.lines.country == country) &( network.lines.v_nom > 110)].index index_links = network.links[network.links.country == country].index if not 
network.lines[network.lines.country == country].empty: network.lines.loc[index_HV, 's_nom'] = weighting[index_HV] * \ cap_per_country[country] / capacity_factor['HV'] network.lines.loc[index_eHV, 's_nom'] = \ weighting[index_eHV] * cap_per_country[country] /\ capacity_factor['eHV'] if not network.links[network.links.country == country].empty: network.links.loc[index_links, 'p_nom'] = \ weighting_links[index_links] * cap_per_country\ [country] if country == 'SE': network.links.loc[network.links.country == country, 'p_nom'] =\ cap_per_country[country] if not network.lines[network.lines.country == (country+country)].empty: i_HV = network.lines[(network.lines.v_nom == 110)&( network.lines.country ==country+country)].index i_eHV = network.lines[(network.lines.v_nom == 110)&( network.lines.country ==country+country)].index network.lines.loc[i_HV, 's_nom'] = \ weighting[i_HV] * cap_per_country[country]/\ capacity_factor['HV'] network.lines.loc[i_eHV, 's_nom'] = \ weighting[i_eHV] * cap_per_country[country]/\ capacity_factor['eHV'] if not network.links[network.links.country == (country+country)].empty: i_links = network.links[network.links.country == (country+country)].index network.links.loc[i_links, 'p_nom'] = \ weighting_links[i_links] * cap_per_country\ [country]*capacity_factor
[ "def", "crossborder_capacity", "(", "network", ",", "method", ",", "capacity_factor", ")", ":", "if", "method", "==", "'ntc_acer'", ":", "cap_per_country", "=", "{", "'AT'", ":", "4900", ",", "'CH'", ":", "2695", ",", "'CZ'", ":", "1301", ",", "'DK'", ":...
Adjust interconnector capacties. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA method : string Method of correction. Options are 'ntc_acer' and 'thermal_acer'. 'ntc_acer' corrects all capacities according to values published by the ACER in 2016. 'thermal_acer' corrects certain capacities where our dataset most likely overestimates the thermal capacity. capacity_factor : float branch capacity factor. Reduction by branch-capacity factor is applied afterwards and shouln't effect ntc-values, which already include (n-1)-security. To exclude the ntc-capacities from the capacity factor, the crossborder-capacities are diveded by the factor in this function. For thermal-acer this is excluded by setting branch capacity factors to one.
[ "Adjust", "interconnector", "capacties", "." ]
python
train
ethereum/web3.py
web3/_utils/decorators.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/decorators.py#L42-L59
def deprecated_for(replace_message): """ Decorate a deprecated function, with info about what to use instead, like: @deprecated_for("toBytes()") def toAscii(arg): ... """ def decorator(to_wrap): @functools.wraps(to_wrap) def wrapper(*args, **kwargs): warnings.warn( "%s is deprecated in favor of %s" % (to_wrap.__name__, replace_message), category=DeprecationWarning, stacklevel=2) return to_wrap(*args, **kwargs) return wrapper return decorator
[ "def", "deprecated_for", "(", "replace_message", ")", ":", "def", "decorator", "(", "to_wrap", ")", ":", "@", "functools", ".", "wraps", "(", "to_wrap", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn"...
Decorate a deprecated function, with info about what to use instead, like: @deprecated_for("toBytes()") def toAscii(arg): ...
[ "Decorate", "a", "deprecated", "function", "with", "info", "about", "what", "to", "use", "instead", "like", ":" ]
python
train
sosreport/sos
sos/utilities.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/utilities.py#L50-L62
def convert_bytes(bytes_, K=1 << 10, M=1 << 20, G=1 << 30, T=1 << 40): """Converts a number of bytes to a shorter, more human friendly format""" fn = float(bytes_) if bytes_ >= T: return '%.1fT' % (fn / T) elif bytes_ >= G: return '%.1fG' % (fn / G) elif bytes_ >= M: return '%.1fM' % (fn / M) elif bytes_ >= K: return '%.1fK' % (fn / K) else: return '%d' % bytes_
[ "def", "convert_bytes", "(", "bytes_", ",", "K", "=", "1", "<<", "10", ",", "M", "=", "1", "<<", "20", ",", "G", "=", "1", "<<", "30", ",", "T", "=", "1", "<<", "40", ")", ":", "fn", "=", "float", "(", "bytes_", ")", "if", "bytes_", ">=", ...
Converts a number of bytes to a shorter, more human friendly format
[ "Converts", "a", "number", "of", "bytes", "to", "a", "shorter", "more", "human", "friendly", "format" ]
python
train
SwissDataScienceCenter/renku-python
renku/cli/move.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/move.py#L42-L132
def move(ctx, client, sources, destination): """Move files and check repository for potential problems.""" from renku.api._git import _expand_directories dst = Path(destination) def fmt_path(path): """Format path as relative to the client path.""" return str(Path(path).absolute().relative_to(client.path)) files = { fmt_path(source): fmt_path(file_or_dir) for file_or_dir in sources for source in _expand_directories((file_or_dir, )) } def fmt_dst(path): """Build a destination path for a source path.""" return str(dst / os.path.relpath(path, start=files[path])) destinations = {source: fmt_dst(source) for source in files} # 1. Check .gitignore. ignored = client.find_ignored_paths(*destinations.values()) if ignored: click.echo(WARNING + 'Renamed files match .gitignore.\n') if click.confirm( 'Do you want to edit ".gitignore" now?', default=False ): click.edit(filename=str(client.path / '.gitignore')) # 2. Update dataset metadata files. with progressbar( client.datasets.items(), item_show_func=lambda item: str(item[1].short_id) if item else '', label='Updating dataset metadata', width=0, ) as bar: for (path, dataset) in bar: renames = {} for file in dataset.files: filepath = fmt_path(os.path.normpath(str(path.parent / file))) if filepath in files: renames[file] = os.path.relpath( destinations[filepath], start=str(path.parent) ) if renames: dataset = dataset.rename_files( lambda key: renames.get(key, key) ) dataset.to_yaml() # 3. Manage .gitattributes for external storage. tracked = tuple( path for path, attr in client.find_attr(*files).items() if attr.get('filter') == 'lfs' ) client.untrack_paths_from_storage(*tracked) existing = client.find_attr(*tracked) if existing: click.echo(WARNING + 'There are custom .gitattributes.\n') if click.confirm( 'Do you want to edit ".gitattributes" now?', default=False ): click.edit(filename=str(client.path / '.gitattributes')) client.track_paths_in_storage(*(destinations[path] for path in tracked)) # 4. Handle symlinks. 
dst.parent.mkdir(parents=True, exist_ok=True) for source, target in destinations.items(): src = Path(source) if src.is_symlink(): Path(target).parent.mkdir(parents=True, exist_ok=True) Path(target).symlink_to( os.path.relpath( str(src.resolve()), start=os.path.dirname(target) ) ) src.unlink() del files[source] # Finally move the files. final_sources = list(set(files.values())) if final_sources: run(['git', 'mv'] + final_sources + [destination], check=True)
[ "def", "move", "(", "ctx", ",", "client", ",", "sources", ",", "destination", ")", ":", "from", "renku", ".", "api", ".", "_git", "import", "_expand_directories", "dst", "=", "Path", "(", "destination", ")", "def", "fmt_path", "(", "path", ")", ":", "\...
Move files and check repository for potential problems.
[ "Move", "files", "and", "check", "repository", "for", "potential", "problems", "." ]
python
train
sacrud/ps_alchemy
ps_alchemy/__init__.py
https://github.com/sacrud/ps_alchemy/blob/4f042329eb4643bf26fa2540df277fa94c5265ec/ps_alchemy/__init__.py#L15-L26
def models_preparing(app): """ Wrap all sqlalchemy model in settings. """ def wrapper(resource, parent): if isinstance(resource, DeclarativeMeta): resource = ListResource(resource) if not getattr(resource, '__parent__', None): resource.__parent__ = parent return resource resources_preparing_factory(app, wrapper)
[ "def", "models_preparing", "(", "app", ")", ":", "def", "wrapper", "(", "resource", ",", "parent", ")", ":", "if", "isinstance", "(", "resource", ",", "DeclarativeMeta", ")", ":", "resource", "=", "ListResource", "(", "resource", ")", "if", "not", "getattr...
Wrap all sqlalchemy model in settings.
[ "Wrap", "all", "sqlalchemy", "model", "in", "settings", "." ]
python
valid
hsolbrig/PyShEx
pyshex/shape_expressions_language/p5_4_node_constraints.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_4_node_constraints.py#L20-L29
def satisfiesNodeConstraint(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _: DebugContext) -> bool: """ `5.4.1 Semantics <http://shex.io/shex-semantics/#node-constraint-semantics>`_ For a node n and constraint nc, satisfies2(n, nc) if and only if for every nodeKind, datatype, xsFacet and values constraint value v present in nc nodeSatisfies(n, v). The following sections define nodeSatisfies for each of these types of constraints: """ return nodeSatisfiesNodeKind(cntxt, n, nc) and nodeSatisfiesDataType(cntxt, n, nc) and \ nodeSatisfiesStringFacet(cntxt, n, nc) and nodeSatisfiesNumericFacet(cntxt, n, nc) and \ nodeSatisfiesValues(cntxt, n, nc)
[ "def", "satisfiesNodeConstraint", "(", "cntxt", ":", "Context", ",", "n", ":", "Node", ",", "nc", ":", "ShExJ", ".", "NodeConstraint", ",", "_", ":", "DebugContext", ")", "->", "bool", ":", "return", "nodeSatisfiesNodeKind", "(", "cntxt", ",", "n", ",", ...
`5.4.1 Semantics <http://shex.io/shex-semantics/#node-constraint-semantics>`_ For a node n and constraint nc, satisfies2(n, nc) if and only if for every nodeKind, datatype, xsFacet and values constraint value v present in nc nodeSatisfies(n, v). The following sections define nodeSatisfies for each of these types of constraints:
[ "5", ".", "4", ".", "1", "Semantics", "<http", ":", "//", "shex", ".", "io", "/", "shex", "-", "semantics", "/", "#node", "-", "constraint", "-", "semantics", ">", "_" ]
python
train
gwastro/pycbc
pycbc/pool.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L91-L112
def map(self, func, items, chunksize=None): """ Catch keyboard interuppts to allow the pool to exit cleanly. Parameters ---------- func: function Function to call items: list of tuples Arguments to pass chunksize: int, Optional Number of calls for each process to handle at once """ results = self.map_async(func, items, chunksize) while True: try: return results.get(1800) except TimeoutError: pass except KeyboardInterrupt: self.terminate() self.join() raise KeyboardInterrupt
[ "def", "map", "(", "self", ",", "func", ",", "items", ",", "chunksize", "=", "None", ")", ":", "results", "=", "self", ".", "map_async", "(", "func", ",", "items", ",", "chunksize", ")", "while", "True", ":", "try", ":", "return", "results", ".", "...
Catch keyboard interuppts to allow the pool to exit cleanly. Parameters ---------- func: function Function to call items: list of tuples Arguments to pass chunksize: int, Optional Number of calls for each process to handle at once
[ "Catch", "keyboard", "interuppts", "to", "allow", "the", "pool", "to", "exit", "cleanly", "." ]
python
train
saltstack/salt
salt/cloud/clouds/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/lxc.py#L511-L570
def get_configured_provider(vm_=None): ''' Return the contextual provider of None if no configured one can be found. ''' if vm_ is None: vm_ = {} dalias, driver = __active_provider_name__.split(':') data = None tgt = 'unknown' img_provider = __opts__.get('list_images', '') arg_providers = __opts__.get('names', []) matched = False # --list-images level if img_provider: tgt = 'provider: {0}'.format(img_provider) if dalias == img_provider: data = get_provider(img_provider) matched = True # providers are set in configuration if not data and 'profile' not in __opts__ and arg_providers: for name in arg_providers: tgt = 'provider: {0}'.format(name) if dalias == name: data = get_provider(name) if data: matched = True break # -p is providen, get the uplinked provider elif 'profile' in __opts__: curprof = __opts__['profile'] profs = __opts__['profiles'] tgt = 'profile: {0}'.format(curprof) if ( curprof in profs and profs[curprof]['provider'] == __active_provider_name__ ): prov, cdriver = profs[curprof]['provider'].split(':') tgt += ' provider: {0}'.format(prov) data = get_provider(prov) matched = True # fallback if we have only __active_provider_name__ if ( (__opts__.get('destroy', False) and not data) or ( not matched and __active_provider_name__ ) ): data = __opts__.get('providers', {}).get(dalias, {}).get(driver, {}) # in all cases, verify that the linked saltmaster is alive. if data: ret = _salt('test.ping', salt_target=data['target']) if ret: return data else: log.error( 'Configured provider %s minion: %s is unreachable', __active_provider_name__, data['target'] ) return False
[ "def", "get_configured_provider", "(", "vm_", "=", "None", ")", ":", "if", "vm_", "is", "None", ":", "vm_", "=", "{", "}", "dalias", ",", "driver", "=", "__active_provider_name__", ".", "split", "(", "':'", ")", "data", "=", "None", "tgt", "=", "'unkno...
Return the contextual provider of None if no configured one can be found.
[ "Return", "the", "contextual", "provider", "of", "None", "if", "no", "configured", "one", "can", "be", "found", "." ]
python
train
polysquare/jobstamps
jobstamps/jobstamp.py
https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L96-L105
def check_dependency(self, dependency_path): """Check if mtime of dependency_path is greater than stored mtime.""" stored_hash = self._stamp_file_hashes.get(dependency_path) # This file was newly added, or we don't have a file # with stored hashes yet. Assume out of date. if not stored_hash: return False return stored_hash == _sha1_for_file(dependency_path)
[ "def", "check_dependency", "(", "self", ",", "dependency_path", ")", ":", "stored_hash", "=", "self", ".", "_stamp_file_hashes", ".", "get", "(", "dependency_path", ")", "# This file was newly added, or we don't have a file", "# with stored hashes yet. Assume out of date.", "...
Check if mtime of dependency_path is greater than stored mtime.
[ "Check", "if", "mtime", "of", "dependency_path", "is", "greater", "than", "stored", "mtime", "." ]
python
train
django-fluent/django-fluent-contents
fluent_contents/models/managers.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/models/managers.py#L164-L196
def create_for_placeholder(self, placeholder, sort_order=1, language_code=None, **kwargs): """ Create a Content Item with the given parameters If the language_code is not provided, the language code of the parent will be used. This may perform an additional database query, unless the :class:`~fluent_contents.models.managers.PlaceholderManager` methods were used to construct the object, such as :func:`~fluent_contents.models.managers.PlaceholderManager.create_for_object` or :func:`~fluent_contents.models.managers.PlaceholderManager.get_by_slot` """ if language_code is None: # Could also use get_language() or appsettings.FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE # thus avoid the risk of performing an extra query here to the parent. # However, this identical behavior to BaseContentItemFormSet, # and the parent can be set already via Placeholder.objects.create_for_object() language_code = get_parent_language_code(placeholder.parent) obj = self.create( placeholder=placeholder, parent_type_id=placeholder.parent_type_id, parent_id=placeholder.parent_id, sort_order=sort_order, language_code=language_code, **kwargs ) # Fill the reverse caches obj.placeholder = placeholder parent = getattr(placeholder, '_parent_cache', None) # by GenericForeignKey (_meta.virtual_fields[0].cache_attr) if parent is not None: obj.parent = parent return obj
[ "def", "create_for_placeholder", "(", "self", ",", "placeholder", ",", "sort_order", "=", "1", ",", "language_code", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "language_code", "is", "None", ":", "# Could also use get_language() or appsettings.FLUENT_CONT...
Create a Content Item with the given parameters If the language_code is not provided, the language code of the parent will be used. This may perform an additional database query, unless the :class:`~fluent_contents.models.managers.PlaceholderManager` methods were used to construct the object, such as :func:`~fluent_contents.models.managers.PlaceholderManager.create_for_object` or :func:`~fluent_contents.models.managers.PlaceholderManager.get_by_slot`
[ "Create", "a", "Content", "Item", "with", "the", "given", "parameters" ]
python
train
saltstack/salt
salt/grains/napalm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L82-L95
def _retrieve_device_cache(proxy=None): ''' Loads the network device details if not cached already. ''' global DEVICE_CACHE if not DEVICE_CACHE: if proxy and salt.utils.napalm.is_proxy(__opts__): # if proxy var passed and is NAPALM-type proxy minion if 'napalm.get_device' in proxy: DEVICE_CACHE = proxy['napalm.get_device']() elif not proxy and salt.utils.napalm.is_minion(__opts__): # if proxy var not passed and is running in a straight minion DEVICE_CACHE = salt.utils.napalm.get_device(__opts__) return DEVICE_CACHE
[ "def", "_retrieve_device_cache", "(", "proxy", "=", "None", ")", ":", "global", "DEVICE_CACHE", "if", "not", "DEVICE_CACHE", ":", "if", "proxy", "and", "salt", ".", "utils", ".", "napalm", ".", "is_proxy", "(", "__opts__", ")", ":", "# if proxy var passed and ...
Loads the network device details if not cached already.
[ "Loads", "the", "network", "device", "details", "if", "not", "cached", "already", "." ]
python
train
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L2491-L2505
def delete_library_value(self, key: str) -> None: """Delete the library value for the given key. Please consult the developer documentation for a list of valid keys. .. versionadded:: 1.0 Scriptable: Yes """ desc = Metadata.session_key_map.get(key) if desc is not None: field_id = desc['path'][-1] setattr(ApplicationData.get_session_metadata_model(), field_id, None) return raise KeyError()
[ "def", "delete_library_value", "(", "self", ",", "key", ":", "str", ")", "->", "None", ":", "desc", "=", "Metadata", ".", "session_key_map", ".", "get", "(", "key", ")", "if", "desc", "is", "not", "None", ":", "field_id", "=", "desc", "[", "'path'", ...
Delete the library value for the given key. Please consult the developer documentation for a list of valid keys. .. versionadded:: 1.0 Scriptable: Yes
[ "Delete", "the", "library", "value", "for", "the", "given", "key", "." ]
python
train
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L1633-L1637
def locate(self, path): """ Find a config item along a path; leading slash is optional and ignored. """ return Zconfig(lib.zconfig_locate(self._as_parameter_, path), False)
[ "def", "locate", "(", "self", ",", "path", ")", ":", "return", "Zconfig", "(", "lib", ".", "zconfig_locate", "(", "self", ".", "_as_parameter_", ",", "path", ")", ",", "False", ")" ]
Find a config item along a path; leading slash is optional and ignored.
[ "Find", "a", "config", "item", "along", "a", "path", ";", "leading", "slash", "is", "optional", "and", "ignored", "." ]
python
train
jalanb/pysyte
pysyte/bash/git.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/git.py#L217-L232
def log(args, number=None, oneline=False, quiet=False): """Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option """ options = ' '.join([ number and str('-n %s' % number) or '', oneline and '--oneline' or '' ]) try: return run('log %s %s' % (options, args), quiet=quiet) except UnknownRevision: return ''
[ "def", "log", "(", "args", ",", "number", "=", "None", ",", "oneline", "=", "False", ",", "quiet", "=", "False", ")", ":", "options", "=", "' '", ".", "join", "(", "[", "number", "and", "str", "(", "'-n %s'", "%", "number", ")", "or", "''", ",", ...
Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option
[ "Run", "a", "git", "log", "...", "command", "and", "return", "stdout" ]
python
train
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L130-L136
def barray(iterlines): """ Array of bytes """ lst = [line.encode('utf-8') for line in iterlines] arr = numpy.array(lst) return arr
[ "def", "barray", "(", "iterlines", ")", ":", "lst", "=", "[", "line", ".", "encode", "(", "'utf-8'", ")", "for", "line", "in", "iterlines", "]", "arr", "=", "numpy", ".", "array", "(", "lst", ")", "return", "arr" ]
Array of bytes
[ "Array", "of", "bytes" ]
python
train
xeroc/python-graphenelib
graphenecommon/amount.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/amount.py#L159-L166
def asset(self): """ Returns the asset as instance of :class:`.asset.Asset` """ if not self["asset"]: self["asset"] = self.asset_class( self["symbol"], blockchain_instance=self.blockchain ) return self["asset"]
[ "def", "asset", "(", "self", ")", ":", "if", "not", "self", "[", "\"asset\"", "]", ":", "self", "[", "\"asset\"", "]", "=", "self", ".", "asset_class", "(", "self", "[", "\"symbol\"", "]", ",", "blockchain_instance", "=", "self", ".", "blockchain", ")"...
Returns the asset as instance of :class:`.asset.Asset`
[ "Returns", "the", "asset", "as", "instance", "of", ":", "class", ":", ".", "asset", ".", "Asset" ]
python
valid
nagius/snmp_passpersist
example/settings.py
https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/example/settings.py#L76-L82
def write(self): '''atomic writing''' tmp_file, tmp_fname = tempfile.mkstemp() os.close(tmp_file) shutil.copystat(self.file_name, tmp_fname) self.config.write(open(tmp_fname, 'w')) shutil.move(tmp_fname, self.file_name)
[ "def", "write", "(", "self", ")", ":", "tmp_file", ",", "tmp_fname", "=", "tempfile", ".", "mkstemp", "(", ")", "os", ".", "close", "(", "tmp_file", ")", "shutil", ".", "copystat", "(", "self", ".", "file_name", ",", "tmp_fname", ")", "self", ".", "c...
atomic writing
[ "atomic", "writing" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/ipaddress.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/ipaddress.py#L1750-L1852
def _ip_int_from_string(cls, ip_str): """Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address. """ if not ip_str: raise AddressValueError('Address cannot be empty') parts = ip_str.split(':') # An IPv6 address needs at least 2 colons (3 parts). _min_parts = 3 if len(parts) < _min_parts: msg = "At least %d parts expected in %r" % (_min_parts, ip_str) raise AddressValueError(msg) # If the address has an IPv4-style suffix, convert it to hexadecimal. if '.' in parts[-1]: try: ipv4_int = IPv4Address(parts.pop())._ip except AddressValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str)) parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) parts.append('%x' % (ipv4_int & 0xFFFF)) # An IPv6 address can't have more than 8 colons (9 parts). # The extra colon comes from using the "::" notation for a single # leading or trailing zero part. _max_parts = cls._HEXTET_COUNT + 1 if len(parts) > _max_parts: msg = "At most %d colons permitted in %r" % ( _max_parts - 1, ip_str) raise AddressValueError(msg) # Disregarding the endpoints, find '::' with nothing in between. # This indicates that a run of zeroes has been skipped. skip_index = None for i in _compat_range(1, len(parts) - 1): if not parts[i]: if skip_index is not None: # Can't have more than one '::' msg = "At most one '::' permitted in %r" % ip_str raise AddressValueError(msg) skip_index = i # parts_hi is the number of parts to copy from above/before the '::' # parts_lo is the number of parts to copy from below/after the '::' if skip_index is not None: # If we found a '::', then check if it also covers the endpoints. 
parts_hi = skip_index parts_lo = len(parts) - skip_index - 1 if not parts[0]: parts_hi -= 1 if parts_hi: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: parts_lo -= 1 if parts_lo: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) if parts_skipped < 1: msg = "Expected at most %d other parts with '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) else: # Otherwise, allocate the entire address to parts_hi. The # endpoints could still be empty, but _parse_hextet() will check # for that. if len(parts) != cls._HEXTET_COUNT: msg = "Exactly %d parts expected without '::' in %r" raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) if not parts[0]: msg = "Leading ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # ^: requires ^:: if not parts[-1]: msg = "Trailing ':' only permitted as part of '::' in %r" raise AddressValueError(msg % ip_str) # :$ requires ::$ parts_hi = len(parts) parts_lo = 0 parts_skipped = 0 try: # Now, parse the hextets into a 128-bit integer. ip_int = 0 for i in range(parts_hi): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) ip_int <<= 16 * parts_skipped for i in range(-parts_lo, 0): ip_int <<= 16 ip_int |= cls._parse_hextet(parts[i]) return ip_int except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str))
[ "def", "_ip_int_from_string", "(", "cls", ",", "ip_str", ")", ":", "if", "not", "ip_str", ":", "raise", "AddressValueError", "(", "'Address cannot be empty'", ")", "parts", "=", "ip_str", ".", "split", "(", "':'", ")", "# An IPv6 address needs at least 2 colons (3 p...
Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: An int, the IPv6 address Raises: AddressValueError: if ip_str isn't a valid IPv6 Address.
[ "Turn", "an", "IPv6", "ip_str", "into", "an", "integer", "." ]
python
train
Nic30/hwt
hwt/hdl/switchContainer.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/switchContainer.py#L253-L272
def isSame(self, other: HdlStatement) -> bool: """ Doc on parent class :meth:`HdlStatement.isSame` """ if self is other: return True if self.rank != other.rank: return False if isinstance(other, SwitchContainer) \ and isSameHVal(self.switchOn, other.switchOn)\ and len(self.cases) == len(other.cases)\ and isSameStatementList(self.default, other.default): for (ac, astm), (bc, bstm) in zip(self.cases, other.cases): if not isSameHVal(ac, bc)\ or not isSameStatementList(astm, bstm): return False return True return False
[ "def", "isSame", "(", "self", ",", "other", ":", "HdlStatement", ")", "->", "bool", ":", "if", "self", "is", "other", ":", "return", "True", "if", "self", ".", "rank", "!=", "other", ".", "rank", ":", "return", "False", "if", "isinstance", "(", "othe...
Doc on parent class :meth:`HdlStatement.isSame`
[ "Doc", "on", "parent", "class", ":", "meth", ":", "HdlStatement", ".", "isSame" ]
python
test
Unidata/siphon
siphon/http_util.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/http_util.py#L197-L226
def lonlat_box(self, west, east, south, north): """Add a latitude/longitude bounding box to the query. This adds a request for a spatial bounding box, bounded by ('north', 'south') for latitude and ('east', 'west') for the longitude. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- west: float The bounding longitude to the west, in degrees east of the prime meridian east : float The bounding longitude to the east, in degrees east of the prime meridian south : float The bounding latitude to the south, in degrees north of the equator north : float The bounding latitude to the north, in degrees north of the equator Returns ------- self : DataQuery Returns self for chaining calls """ self._set_query(self.spatial_query, west=west, east=east, south=south, north=north) return self
[ "def", "lonlat_box", "(", "self", ",", "west", ",", "east", ",", "south", ",", "north", ")", ":", "self", ".", "_set_query", "(", "self", ".", "spatial_query", ",", "west", "=", "west", ",", "east", "=", "east", ",", "south", "=", "south", ",", "no...
Add a latitude/longitude bounding box to the query. This adds a request for a spatial bounding box, bounded by ('north', 'south') for latitude and ('east', 'west') for the longitude. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing spatial queries that have been set. Parameters ---------- west: float The bounding longitude to the west, in degrees east of the prime meridian east : float The bounding longitude to the east, in degrees east of the prime meridian south : float The bounding latitude to the south, in degrees north of the equator north : float The bounding latitude to the north, in degrees north of the equator Returns ------- self : DataQuery Returns self for chaining calls
[ "Add", "a", "latitude", "/", "longitude", "bounding", "box", "to", "the", "query", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/__init__.py#L616-L637
def _set_ra_domain_name(self, v, load=False): """ Setter method for ra_domain_name, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_domain_name (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_domain_name() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("domain_name_string",ra_domain_name.ra_domain_name, yang_name="ra-domain-name", rest_name="ra-domain-name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='domain-name-string', extensions={u'tailf-common': {u'info': u'Set domain name option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDomainNameVlanIntf'}}), is_container='list', yang_name="ra-domain-name", rest_name="ra-domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set domain name option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDomainNameVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ra_domain_name must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("domain_name_string",ra_domain_name.ra_domain_name, yang_name="ra-domain-name", rest_name="ra-domain-name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='domain-name-string', extensions={u'tailf-common': {u'info': u'Set domain name option', u'cli-suppress-list-no': None, 
u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDomainNameVlanIntf'}}), is_container='list', yang_name="ra-domain-name", rest_name="ra-domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set domain name option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDomainNameVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)""", }) self.__ra_domain_name = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ra_domain_name", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for ra_domain_name, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_domain_name (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_domain_name() directly.
[ "Setter", "method", "for", "ra_domain_name", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "interface", "/", "ve", "/", "ipv6", "/", "ipv6_nd_ra", "/", "ipv6_intf_cmds", "/", "nd", "/", "ra_domain_name", "(", "list", ")", "If", "this", "v...
python
train
Opentrons/opentrons
api/src/opentrons/system/nmcli.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/system/nmcli.py#L293-L322
async def connections( for_type: Optional[CONNECTION_TYPES] = None) -> List[Dict[str, str]]: """ Return the list of configured connections. This is all connections that nmcli knows about and manages. Each connection is a dict containing some basic information - the information retrievable from nmcli connection show. Further information should be queried on a connection by connection basis. If for_type is not None, it should be a str containing an element of CONNECTION_TYPES, and results will be limited to that connection type. """ fields = ['name', 'type', 'active'] res, _ = await _call(['-t', '-f', ','.join(fields), 'connection', 'show']) found = _dict_from_terse_tabular( fields, res, # ’ethernet’ or ’wireless’ from ’802-11-wireless’ or ’802-4-ethernet’ # and bools from ’yes’ or ’no transformers={'type': lambda s: s.split('-')[-1], 'active': lambda s: s.lower() == 'yes'} ) if for_type is not None: should_return = [] for c in found: if c['type'] == for_type.value: should_return.append(c) return should_return else: return found
[ "async", "def", "connections", "(", "for_type", ":", "Optional", "[", "CONNECTION_TYPES", "]", "=", "None", ")", "->", "List", "[", "Dict", "[", "str", ",", "str", "]", "]", ":", "fields", "=", "[", "'name'", ",", "'type'", ",", "'active'", "]", "res...
Return the list of configured connections. This is all connections that nmcli knows about and manages. Each connection is a dict containing some basic information - the information retrievable from nmcli connection show. Further information should be queried on a connection by connection basis. If for_type is not None, it should be a str containing an element of CONNECTION_TYPES, and results will be limited to that connection type.
[ "Return", "the", "list", "of", "configured", "connections", "." ]
python
train
decryptus/sonicprobe
sonicprobe/libs/xys.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/xys.py#L618-L630
def _transschema(x): """ Transform a schema, once loaded from its YAML representation, to its final internal representation """ if isinstance(x, tuple): return x.__class__(_transschema(x[0]), *x[1:]) elif isinstance(x, dict): return dict((_qualify_map(key, _transschema(val)) for key, val in x.iteritems())) elif isinstance(x, list): return map(_transschema, x) else: return x
[ "def", "_transschema", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "tuple", ")", ":", "return", "x", ".", "__class__", "(", "_transschema", "(", "x", "[", "0", "]", ")", ",", "*", "x", "[", "1", ":", "]", ")", "elif", "isinstance", "...
Transform a schema, once loaded from its YAML representation, to its final internal representation
[ "Transform", "a", "schema", "once", "loaded", "from", "its", "YAML", "representation", "to", "its", "final", "internal", "representation" ]
python
train
nocarryr/python-dispatch
pydispatch/dispatch.py
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L252-L267
def get_dispatcher_event(self, name): """Retrieves an Event object by name Args: name (str): The name of the :class:`Event` or :class:`~pydispatch.properties.Property` object to retrieve Returns: The :class:`Event` instance for the event or property definition .. versionadded:: 0.1.0 """ e = self.__property_events.get(name) if e is None: e = self.__events[name] return e
[ "def", "get_dispatcher_event", "(", "self", ",", "name", ")", ":", "e", "=", "self", ".", "__property_events", ".", "get", "(", "name", ")", "if", "e", "is", "None", ":", "e", "=", "self", ".", "__events", "[", "name", "]", "return", "e" ]
Retrieves an Event object by name Args: name (str): The name of the :class:`Event` or :class:`~pydispatch.properties.Property` object to retrieve Returns: The :class:`Event` instance for the event or property definition .. versionadded:: 0.1.0
[ "Retrieves", "an", "Event", "object", "by", "name" ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/datasets/signature.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/datasets/signature.py#L23-L34
def sha256sum(path, blocksize=65536): """ Computes the SHA256 signature of a file to verify that the file has not been modified in transit and that it is the correct version of the data. """ sig = hashlib.sha256() with open(path, 'rb') as f: buf = f.read(blocksize) while len(buf) > 0: sig.update(buf) buf = f.read(blocksize) return sig.hexdigest()
[ "def", "sha256sum", "(", "path", ",", "blocksize", "=", "65536", ")", ":", "sig", "=", "hashlib", ".", "sha256", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "buf", "=", "f", ".", "read", "(", "blocksize", ")", "while"...
Computes the SHA256 signature of a file to verify that the file has not been modified in transit and that it is the correct version of the data.
[ "Computes", "the", "SHA256", "signature", "of", "a", "file", "to", "verify", "that", "the", "file", "has", "not", "been", "modified", "in", "transit", "and", "that", "it", "is", "the", "correct", "version", "of", "the", "data", "." ]
python
train
crypto101/merlyn
merlyn/exercise.py
https://github.com/crypto101/merlyn/blob/0f313210b9ea5385cc2e5b725dc766df9dc3284d/merlyn/exercise.py#L30-L37
def wasSolvedBy(self, user): """Checks if this exercise has previously been solved by the user. """ thisExercise = _Solution.what == self byThisUser = _Solution.who == user condition = q.AND(thisExercise, byThisUser) return self.store.query(_Solution, condition, limit=1).count() == 1
[ "def", "wasSolvedBy", "(", "self", ",", "user", ")", ":", "thisExercise", "=", "_Solution", ".", "what", "==", "self", "byThisUser", "=", "_Solution", ".", "who", "==", "user", "condition", "=", "q", ".", "AND", "(", "thisExercise", ",", "byThisUser", ")...
Checks if this exercise has previously been solved by the user.
[ "Checks", "if", "this", "exercise", "has", "previously", "been", "solved", "by", "the", "user", "." ]
python
train
scanny/python-pptx
pptx/compat/python3.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/compat/python3.py#L31-L39
def to_unicode(text): """ Return *text* as a unicode string. All text in Python 3 is unicode, so this just returns *text* unchanged. """ if not isinstance(text, str): tmpl = 'expected unicode string, got %s value %s' raise TypeError(tmpl % (type(text), text)) return text
[ "def", "to_unicode", "(", "text", ")", ":", "if", "not", "isinstance", "(", "text", ",", "str", ")", ":", "tmpl", "=", "'expected unicode string, got %s value %s'", "raise", "TypeError", "(", "tmpl", "%", "(", "type", "(", "text", ")", ",", "text", ")", ...
Return *text* as a unicode string. All text in Python 3 is unicode, so this just returns *text* unchanged.
[ "Return", "*", "text", "*", "as", "a", "unicode", "string", ".", "All", "text", "in", "Python", "3", "is", "unicode", "so", "this", "just", "returns", "*", "text", "*", "unchanged", "." ]
python
train
boundary/pulse-api-cli
boundary/metric_markdown.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/metric_markdown.py#L161-L170
def printFields(self, f, d): """ Prints out table rows based on the size of the data in columns """ for field in self.fields: fstr = field["title"] dstr = field["description"] flen = f - len(fstr) dlen = d - len(dstr) print("|{0}{1}|{2}{3}|".format(fstr, ' ' * flen, dstr, ' ' * dlen))
[ "def", "printFields", "(", "self", ",", "f", ",", "d", ")", ":", "for", "field", "in", "self", ".", "fields", ":", "fstr", "=", "field", "[", "\"title\"", "]", "dstr", "=", "field", "[", "\"description\"", "]", "flen", "=", "f", "-", "len", "(", ...
Prints out table rows based on the size of the data in columns
[ "Prints", "out", "table", "rows", "based", "on", "the", "size", "of", "the", "data", "in", "columns" ]
python
test
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxproject.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L284-L339
def new(self, name, summary=None, description=None, protected=None, restricted=None, download_restricted=None, contains_phi=None, tags=None, properties=None, bill_to=None, **kwargs): """ :param name: The name of the project :type name: string :param summary: If provided, a short summary of what the project contains :type summary: string :param description: If provided, the new project description :type name: string :param protected: If provided, whether the project should be protected :type protected: boolean :param restricted: If provided, whether the project should be restricted :type restricted: boolean :param download_restricted: If provided, whether external downloads should be restricted :type download_restricted: boolean :param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI) :type contains_phi: boolean :param tags: If provided, tags to associate with the project :type tags: list of strings :param properties: If provided, properties to associate with the project :type properties: dict :param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission :type bill_to: string Creates a new project. Initially only the user performing this action will be in the permissions/member list, with ADMINISTER access. See the API documentation for the `/project/new <https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_ method for more info. 
""" input_hash = {} input_hash["name"] = name if summary is not None: input_hash["summary"] = summary if description is not None: input_hash["description"] = description if protected is not None: input_hash["protected"] = protected if restricted is not None: input_hash["restricted"] = restricted if download_restricted is not None: input_hash["downloadRestricted"] = download_restricted if contains_phi is not None: input_hash["containsPHI"] = contains_phi if bill_to is not None: input_hash["billTo"] = bill_to if tags is not None: input_hash["tags"] = tags if properties is not None: input_hash["properties"] = properties self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"]) self._desc = {} return self._dxid
[ "def", "new", "(", "self", ",", "name", ",", "summary", "=", "None", ",", "description", "=", "None", ",", "protected", "=", "None", ",", "restricted", "=", "None", ",", "download_restricted", "=", "None", ",", "contains_phi", "=", "None", ",", "tags", ...
:param name: The name of the project :type name: string :param summary: If provided, a short summary of what the project contains :type summary: string :param description: If provided, the new project description :type name: string :param protected: If provided, whether the project should be protected :type protected: boolean :param restricted: If provided, whether the project should be restricted :type restricted: boolean :param download_restricted: If provided, whether external downloads should be restricted :type download_restricted: boolean :param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI) :type contains_phi: boolean :param tags: If provided, tags to associate with the project :type tags: list of strings :param properties: If provided, properties to associate with the project :type properties: dict :param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission :type bill_to: string Creates a new project. Initially only the user performing this action will be in the permissions/member list, with ADMINISTER access. See the API documentation for the `/project/new <https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_ method for more info.
[ ":", "param", "name", ":", "The", "name", "of", "the", "project", ":", "type", "name", ":", "string", ":", "param", "summary", ":", "If", "provided", "a", "short", "summary", "of", "what", "the", "project", "contains", ":", "type", "summary", ":", "str...
python
train
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L313-L320
def _get_property_values_with_defaults(self, classname, property_values): """Return the property values for the class, with default values applied where needed.""" # To uphold OrientDB semantics, make a new dict with all property values set # to their default values, which are None if no default was set. # Then, overwrite its data with the supplied property values. final_values = self.get_default_property_values(classname) final_values.update(property_values) return final_values
[ "def", "_get_property_values_with_defaults", "(", "self", ",", "classname", ",", "property_values", ")", ":", "# To uphold OrientDB semantics, make a new dict with all property values set", "# to their default values, which are None if no default was set.", "# Then, overwrite its data with t...
Return the property values for the class, with default values applied where needed.
[ "Return", "the", "property", "values", "for", "the", "class", "with", "default", "values", "applied", "where", "needed", "." ]
python
train
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_comm.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_comm.py#L177-L184
def run_as_pydevd_daemon_thread(func, *args, **kwargs): ''' Runs a function as a pydevd daemon thread (without any tracing in place). ''' t = PyDBDaemonThread(target_and_args=(func, args, kwargs)) t.name = '%s (pydevd daemon thread)' % (func.__name__,) t.start() return t
[ "def", "run_as_pydevd_daemon_thread", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "t", "=", "PyDBDaemonThread", "(", "target_and_args", "=", "(", "func", ",", "args", ",", "kwargs", ")", ")", "t", ".", "name", "=", "'%s (pydevd daem...
Runs a function as a pydevd daemon thread (without any tracing in place).
[ "Runs", "a", "function", "as", "a", "pydevd", "daemon", "thread", "(", "without", "any", "tracing", "in", "place", ")", "." ]
python
train
bmcfee/muda
muda/base.py
https://github.com/bmcfee/muda/blob/ff82efdfaeb98da0a9f9124845826eb20536a9ba/muda/base.py#L327-L342
def transform(self, jam): '''Apply the sequence of transformations to a single jam object. Parameters ---------- jam : jams.JAMS The jam object to transform Yields ------ jam_out : jams.JAMS The jam objects produced by each member of the union ''' for output in self.__serial_transform(jam, self.steps): yield output
[ "def", "transform", "(", "self", ",", "jam", ")", ":", "for", "output", "in", "self", ".", "__serial_transform", "(", "jam", ",", "self", ".", "steps", ")", ":", "yield", "output" ]
Apply the sequence of transformations to a single jam object. Parameters ---------- jam : jams.JAMS The jam object to transform Yields ------ jam_out : jams.JAMS The jam objects produced by each member of the union
[ "Apply", "the", "sequence", "of", "transformations", "to", "a", "single", "jam", "object", "." ]
python
valid
ArduPilot/MAVProxy
MAVProxy/modules/lib/MacOS/backend_wxagg.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wxagg.py#L145-L158
def _convert_agg_to_wx_bitmap(agg, bbox): """ Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If bbox is None, the entire buffer is converted. Note: agg must be a backend_agg.RendererAgg instance. """ if bbox is None: # agg => rgba buffer -> bitmap return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height), agg.buffer_rgba()) else: # agg => rgba buffer -> bitmap => clipped bitmap return _WX28_clipped_agg_as_bitmap(agg, bbox)
[ "def", "_convert_agg_to_wx_bitmap", "(", "agg", ",", "bbox", ")", ":", "if", "bbox", "is", "None", ":", "# agg => rgba buffer -> bitmap", "return", "wx", ".", "BitmapFromBufferRGBA", "(", "int", "(", "agg", ".", "width", ")", ",", "int", "(", "agg", ".", "...
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If bbox is None, the entire buffer is converted. Note: agg must be a backend_agg.RendererAgg instance.
[ "Convert", "the", "region", "of", "the", "agg", "buffer", "bounded", "by", "bbox", "to", "a", "wx", ".", "Bitmap", ".", "If", "bbox", "is", "None", "the", "entire", "buffer", "is", "converted", "." ]
python
train
BernardFW/bernard
src/bernard/platforms/management.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/management.py#L190-L199
def get_class(self, platform) -> Type[Platform]: """ For a given platform name, gets the matching class """ if platform in self._classes: return self._classes[platform] raise PlatformDoesNotExist('Platform "{}" is not in configuration' .format(platform))
[ "def", "get_class", "(", "self", ",", "platform", ")", "->", "Type", "[", "Platform", "]", ":", "if", "platform", "in", "self", ".", "_classes", ":", "return", "self", ".", "_classes", "[", "platform", "]", "raise", "PlatformDoesNotExist", "(", "'Platform ...
For a given platform name, gets the matching class
[ "For", "a", "given", "platform", "name", "gets", "the", "matching", "class" ]
python
train
NeilGirdhar/rectangle
rectangle/rectangle.py
https://github.com/NeilGirdhar/rectangle/blob/b0ca25e199cf6e331aef7fd99bda5ba10ae98753/rectangle/rectangle.py#L53-L67
def transformed(self, t): """ Transforms an m-dimensional Rect using t, an nxn matrix that can transform vectors in the form: [x, y, z, …, 1]. The Rect is padded to n dimensions. """ assert t.shape[0] == t.shape[1] extra_dimensions = t.shape[0] - self.dimensions - 1 def transform(a): return t.dot(np.concatenate( (a, [0] * extra_dimensions, [1]), axis=0 ))[:self.dimensions] return Rect(transform(self.mins), transform(self.maxes))
[ "def", "transformed", "(", "self", ",", "t", ")", ":", "assert", "t", ".", "shape", "[", "0", "]", "==", "t", ".", "shape", "[", "1", "]", "extra_dimensions", "=", "t", ".", "shape", "[", "0", "]", "-", "self", ".", "dimensions", "-", "1", "def...
Transforms an m-dimensional Rect using t, an nxn matrix that can transform vectors in the form: [x, y, z, …, 1]. The Rect is padded to n dimensions.
[ "Transforms", "an", "m", "-", "dimensional", "Rect", "using", "t", "an", "nxn", "matrix", "that", "can", "transform", "vectors", "in", "the", "form", ":", "[", "x", "y", "z", "…", "1", "]", ".", "The", "Rect", "is", "padded", "to", "n", "dimensions",...
python
train
timgabets/pynblock
pynblock/tools.py
https://github.com/timgabets/pynblock/blob/dbdb6d06bd7741e1138bed09d874b47b23d8d200/pynblock/tools.py#L194-L202
def parityOf(int_type): """ Calculates the parity of an integer, returning 0 if there are an even number of set bits, and -1 if there are an odd number. """ parity = 0 while (int_type): parity = ~parity int_type = int_type & (int_type - 1) return(parity)
[ "def", "parityOf", "(", "int_type", ")", ":", "parity", "=", "0", "while", "(", "int_type", ")", ":", "parity", "=", "~", "parity", "int_type", "=", "int_type", "&", "(", "int_type", "-", "1", ")", "return", "(", "parity", ")" ]
Calculates the parity of an integer, returning 0 if there are an even number of set bits, and -1 if there are an odd number.
[ "Calculates", "the", "parity", "of", "an", "integer", "returning", "0", "if", "there", "are", "an", "even", "number", "of", "set", "bits", "and", "-", "1", "if", "there", "are", "an", "odd", "number", "." ]
python
train
cackharot/suds-py3
suds/mx/literal.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/mx/literal.py#L218-L240
def translate(self, content): """ Translate using the XSD type information. Python I{dict} is translated to a suds object. Most importantly, primative values are translated from python types to XML types using the XSD type. @param content: The content to translate. @type content: L{Object} @return: self @rtype: L{Typed} """ v = content.value if v is None: return if isinstance(v, dict): cls = content.real.name content.value = Factory.object(cls, v) md = content.value.__metadata__ md.sxtype = content.type return v = content.real.translate(v, False) content.value = v return self
[ "def", "translate", "(", "self", ",", "content", ")", ":", "v", "=", "content", ".", "value", "if", "v", "is", "None", ":", "return", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "cls", "=", "content", ".", "real", ".", "name", "content", ...
Translate using the XSD type information. Python I{dict} is translated to a suds object. Most importantly, primative values are translated from python types to XML types using the XSD type. @param content: The content to translate. @type content: L{Object} @return: self @rtype: L{Typed}
[ "Translate", "using", "the", "XSD", "type", "information", ".", "Python", "I", "{", "dict", "}", "is", "translated", "to", "a", "suds", "object", ".", "Most", "importantly", "primative", "values", "are", "translated", "from", "python", "types", "to", "XML", ...
python
train
realestate-com-au/dashmat
dashmat/actions.py
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/actions.py#L68-L80
def requirements(collector): """Just print out the requirements""" out = sys.stdout artifact = collector.configuration['dashmat'].artifact if artifact not in (None, "", NotSpecified): if isinstance(artifact, six.string_types): out = open(artifact, 'w') else: out = artifact for active in collector.configuration['__imported__'].values(): for requirement in active.requirements(): out.write("{0}\n".format(requirement))
[ "def", "requirements", "(", "collector", ")", ":", "out", "=", "sys", ".", "stdout", "artifact", "=", "collector", ".", "configuration", "[", "'dashmat'", "]", ".", "artifact", "if", "artifact", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ...
Just print out the requirements
[ "Just", "print", "out", "the", "requirements" ]
python
train
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L466-L472
def update_user(self, user, name=None, password=None, host=None): """ Allows you to change one or more of the user's username, password, or host. """ return self._user_manager.update(user, name=name, password=password, host=host)
[ "def", "update_user", "(", "self", ",", "user", ",", "name", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ")", ":", "return", "self", ".", "_user_manager", ".", "update", "(", "user", ",", "name", "=", "name", ",", "password",...
Allows you to change one or more of the user's username, password, or host.
[ "Allows", "you", "to", "change", "one", "or", "more", "of", "the", "user", "s", "username", "password", "or", "host", "." ]
python
train
python-bonobo/bonobo
docs/_templates/alabaster/__init__.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/docs/_templates/alabaster/__init__.py#L6-L11
def get_path(): """ Shortcut for users whose theme is next to their conf.py. """ # Theme directory is defined as our parent directory return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
[ "def", "get_path", "(", ")", ":", "# Theme directory is defined as our parent directory", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ")" ]
Shortcut for users whose theme is next to their conf.py.
[ "Shortcut", "for", "users", "whose", "theme", "is", "next", "to", "their", "conf", ".", "py", "." ]
python
train
treycucco/bidon
lib/generate_models.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L85-L88
def get_data_table(filename): """Returns a DataTable instance built from either the filename, or STDIN if filename is None.""" with get_file_object(filename, "r") as rf: return DataTable(list(csv.reader(rf)))
[ "def", "get_data_table", "(", "filename", ")", ":", "with", "get_file_object", "(", "filename", ",", "\"r\"", ")", "as", "rf", ":", "return", "DataTable", "(", "list", "(", "csv", ".", "reader", "(", "rf", ")", ")", ")" ]
Returns a DataTable instance built from either the filename, or STDIN if filename is None.
[ "Returns", "a", "DataTable", "instance", "built", "from", "either", "the", "filename", "or", "STDIN", "if", "filename", "is", "None", "." ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L3056-L3062
def set_phases(self, literals=[]): """ Sets polarities of a given list of variables. """ if self.minisat: pysolvers.minisat22_setphases(self.minisat, literals)
[ "def", "set_phases", "(", "self", ",", "literals", "=", "[", "]", ")", ":", "if", "self", ".", "minisat", ":", "pysolvers", ".", "minisat22_setphases", "(", "self", ".", "minisat", ",", "literals", ")" ]
Sets polarities of a given list of variables.
[ "Sets", "polarities", "of", "a", "given", "list", "of", "variables", "." ]
python
train
rikrd/inspire
inspirespeech/__init__.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L502-L508
def load_metadata(fileobj): """Load the submission from a file. :param filename: where to load the submission from """ with gzip.GzipFile(fileobj=fileobj, mode='r') as z: return json.loads(z.readline())
[ "def", "load_metadata", "(", "fileobj", ")", ":", "with", "gzip", ".", "GzipFile", "(", "fileobj", "=", "fileobj", ",", "mode", "=", "'r'", ")", "as", "z", ":", "return", "json", ".", "loads", "(", "z", ".", "readline", "(", ")", ")" ]
Load the submission from a file. :param filename: where to load the submission from
[ "Load", "the", "submission", "from", "a", "file", "." ]
python
train
bwohlberg/sporco
sporco/cdict.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cdict.py#L220-L246
def check(self, key, value): """Check whether key,value pair is allowed. The key is allowed if there is a corresponding key in the defaults class attribute dict. The value is not allowed if it is a dict in the defaults dict and not a dict in value. Parameters ---------- key : str or tuple of str Dict key value : any Dict value corresponding to key """ # This test necessary to avoid unpickling errors in Python 3 if hasattr(self, 'dflt'): # Get corresponding node to self, as determined by pth # attribute, of the defaults dict tree a = self.__class__.getnode(self.dflt, self.pth) # Raise UnknownKeyError exception if key not in corresponding # node of defaults tree if key not in a: raise UnknownKeyError(self.pth + (key,)) # Raise InvalidValueError if the key value in the defaults # tree is a dict and the value parameter is not a dict and elif isinstance(a[key], dict) and not isinstance(value, dict): raise InvalidValueError(self.pth + (key,))
[ "def", "check", "(", "self", ",", "key", ",", "value", ")", ":", "# This test necessary to avoid unpickling errors in Python 3", "if", "hasattr", "(", "self", ",", "'dflt'", ")", ":", "# Get corresponding node to self, as determined by pth", "# attribute, of the defaults dict...
Check whether key,value pair is allowed. The key is allowed if there is a corresponding key in the defaults class attribute dict. The value is not allowed if it is a dict in the defaults dict and not a dict in value. Parameters ---------- key : str or tuple of str Dict key value : any Dict value corresponding to key
[ "Check", "whether", "key", "value", "pair", "is", "allowed", ".", "The", "key", "is", "allowed", "if", "there", "is", "a", "corresponding", "key", "in", "the", "defaults", "class", "attribute", "dict", ".", "The", "value", "is", "not", "allowed", "if", "...
python
train
PyCQA/pylint
pylint/checkers/typecheck.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/typecheck.py#L593-L627
def _infer_from_metaclass_constructor(cls, func): """Try to infer what the given *func* constructor is building :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. :rtype: astroid.ClassDef """ context = astroid.context.InferenceContext() class_bases = astroid.List() class_bases.postinit(elts=cls.bases) attrs = astroid.Dict() local_names = [(name, values[-1]) for name, values in cls.locals.items()] attrs.postinit(local_names) builder_args = astroid.Tuple() builder_args.postinit([cls.name, class_bases, attrs]) context.callcontext = astroid.context.CallContext(builder_args) try: inferred = next(func.infer_call_result(func, context), None) except astroid.InferenceError: return None return inferred or None
[ "def", "_infer_from_metaclass_constructor", "(", "cls", ",", "func", ")", ":", "context", "=", "astroid", ".", "context", ".", "InferenceContext", "(", ")", "class_bases", "=", "astroid", ".", "List", "(", ")", "class_bases", ".", "postinit", "(", "elts", "=...
Try to infer what the given *func* constructor is building :param astroid.FunctionDef func: A metaclass constructor. Metaclass definitions can be functions, which should accept three arguments, the name of the class, the bases of the class and the attributes. The function could return anything, but usually it should be a proper metaclass. :param astroid.ClassDef cls: The class for which the *func* parameter should generate a metaclass. :returns: The class generated by the function or None, if we couldn't infer it. :rtype: astroid.ClassDef
[ "Try", "to", "infer", "what", "the", "given", "*", "func", "*", "constructor", "is", "building" ]
python
test
andymccurdy/redis-py
redis/connection.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/connection.py#L983-L1008
def get_connection(self, command_name, *keys, **options): "Get a connection from the pool" self._checkpid() try: connection = self._available_connections.pop() except IndexError: connection = self.make_connection() self._in_use_connections.add(connection) try: # ensure this connection is connected to Redis connection.connect() # connections that the pool provides should be ready to send # a command. if not, the connection was either returned to the # pool before all data has been read or the socket has been # closed. either way, reconnect and verify everything is good. if not connection.is_ready_for_command(): connection.disconnect() connection.connect() if not connection.is_ready_for_command(): raise ConnectionError('Connection not ready') except: # noqa: E722 # release the connection back to the pool so that we don't leak it self.release(connection) raise return connection
[ "def", "get_connection", "(", "self", ",", "command_name", ",", "*", "keys", ",", "*", "*", "options", ")", ":", "self", ".", "_checkpid", "(", ")", "try", ":", "connection", "=", "self", ".", "_available_connections", ".", "pop", "(", ")", "except", "...
Get a connection from the pool
[ "Get", "a", "connection", "from", "the", "pool" ]
python
train
Stranger6667/postmarker
postmarker/models/emails.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/emails.py#L411-L449
def Email( self, From, To, Cc=None, Bcc=None, Subject=None, Tag=None, HtmlBody=None, TextBody=None, Metadata=None, ReplyTo=None, Headers=None, TrackOpens=None, TrackLinks="None", Attachments=None, ): """ Constructs :py:class:`Email` instance. :return: :py:class:`Email` """ return Email( manager=self, From=From, To=To, Cc=Cc, Bcc=Bcc, Subject=Subject, Tag=Tag, HtmlBody=HtmlBody, TextBody=TextBody, Metadata=Metadata, ReplyTo=ReplyTo, Headers=Headers, TrackOpens=TrackOpens, TrackLinks=TrackLinks, Attachments=Attachments, )
[ "def", "Email", "(", "self", ",", "From", ",", "To", ",", "Cc", "=", "None", ",", "Bcc", "=", "None", ",", "Subject", "=", "None", ",", "Tag", "=", "None", ",", "HtmlBody", "=", "None", ",", "TextBody", "=", "None", ",", "Metadata", "=", "None", ...
Constructs :py:class:`Email` instance. :return: :py:class:`Email`
[ "Constructs", ":", "py", ":", "class", ":", "Email", "instance", "." ]
python
train
alexprengere/currencyconverter
currency_converter/currency_converter.py
https://github.com/alexprengere/currencyconverter/blob/e3cb0d693819c0c824214225b23a47e9380f71df/currency_converter/currency_converter.py#L67-L72
def parse_date(s): """Fast %Y-%m-%d parsing.""" try: return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:10])) except ValueError: # other accepted format used in one-day data set return datetime.datetime.strptime(s, '%d %B %Y').date()
[ "def", "parse_date", "(", "s", ")", ":", "try", ":", "return", "datetime", ".", "date", "(", "int", "(", "s", "[", ":", "4", "]", ")", ",", "int", "(", "s", "[", "5", ":", "7", "]", ")", ",", "int", "(", "s", "[", "8", ":", "10", "]", "...
Fast %Y-%m-%d parsing.
[ "Fast", "%Y", "-", "%m", "-", "%d", "parsing", "." ]
python
test
samuelcolvin/pydantic
pydantic/schema.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/schema.py#L287-L306
def get_field_schema_validations(field: Field) -> Dict[str, Any]: """ Get the JSON Schema validation keywords for a ``field`` with an annotation of a Pydantic ``Schema`` with validation arguments. """ f_schema: Dict[str, Any] = {} if lenient_issubclass(field.type_, (str, bytes)): for attr_name, t, keyword in _str_types_attrs: attr = getattr(field.schema, attr_name, None) if isinstance(attr, t): f_schema[keyword] = attr if lenient_issubclass(field.type_, numeric_types) and not issubclass(field.type_, bool): for attr_name, t, keyword in _numeric_types_attrs: attr = getattr(field.schema, attr_name, None) if isinstance(attr, t): f_schema[keyword] = attr schema = cast('Schema', field.schema) if schema.extra: f_schema.update(schema.extra) return f_schema
[ "def", "get_field_schema_validations", "(", "field", ":", "Field", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "f_schema", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "if", "lenient_issubclass", "(", "field", ".", "type_", ",", ...
Get the JSON Schema validation keywords for a ``field`` with an annotation of a Pydantic ``Schema`` with validation arguments.
[ "Get", "the", "JSON", "Schema", "validation", "keywords", "for", "a", "field", "with", "an", "annotation", "of", "a", "Pydantic", "Schema", "with", "validation", "arguments", "." ]
python
train
creare-com/pydem
pydem/dem_processing.py
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L1846-L1908
def _mk_adjacency_matrix(self, section, proportion, flats, elev, mag, dX, dY): """ Calculates the adjacency of connectivity matrix. This matrix tells which pixels drain to which. For example, the pixel i, will recieve area from np.nonzero(A[i, :]) at the proportions given in A[i, :]. So, the row gives the pixel drain to, and the columns the pixels drained from. """ shp = section.shape mat_data = np.row_stack((proportion, 1 - proportion)) NN = np.prod(shp) i12 = np.arange(NN).reshape(shp) j1 = - np.ones_like(i12) j2 = - np.ones_like(i12) # make the connectivity for the non-flats/pits j1, j2 = self._mk_connectivity(section, i12, j1, j2) j = np.row_stack((j1, j2)) i = np.row_stack((i12, i12)) # connectivity for flats/pits if self.drain_pits: pit_i, pit_j, pit_prop, flats, mag = \ self._mk_connectivity_pits(i12, flats, elev, mag, dX, dY) j = np.concatenate([j.ravel(), pit_j]).astype('int64') i = np.concatenate([i.ravel(), pit_i]).astype('int64') mat_data = np.concatenate([mat_data.ravel(), pit_prop]) elif self.drain_flats: j1, j2, mat_data, flat_i, flat_j, flat_prop = \ self._mk_connectivity_flats( i12, j1, j2, mat_data, flats, elev, mag) j = np.concatenate([j.ravel(), flat_j]).astype('int64') i = np.concatenate([i.ravel(), flat_j]).astype('int64') mat_data = np.concatenate([mat_data.ravel(), flat_prop]) # This prevents no-data values, remove connections when not present, # and makes sure that floating point precision errors do not # create circular references where a lower elevation cell drains # to a higher elevation cell I = ~np.isnan(mat_data) & (j != -1) & (mat_data > 1e-8) \ & (elev.ravel()[j] <= elev.ravel()[i]) mat_data = mat_data[I] j = j[I] i = i[I] # %%Make the matrix and initialize # What is A? The row i area receives area contributions from the # entries in its columns. If all the entries in my columns have # drained, then I can drain. 
A = sps.csc_matrix((mat_data.ravel(), np.row_stack((j.ravel(), i.ravel()))), shape=(NN, NN)) normalize = np.array(A.sum(0) + 1e-16).squeeze() A = np.dot(A, sps.diags(1/normalize, 0)) return A
[ "def", "_mk_adjacency_matrix", "(", "self", ",", "section", ",", "proportion", ",", "flats", ",", "elev", ",", "mag", ",", "dX", ",", "dY", ")", ":", "shp", "=", "section", ".", "shape", "mat_data", "=", "np", ".", "row_stack", "(", "(", "proportion", ...
Calculates the adjacency of connectivity matrix. This matrix tells which pixels drain to which. For example, the pixel i, will recieve area from np.nonzero(A[i, :]) at the proportions given in A[i, :]. So, the row gives the pixel drain to, and the columns the pixels drained from.
[ "Calculates", "the", "adjacency", "of", "connectivity", "matrix", ".", "This", "matrix", "tells", "which", "pixels", "drain", "to", "which", "." ]
python
train
chainer/chainerui
chainerui/models/result.py
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L79-L96
def serialize_with_sampled_logs(self, logs_limit=-1): """serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs. """ return { 'id': self.id, 'pathName': self.path_name, 'name': self.name, 'isUnregistered': self.is_unregistered, 'logs': [log.serialize for log in self.sampled_logs(logs_limit)], 'args': self.args.serialize if self.args is not None else [], 'commands': [cmd.serialize for cmd in self.commands], 'snapshots': [cmd.serialize for cmd in self.snapshots], 'logModifiedAt': self.log_modified_at.isoformat() }
[ "def", "serialize_with_sampled_logs", "(", "self", ",", "logs_limit", "=", "-", "1", ")", ":", "return", "{", "'id'", ":", "self", ".", "id", ",", "'pathName'", ":", "self", ".", "path_name", ",", "'name'", ":", "self", ".", "name", ",", "'isUnregistered...
serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs.
[ "serialize", "a", "result", "with", "up", "to", "logs_limit", "logs", "." ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L731-L743
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0): """ Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements. """ self.wait_for_ready_state_complete() if page_utils.is_xpath_selector(selector): by = By.XPATH if page_utils.is_link_text_selector(selector): selector = page_utils.get_link_text_from_selector(selector) by = By.LINK_TEXT v_elems = page_actions.find_visible_elements(self.driver, selector, by) if limit and limit > 0 and len(v_elems) > limit: v_elems = v_elems[:limit] return v_elems
[ "def", "find_visible_elements", "(", "self", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "limit", "=", "0", ")", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ...
Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements.
[ "Returns", "a", "list", "of", "matching", "WebElements", "that", "are", "visible", ".", "If", "limit", "is", "set", "and", ">", "0", "will", "only", "return", "that", "many", "elements", "." ]
python
train
hollenstein/maspy
maspy/core.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L259-L289
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False, selector=None, defaultValue=None): """Return a condensed array of data selected from :class:`Si` instances from ``self.sic`` for fast and convenient data processing. :param attr: list of :class:`Si` item attributes that should be added to the returned array. The attributes "id" and "specfile" are always included, in combination they serve as a unique id. :param defaultValue: if an item is missing an attribute, the "defaultValue" is added to the array instead. :param specfiles: filenames of ms-run files, if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if "sort" is specified the returned list of items is sorted according to the :class:`Si` attribute specified by "sort", if the attribute is not present the item is skipped. :param reverse: bool, set True to reverse sort order :param selector: a function which is called with each :class:`Si` item and has to return True (include item) or False (discard item). Default function is: ``lambda si: True`` :returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ... } """ selector = (lambda si: True) if selector is None else selector attr = attr if attr is not None else [] attr = set(['id', 'specfile'] + aux.toList(attr)) items = self.getItems(specfiles, sort, reverse, selector) return _getArrays(items, attr, defaultValue)
[ "def", "getArrays", "(", "self", ",", "attr", "=", "None", ",", "specfiles", "=", "None", ",", "sort", "=", "False", ",", "reverse", "=", "False", ",", "selector", "=", "None", ",", "defaultValue", "=", "None", ")", ":", "selector", "=", "(", "lambda...
Return a condensed array of data selected from :class:`Si` instances from ``self.sic`` for fast and convenient data processing. :param attr: list of :class:`Si` item attributes that should be added to the returned array. The attributes "id" and "specfile" are always included, in combination they serve as a unique id. :param defaultValue: if an item is missing an attribute, the "defaultValue" is added to the array instead. :param specfiles: filenames of ms-run files, if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if "sort" is specified the returned list of items is sorted according to the :class:`Si` attribute specified by "sort", if the attribute is not present the item is skipped. :param reverse: bool, set True to reverse sort order :param selector: a function which is called with each :class:`Si` item and has to return True (include item) or False (discard item). Default function is: ``lambda si: True`` :returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ... }
[ "Return", "a", "condensed", "array", "of", "data", "selected", "from", ":", "class", ":", "Si", "instances", "from", "self", ".", "sic", "for", "fast", "and", "convenient", "data", "processing", "." ]
python
train
peri-source/peri
peri/logger.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/logger.py#L90-L104
def add_handler(self, name='console-color', level='info', formatter='standard', **kwargs): """ Add another handler to the logging system if not present already. Available handlers are currently: ['console-bw', 'console-color', 'rotating-log'] """ # make sure the the log file has a name if name == 'rotating-log' and 'filename' not in kwargs: kwargs.update({'filename': self.logfilename}) # make sure the the log file has a name if name == 'stringio' and 'stringio' not in kwargs: kwargs.update({'stringio': StringIO.StringIO()}) handler = types[name](**kwargs) self.add_handler_raw(handler, name, level=level, formatter=formatter)
[ "def", "add_handler", "(", "self", ",", "name", "=", "'console-color'", ",", "level", "=", "'info'", ",", "formatter", "=", "'standard'", ",", "*", "*", "kwargs", ")", ":", "# make sure the the log file has a name", "if", "name", "==", "'rotating-log'", "and", ...
Add another handler to the logging system if not present already. Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']
[ "Add", "another", "handler", "to", "the", "logging", "system", "if", "not", "present", "already", ".", "Available", "handlers", "are", "currently", ":", "[", "console", "-", "bw", "console", "-", "color", "rotating", "-", "log", "]" ]
python
valid
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L4013-L4066
def _make_value(self, value): """ Constructs a _child_spec value from a native Python data type, or an appropriate Asn1Value object :param value: A native Python value, or some child of Asn1Value :return: An object of type _child_spec """ if isinstance(value, self._child_spec): new_value = value elif issubclass(self._child_spec, Any): if isinstance(value, Asn1Value): new_value = value else: raise ValueError(unwrap( ''' Can not set a native python value to %s where the _child_spec is Any - value must be an instance of Asn1Value ''', type_name(self) )) elif issubclass(self._child_spec, Choice): if not isinstance(value, Asn1Value): raise ValueError(unwrap( ''' Can not set a native python value to %s where the _child_spec is the choice type %s - value must be an instance of Asn1Value ''', type_name(self), self._child_spec.__name__ )) if not isinstance(value, self._child_spec): wrapper = self._child_spec() wrapper.validate(value.class_, value.tag, value.contents) wrapper._parsed = value value = wrapper new_value = value else: return self._child_spec(value=value) params = {} if self._child_spec.explicit: params['explicit'] = self._child_spec.explicit if self._child_spec.implicit: params['implicit'] = (self._child_spec.class_, self._child_spec.tag) return _fix_tagging(new_value, params)
[ "def", "_make_value", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "self", ".", "_child_spec", ")", ":", "new_value", "=", "value", "elif", "issubclass", "(", "self", ".", "_child_spec", ",", "Any", ")", ":", "if", "isins...
Constructs a _child_spec value from a native Python data type, or an appropriate Asn1Value object :param value: A native Python value, or some child of Asn1Value :return: An object of type _child_spec
[ "Constructs", "a", "_child_spec", "value", "from", "a", "native", "Python", "data", "type", "or", "an", "appropriate", "Asn1Value", "object" ]
python
train
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L1113-L1146
def del_work_units(self, work_spec_name, work_unit_keys=None, state=None, all=False): '''Delete work units from a work spec. The parameters are considered in order as follows: * If `all` is :const:`True`, then all work units in `work_spec_name` are deleted; otherwise * If `state` is not :const:`None`, then all work units in the named state are deleted; otherwise * If `work_unit_keys` are specified, then those specific work units are deleted; otherwise * Nothing is deleted. :param str work_spec_name: name of the work spec :param list work_unit_keys: if not :const:`None`, only delete these specific keys :param str state: only delete work units in this state :param bool all: if true, delete all work units :return: number of work units deleted ''' count = 0 if (state is None) or (state == AVAILABLE): count += self.remove_available_work_units(work_spec_name, work_unit_keys) if (state is None) or (state == PENDING): count += self.remove_pending_work_units(work_spec_name, work_unit_keys) if (state is None) or (state == BLOCKED): count += self.remove_blocked_work_units(work_spec_name, work_unit_keys) if (state is None) or (state == FAILED): count += self.remove_failed_work_units(work_spec_name, work_unit_keys) if (state is None) or (state == FINISHED): count += self.remove_finished_work_units(work_spec_name, work_unit_keys) return count
[ "def", "del_work_units", "(", "self", ",", "work_spec_name", ",", "work_unit_keys", "=", "None", ",", "state", "=", "None", ",", "all", "=", "False", ")", ":", "count", "=", "0", "if", "(", "state", "is", "None", ")", "or", "(", "state", "==", "AVAIL...
Delete work units from a work spec. The parameters are considered in order as follows: * If `all` is :const:`True`, then all work units in `work_spec_name` are deleted; otherwise * If `state` is not :const:`None`, then all work units in the named state are deleted; otherwise * If `work_unit_keys` are specified, then those specific work units are deleted; otherwise * Nothing is deleted. :param str work_spec_name: name of the work spec :param list work_unit_keys: if not :const:`None`, only delete these specific keys :param str state: only delete work units in this state :param bool all: if true, delete all work units :return: number of work units deleted
[ "Delete", "work", "units", "from", "a", "work", "spec", "." ]
python
train
rlabbe/filterpy
filterpy/leastsq/least_squares.py
https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/leastsq/least_squares.py#L157-L205
def errors(self): """ Computes and returns the error and standard deviation of the filter at this time step. Returns ------- error : np.array size 1xorder+1 std : np.array size 1xorder+1 """ n = self.n dt = self.dt order = self._order sigma = self.sigma error = np.zeros(order + 1) std = np.zeros(order + 1) if n == 0: return (error, std) if order == 0: error[0] = sigma/sqrt(n) std[0] = sigma/sqrt(n) elif order == 1: if n > 1: error[0] = sigma * sqrt(2*(2*n-1) / (n*(n+1))) error[1] = sigma * sqrt(12. / (n*(n*n-1)*dt*dt)) std[0] = sigma * sqrt((2*(2*n-1)) / (n*(n+1))) std[1] = (sigma/dt) * sqrt(12. / (n*(n*n-1))) elif order == 2: dt2 = dt * dt if n >= 3: error[0] = sigma * sqrt(3*(3*n*n-3*n+2) / (n*(n+1)*(n+2))) error[1] = sigma * sqrt(12*(16*n*n-30*n+11) / (n*(n*n-1)*(n*n-4)*dt2)) error[2] = sigma * sqrt(720/(n*(n*n-1)*(n*n-4)*dt2*dt2)) std[0] = sigma * sqrt((3*(3*n*n - 3*n + 2)) / (n*(n+1)*(n+2))) std[1] = (sigma/dt) * sqrt((12*(16*n*n - 30*n + 11)) / (n*(n*n - 1)*(n*n - 4))) std[2] = (sigma/dt2) * sqrt(720 / (n*(n*n-1)*(n*n-4))) return error, std
[ "def", "errors", "(", "self", ")", ":", "n", "=", "self", ".", "n", "dt", "=", "self", ".", "dt", "order", "=", "self", ".", "_order", "sigma", "=", "self", ".", "sigma", "error", "=", "np", ".", "zeros", "(", "order", "+", "1", ")", "std", "...
Computes and returns the error and standard deviation of the filter at this time step. Returns ------- error : np.array size 1xorder+1 std : np.array size 1xorder+1
[ "Computes", "and", "returns", "the", "error", "and", "standard", "deviation", "of", "the", "filter", "at", "this", "time", "step", "." ]
python
train
bokeh/bokeh
bokeh/core/property/container.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/container.py#L124-L134
def wrap(cls, value): ''' Some property types need to wrap their values in special containers, etc. ''' if isinstance(value, list): if isinstance(value, PropertyValueList): return value else: return PropertyValueList(value) else: return value
[ "def", "wrap", "(", "cls", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "if", "isinstance", "(", "value", ",", "PropertyValueList", ")", ":", "return", "value", "else", ":", "return", "PropertyValueList", "(", "value",...
Some property types need to wrap their values in special containers, etc.
[ "Some", "property", "types", "need", "to", "wrap", "their", "values", "in", "special", "containers", "etc", "." ]
python
train
evolbioinfo/pastml
pastml/ml.py
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/ml.py#L336-L375
def alter_zero_tip_allowed_states(tree, feature): """ Alters the bottom-up likelihood arrays for zero-distance tips to make sure they do not contradict with other zero-distance tip siblings. :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the likelihood is altered :return: void, modifies the get_personalised_feature_name(feature, BU_LH) feature to zero-distance tips. """ zero_parent2tips = defaultdict(list) allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES) for tip in tree: if tip.dist == 0: state = getattr(tip, feature, None) if state is not None and state != '': zero_parent2tips[tip.up].append(tip) # adjust zero tips to contain all the zero tip options as states for parent, zero_tips in zero_parent2tips.items(): # If there is a common state do nothing counts = None for tip in zero_tips: if counts is None: counts = getattr(tip, allowed_state_feature).copy() else: counts += getattr(tip, allowed_state_feature) if counts.max() == len(zero_tips): continue # Otherwise set all tip states to state union allowed_states = None for tip in zero_tips: if allowed_states is None: allowed_states = getattr(tip, allowed_state_feature).copy() else: tip_allowed_states = getattr(tip, allowed_state_feature) allowed_states[np.nonzero(tip_allowed_states)] = 1 tip.add_feature(allowed_state_feature, allowed_states)
[ "def", "alter_zero_tip_allowed_states", "(", "tree", ",", "feature", ")", ":", "zero_parent2tips", "=", "defaultdict", "(", "list", ")", "allowed_state_feature", "=", "get_personalized_feature_name", "(", "feature", ",", "ALLOWED_STATES", ")", "for", "tip", "in", "t...
Alters the bottom-up likelihood arrays for zero-distance tips to make sure they do not contradict with other zero-distance tip siblings. :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the likelihood is altered :return: void, modifies the get_personalised_feature_name(feature, BU_LH) feature to zero-distance tips.
[ "Alters", "the", "bottom", "-", "up", "likelihood", "arrays", "for", "zero", "-", "distance", "tips", "to", "make", "sure", "they", "do", "not", "contradict", "with", "other", "zero", "-", "distance", "tip", "siblings", "." ]
python
train
mbedmicro/pyOCD
pyocd/target/pack/cmsis_pack.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/cmsis_pack.py#L279-L341
def _build_memory_regions(self): """! @brief Creates memory region instances for the device. For each `<memory>` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions(). """ for elem in self._info.memories: try: # Get the region name, type, and access permissions. if 'name' in elem.attrib: name = elem.attrib['name'] access = elem.attrib['access'] if ('p' in access): type = MemoryType.DEVICE elif ('w' in access): type = MemoryType.RAM else: type = MemoryType.ROM elif 'id' in elem.attrib: name = elem.attrib['id'] if 'RAM' in name: access = 'rwx' type = MemoryType.RAM else: access = 'rx' type = MemoryType.ROM else: continue # Both start and size are required attributes. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) isDefault = _get_bool_attribute(elem, 'default') isStartup = _get_bool_attribute(elem, 'startup') if isStartup: self._saw_startup = True attrs = { 'name': name, 'start': start, 'length': size, 'access': access, 'is_default': isDefault, 'is_boot_memory': isStartup, 'is_testable': isDefault, 'alias': elem.attrib.get('alias', None), } # Create the memory region and add to map. region = MEMORY_TYPE_CLASS_MAP[type](**attrs) self._regions.append(region) # Record the first default ram for use in flash algos. if self._default_ram is None and type == MemoryType.RAM and isDefault: self._default_ram = region except (KeyError, ValueError) as err: # Ignore errors. LOG.debug("ignoring error parsing memories for CMSIS-Pack devices %s: %s", self.part_number, str(err))
[ "def", "_build_memory_regions", "(", "self", ")", ":", "for", "elem", "in", "self", ".", "_info", ".", "memories", ":", "try", ":", "# Get the region name, type, and access permissions.", "if", "'name'", "in", "elem", ".", "attrib", ":", "name", "=", "elem", "...
! @brief Creates memory region instances for the device. For each `<memory>` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions().
[ "!" ]
python
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/table.py#L712-L784
def _do_mutate_retryable_rows(self): """Mutate all the rows that are eligible for retry. A row is eligible for retry if it has not been tried or if it resulted in a transient error in a previous call. :rtype: list :return: The responses statuses, which is a list of :class:`~google.rpc.status_pb2.Status`. :raises: One of the following: * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried """ retryable_rows = [] index_into_all_rows = [] for index, status in enumerate(self.responses_statuses): if self._is_retryable(status): retryable_rows.append(self.rows[index]) index_into_all_rows.append(index) if not retryable_rows: # All mutations are either successful or non-retryable now. return self.responses_statuses mutate_rows_request = _mutate_rows_request( self.table_name, retryable_rows, app_profile_id=self.app_profile_id ) data_client = self.client.table_data_client inner_api_calls = data_client._inner_api_calls if "mutate_rows" not in inner_api_calls: default_retry = (data_client._method_configs["MutateRows"].retry,) if self.timeout is None: default_timeout = data_client._method_configs["MutateRows"].timeout else: default_timeout = timeout.ExponentialTimeout(deadline=self.timeout) data_client._inner_api_calls["mutate_rows"] = wrap_method( data_client.transport.mutate_rows, default_retry=default_retry, default_timeout=default_timeout, client_info=data_client._client_info, ) responses = data_client._inner_api_calls["mutate_rows"]( mutate_rows_request, retry=None ) num_responses = 0 num_retryable_responses = 0 for response in responses: for entry in response.entries: num_responses += 1 index = index_into_all_rows[entry.index] self.responses_statuses[index] = entry.status if self._is_retryable(entry.status): num_retryable_responses += 1 if entry.status.code == 0: self.rows[index].clear() if len(retryable_rows) != num_responses: raise RuntimeError( 
"Unexpected number of responses", num_responses, "Expected", len(retryable_rows), ) if num_retryable_responses: raise _BigtableRetryableError return self.responses_statuses
[ "def", "_do_mutate_retryable_rows", "(", "self", ")", ":", "retryable_rows", "=", "[", "]", "index_into_all_rows", "=", "[", "]", "for", "index", ",", "status", "in", "enumerate", "(", "self", ".", "responses_statuses", ")", ":", "if", "self", ".", "_is_retr...
Mutate all the rows that are eligible for retry. A row is eligible for retry if it has not been tried or if it resulted in a transient error in a previous call. :rtype: list :return: The responses statuses, which is a list of :class:`~google.rpc.status_pb2.Status`. :raises: One of the following: * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried
[ "Mutate", "all", "the", "rows", "that", "are", "eligible", "for", "retry", "." ]
python
train
has2k1/plotnine
doc/sphinxext/examples_and_gallery.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/doc/sphinxext/examples_and_gallery.py#L289-L315
def add_entries_to_gallery(app, doctree, docname): """ Add entries to the gallery node Should happen when all the doctrees have been read and the gallery entries have been collected. i.e at doctree-resolved time. """ if docname != 'gallery': return if not has_gallery(app.builder.name): return # Find gallery node try: node = doctree.traverse(gallery)[0] except TypeError: return content = [] for entry in app.env.gallery_entries: raw_html_node = nodes.raw('', text=entry.html, format='html') content.append(raw_html_node) # Even when content is empty, we want the gallery node replaced node.replace_self(content)
[ "def", "add_entries_to_gallery", "(", "app", ",", "doctree", ",", "docname", ")", ":", "if", "docname", "!=", "'gallery'", ":", "return", "if", "not", "has_gallery", "(", "app", ".", "builder", ".", "name", ")", ":", "return", "# Find gallery node", "try", ...
Add entries to the gallery node Should happen when all the doctrees have been read and the gallery entries have been collected. i.e at doctree-resolved time.
[ "Add", "entries", "to", "the", "gallery", "node" ]
python
train
ThreatResponse/margaritashotgun
margaritashotgun/repository.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/repository.py#L310-L329
def verify_module(self, filename, module, verify_signature): """ Verify kernel module checksum and signature :type filename: str :param filename: downloaded kernel module path :type module: dict :param module: kernel module metadata :type verify_signature: bool :param verify_signature: enable/disable signature verification """ with open(filename, 'rb') as f: module_data = f.read() self.verify_checksum(module_data, module['checksum'], module['location']) if self.gpg_verify: signature_url = "{0}/{1}".format(self.url, module['signature']) file_url = "{0}/{1}".format(self.url, module['location']) self.verify_file_signature(signature_url, file_url, filename)
[ "def", "verify_module", "(", "self", ",", "filename", ",", "module", ",", "verify_signature", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "module_data", "=", "f", ".", "read", "(", ")", "self", ".", "verify_checksum", "...
Verify kernel module checksum and signature :type filename: str :param filename: downloaded kernel module path :type module: dict :param module: kernel module metadata :type verify_signature: bool :param verify_signature: enable/disable signature verification
[ "Verify", "kernel", "module", "checksum", "and", "signature" ]
python
train
sporestack/bitcash
bitcash/wallet.py
https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/wallet.py#L191-L198
def get_unspents(self): """Fetches all available unspent transaction outputs. :rtype: ``list`` of :class:`~bitcash.network.meta.Unspent` """ self.unspents[:] = NetworkAPI.get_unspent(self.address) self.balance = sum(unspent.amount for unspent in self.unspents) return self.unspents
[ "def", "get_unspents", "(", "self", ")", ":", "self", ".", "unspents", "[", ":", "]", "=", "NetworkAPI", ".", "get_unspent", "(", "self", ".", "address", ")", "self", ".", "balance", "=", "sum", "(", "unspent", ".", "amount", "for", "unspent", "in", ...
Fetches all available unspent transaction outputs. :rtype: ``list`` of :class:`~bitcash.network.meta.Unspent`
[ "Fetches", "all", "available", "unspent", "transaction", "outputs", "." ]
python
train
edoburu/django-tag-parser
tag_parser/parser.py
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/parser.py#L36-L87
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True):
    """
    Allow the template tag arguments to be like a normal Python function,
    with ``*args`` and ``**kwargs``.

    :param parser: The "parser" object that ``@register.tag`` provides.
    :type parser: :class:`~django.template.Parser`
    :param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or splitted bits
    :param compile_args: Whether the arguments should be compiled using
        :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
    :param compile_kwargs: Whether the keyword arguments should be compiled using
        :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
    :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
    :type allowed_kwargs: tuple
    :return: The tag name, arguments and keyword arguments.
    :rtype: tuple(tag_name, args, kwargs)
    """
    if isinstance(token, Token):
        bits = token.split_contents()
    else:
        bits = token

    expect_kwarg = False
    args = []
    kwargs = {}
    prev_bit = None

    tag_name = bits[0]
    for bit in bits[1:]:
        kwarg_match = kwarg_re.match(bit)
        if kwarg_match:
            # Keyword argument. Split on the FIRST '=' only so filter
            # expressions that themselves contain '=' stay intact.
            # BUGFIX: maxsplit was 2, which produced three parts (and a
            # ValueError on unpacking) whenever the value contained an '='.
            expect_kwarg = True
            (name, expr) = bit.split('=', 1)
            kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
        else:
            # Still at positional arguments; positional after keyword is an error.
            if expect_kwarg:
                raise TemplateSyntaxError(
                    "{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(
                        bits[0], bit, prev_bit))
            args.append(parser.compile_filter(bit) if compile_args else bit)

        prev_bit = bit

    # Validate the allowed arguments, to make things easier for template developers
    if allowed_kwargs is not None and kwargs:
        if not allowed_kwargs:
            # BUGFIX: the original raised this message with bare '%s'
            # placeholders and no formatting arguments.
            raise TemplateSyntaxError(
                "The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed." % (
                    next(iter(kwargs)), tag_name))
        for name in kwargs:
            if name not in allowed_kwargs:
                raise TemplateSyntaxError(
                    "The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (
                        name, bits[0], ", ".join(allowed_kwargs)))

    return tag_name, args, kwargs
[ "def", "parse_token_kwargs", "(", "parser", ",", "token", ",", "allowed_kwargs", "=", "None", ",", "compile_args", "=", "True", ",", "compile_kwargs", "=", "True", ")", ":", "if", "isinstance", "(", "token", ",", "Token", ")", ":", "bits", "=", "token", ...
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs. :param parser: The "parser" object that ``@register.tag`` provides. :type parser: :class:`~django.template.Parser` :param token: The "token" object that ``@register.tag`` provides. :type token: :class:`~django.template.Token` or splitted bits :param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check. :type allowed_kwargs: tuple :return: The tag name, arguments and keyword arguments. :rtype: tuple(tag_name, args, kwargs)
[ "Allow", "the", "template", "tag", "arguments", "to", "be", "like", "a", "normal", "Python", "function", "with", "*", "args", "and", "**", "kwargs", "." ]
python
test
xav-b/pyconsul
pyconsul/utils.py
https://github.com/xav-b/pyconsul/blob/06ce3b921d01010c19643424486bea4b22196076/pyconsul/utils.py#L25-L43
def safe_request(fct):
    ''' Return json messages instead of raising errors '''
    def inner(*args, **kwargs):
        ''' decorator '''
        try:
            response = fct(*args, **kwargs)
        except requests.exceptions.ConnectionError as error:
            return {'error': str(error), 'status': 404}
        # Non-2xx/3xx responses are reported as error payloads.
        if not response.ok:
            return {'error': response.reason, 'status': response.status_code}
        # An empty body on a successful response still signals success.
        if response.content:
            return response.json()
        return {'success': True}
    return inner
[ "def", "safe_request", "(", "fct", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "''' decorator '''", "try", ":", "_data", "=", "fct", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "requests", ".", "exce...
Return json messages instead of raising errors
[ "Return", "json", "messages", "instead", "of", "raising", "errors" ]
python
train