repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
amperser/proselint
proselint/checks/terms/denizen_labels.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/terms/denizen_labels.py#L62-L88
def check_denizen_labels_norris(text):
    """Suggest the preferred demonym forms.

    source:     Mary Norris
    source_url: http://nyr.kr/1rGienj
    """
    err = "terms.denizen_labels.norris"
    msg = "Would you like '{}'?"

    # Preferred demonym -> the discouraged variants it replaces.
    norris_forms = {
        "Mancunian": ["Manchesterian"],
        "Mancunians": ["Manchesterians"],
        "Vallisoletano": ["Valladolidian"],
        "Wulfrunian": ["Wolverhamptonian", "Wolverhamptonite"],
        "Novocastrian": ["Newcastleite", "Newcastlite"],
        "Trifluvian": [u"Trois-Rivièrester"],
        "Leodenisian": ["Leedsian"],
        "Minneapolitan": ["Minneapolisian"],
        "Hartlepudlian": ["Hartlepoolian"],
        "Liverpudlian": ["Liverpoolian"],
        "Haligonian": ["Halifaxer"],
        "Varsovian": ["Warsawer", "Warsawian"],
        "Providentian": ["Providencian", "Providencer"],
        "Tridentine": ["Trentian", "Trentonian"],
    }
    preferences = [[preferred, avoid] for preferred, avoid in norris_forms.items()]

    return preferred_forms_check(text, preferences, err, msg)
[ "def", "check_denizen_labels_norris", "(", "text", ")", ":", "err", "=", "\"terms.denizen_labels.norris\"", "msg", "=", "\"Would you like '{}'?\"", "preferences", "=", "[", "[", "\"Mancunian\"", ",", "[", "\"Manchesterian\"", "]", "]", ",", "[", "\"Mancunians\"", ",...
Suggest the preferred forms. source: Mary Norris source_url: http://nyr.kr/1rGienj
[ "Suggest", "the", "preferred", "forms", "." ]
python
train
capnproto/pycapnp
examples/calculator_client.py
https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/examples/calculator_client.py#L35-L303
def main(host):
    """Exercise the Calculator capability served at `host` with a series of
    pipelined RPC examples, asserting each expected result."""
    client = capnp.TwoPartyClient(host)

    # Pass "calculator" to ez_restore (there's also a `restore` function that
    # takes a struct or AnyPointer as an argument), and then cast the returned
    # capability to it's proper type. This casting is due to capabilities not
    # having a reference to their schema
    calculator = client.bootstrap().cast_as(calculator_capnp.Calculator)

    '''Make a request that just evaluates the literal value 123.

    What's interesting here is that evaluate() returns a "Value", which is
    another interface and therefore points back to an object living on the
    server.  We then have to call read() on that object to read it.
    However, even though we are making two RPC's, this block executes in
    *one* network round trip because of promise pipelining: we do not wait
    for the first call to complete before we send the second call to the
    server.'''

    print('Evaluating a literal... ', end="")

    # Make the request. Note we are using the shorter function form (instead
    # of evaluate_request), and we are passing a dictionary that represents a
    # struct and its member to evaluate
    eval_promise = calculator.evaluate({"literal": 123})

    # This is equivalent to:
    '''
    request = calculator.evaluate_request()
    request.expression.literal = 123

    # Send it, which returns a promise for the result (without blocking).
    eval_promise = request.send()
    '''

    # Using the promise, create a pipelined request to call read() on the
    # returned object. Note that here we are using the shortened method call
    # syntax read(), which is mostly just sugar for read_request().send()
    read_promise = eval_promise.value.read()

    # Now that we've sent all the requests, wait for the response.  Until this
    # point, we haven't waited at all!
    response = read_promise.wait()
    assert response.value == 123

    print("PASS")

    '''Make a request to evaluate 123 + 45 - 67.

    The Calculator interface requires that we first call getOperator() to
    get the addition and subtraction functions, then call evaluate() to use
    them.  But, once again, we can get both functions, call evaluate(), and
    then read() the result -- four RPCs -- in the time of *one* network
    round trip, because of promise pipelining.'''

    print("Using add and subtract... ", end='')

    # Get the "add" function from the server.
    add = calculator.getOperator(op='add').func
    # Get the "subtract" function from the server.
    subtract = calculator.getOperator(op='subtract').func

    # Build the request to evaluate 123 + 45 - 67. Note the form is 'evaluate'
    # + '_request', where 'evaluate' is the name of the method we want to call
    request = calculator.evaluate_request()
    subtract_call = request.expression.init('call')
    subtract_call.function = subtract
    subtract_params = subtract_call.init('params', 2)
    subtract_params[1].literal = 67.0

    # The subtraction's first operand is itself an addition call (123 + 45).
    add_call = subtract_params[0].init('call')
    add_call.function = add
    add_params = add_call.init('params', 2)
    add_params[0].literal = 123
    add_params[1].literal = 45

    # Send the evaluate() request, read() the result, and wait for read() to
    # finish.
    eval_promise = request.send()
    read_promise = eval_promise.value.read()

    response = read_promise.wait()
    assert response.value == 101

    print("PASS")

    '''
    Note: a one liner version of building the previous request (I highly
    recommend not doing it this way for such a complicated structure, but I
    just wanted to demonstrate it is possible to set all of the fields with a
    dictionary):

    eval_promise = calculator.evaluate(
        {'call': {'function': subtract,
                  'params': [{'call': {'function': add,
                                       'params': [{'literal': 123},
                                                  {'literal': 45}]}},
                             {'literal': 67.0}]}})
    '''

    '''Make a request to evaluate 4 * 6, then use the result in two more
    requests that add 3 and 5.

    Since evaluate() returns its result wrapped in a `Value`, we can pass
    that `Value` back to the server in subsequent requests before the first
    `evaluate()` has actually returned.  Thus, this example again does only
    one network round trip.'''

    print("Pipelining eval() calls... ", end="")

    # Get the "add" function from the server.
    add = calculator.getOperator(op='add').func
    # Get the "multiply" function from the server.
    multiply = calculator.getOperator(op='multiply').func

    # Build the request to evaluate 4 * 6
    request = calculator.evaluate_request()
    multiply_call = request.expression.init("call")
    multiply_call.function = multiply
    multiply_params = multiply_call.init("params", 2)
    multiply_params[0].literal = 4
    multiply_params[1].literal = 6

    multiply_result = request.send().value

    # Use the result in two calls that add 3 and add 5.
    add_3_request = calculator.evaluate_request()
    add_3_call = add_3_request.expression.init("call")
    add_3_call.function = add
    add_3_params = add_3_call.init("params", 2)
    # previousResult pipelines the server-side Value without reading it here.
    add_3_params[0].previousResult = multiply_result
    add_3_params[1].literal = 3
    add_3_promise = add_3_request.send().value.read()

    add_5_request = calculator.evaluate_request()
    add_5_call = add_5_request.expression.init("call")
    add_5_call.function = add
    add_5_params = add_5_call.init("params", 2)
    add_5_params[0].previousResult = multiply_result
    add_5_params[1].literal = 5
    add_5_promise = add_5_request.send().value.read()

    # Now wait for the results.
    assert add_3_promise.wait().value == 27
    assert add_5_promise.wait().value == 29

    print("PASS")

    '''Our calculator interface supports defining functions.  Here we use it
    to define two functions and then make calls to them as follows:

      f(x, y) = x * 100 + y
      g(x) = f(x, x + 1) * 2;
      f(12, 34)
      g(21)

    Once again, the whole thing takes only one network round trip.'''

    print("Defining functions... ", end="")

    # Get the "add" function from the server.
    add = calculator.getOperator(op='add').func
    # Get the "multiply" function from the server.
    multiply = calculator.getOperator(op='multiply').func

    # Define f.
    request = calculator.defFunction_request()
    request.paramCount = 2

    # Build the function body.
    add_call = request.body.init("call")
    add_call.function = add
    add_params = add_call.init("params", 2)
    add_params[1].parameter = 1  # y

    multiply_call = add_params[0].init("call")
    multiply_call.function = multiply
    multiply_params = multiply_call.init("params", 2)
    multiply_params[0].parameter = 0  # x
    multiply_params[1].literal = 100

    f = request.send().func

    # Define g.
    request = calculator.defFunction_request()
    request.paramCount = 1

    # Build the function body.
    multiply_call = request.body.init("call")
    multiply_call.function = multiply
    multiply_params = multiply_call.init("params", 2)
    multiply_params[1].literal = 2

    f_call = multiply_params[0].init("call")
    f_call.function = f
    f_params = f_call.init("params", 2)
    f_params[0].parameter = 0

    add_call = f_params[1].init("call")
    add_call.function = add
    add_params = add_call.init("params", 2)
    add_params[0].parameter = 0
    add_params[1].literal = 1

    g = request.send().func

    # OK, we've defined all our functions.  Now create our eval requests.

    # f(12, 34)
    f_eval_request = calculator.evaluate_request()
    f_call = f_eval_request.expression.init("call")
    f_call.function = f
    f_params = f_call.init("params", 2)
    f_params[0].literal = 12
    f_params[1].literal = 34
    f_eval_promise = f_eval_request.send().value.read()

    # g(21)
    g_eval_request = calculator.evaluate_request()
    g_call = g_eval_request.expression.init("call")
    g_call.function = g
    g_call.init('params', 1)[0].literal = 21
    g_eval_promise = g_eval_request.send().value.read()

    # Wait for the results.
    assert f_eval_promise.wait().value == 1234
    assert g_eval_promise.wait().value == 4244

    print("PASS")

    '''Make a request that will call back to a function defined locally.

    Specifically, we will compute 2^(4 + 5).  However, exponent is not
    defined by the Calculator server.  So, we'll implement the Function
    interface locally and pass it to the server for it to use when
    evaluating the expression.

    This example requires two network round trips to complete, because the
    server calls back to the client once before finishing.  In this
    particular case, this could potentially be optimized by using a tail
    call on the server side -- see CallContext::tailCall().  However, to
    keep the example simpler, we haven't implemented this optimization in
    the sample server.'''

    print("Using a callback... ", end="")

    # Get the "add" function from the server.
    add = calculator.getOperator(op='add').func

    # Build the eval request for 2^(4+5).
    request = calculator.evaluate_request()

    pow_call = request.expression.init("call")
    pow_call.function = PowerFunction()
    pow_params = pow_call.init("params", 2)
    pow_params[0].literal = 2

    add_call = pow_params[1].init("call")
    add_call.function = add
    add_params = add_call.init("params", 2)
    add_params[0].literal = 4
    add_params[1].literal = 5

    # Send the request and wait.
    response = request.send().value.read().wait()
    assert response.value == 512

    print("PASS")
[ "def", "main", "(", "host", ")", ":", "client", "=", "capnp", ".", "TwoPartyClient", "(", "host", ")", "# Pass \"calculator\" to ez_restore (there's also a `restore` function that", "# takes a struct or AnyPointer as an argument), and then cast the returned", "# capability to it's pr...
Make a request that just evaluates the literal value 123. What's interesting here is that evaluate() returns a "Value", which is another interface and therefore points back to an object living on the server. We then have to call read() on that object to read it. However, even though we are making two RPC's, this block executes in *one* network round trip because of promise pipelining: we do not wait for the first call to complete before we send the second call to the server.
[ "Make", "a", "request", "that", "just", "evaluates", "the", "literal", "value", "123", "." ]
python
train
enkore/i3pystatus
i3pystatus/timer.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/timer.py#L147-L158
def start(self, seconds=300):
    """Start the timer, or extend it if it is already running.

    :param int seconds: Initial time in seconds.
    """
    current = self.state
    if current is TimerState.running:
        # Already counting down: just add more time.
        self.increase(seconds)
    elif current is TimerState.stopped:
        self.state = TimerState.running
        self.compare = time.time() + abs(seconds)
[ "def", "start", "(", "self", ",", "seconds", "=", "300", ")", ":", "if", "self", ".", "state", "is", "TimerState", ".", "stopped", ":", "self", ".", "compare", "=", "time", ".", "time", "(", ")", "+", "abs", "(", "seconds", ")", "self", ".", "sta...
Starts timer. If timer is already running it will increase remaining time instead. :param int seconds: Initial time.
[ "Starts", "timer", ".", "If", "timer", "is", "already", "running", "it", "will", "increase", "remaining", "time", "instead", "." ]
python
train
Unidata/MetPy
metpy/interpolate/geometry.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/geometry.py#L377-L415
def find_local_boundary(tri, triangles):
    r"""Find and return the outside edges of a collection of natural neighbor
    triangles.

    There is no guarantee that this boundary is convex, so ConvexHull is not
    sufficient in some situations.

    Parameters
    ----------
    tri: Object
        A Delaunay Triangulation
    triangles: (N, ) array
        List of natural neighbor triangles.

    Returns
    -------
    edges: list of (int, int) tuples
        List of vertex codes that form outer edges of a group of natural
        neighbor triangles.
    """
    # Interior edges are shared (in opposite orientation) by two triangles
    # and cancel out; only boundary edges survive.  An insertion-ordered
    # dict is used as an ordered set so membership tests and removals are
    # O(1) instead of the O(n) list scans of a plain list, while the
    # returned edge order is identical.
    edges = {}

    for triangle in triangles:
        for i in range(3):
            pt1 = tri.simplices[triangle][i]
            pt2 = tri.simplices[triangle][(i + 1) % 3]

            if (pt1, pt2) in edges:
                del edges[(pt1, pt2)]
            elif (pt2, pt1) in edges:
                del edges[(pt2, pt1)]
            else:
                edges[(pt1, pt2)] = None

    return list(edges)
[ "def", "find_local_boundary", "(", "tri", ",", "triangles", ")", ":", "edges", "=", "[", "]", "for", "triangle", "in", "triangles", ":", "for", "i", "in", "range", "(", "3", ")", ":", "pt1", "=", "tri", ".", "simplices", "[", "triangle", "]", "[", ...
r"""Find and return the outside edges of a collection of natural neighbor triangles. There is no guarantee that this boundary is convex, so ConvexHull is not sufficient in some situations. Parameters ---------- tri: Object A Delaunay Triangulation triangles: (N, ) array List of natural neighbor triangles. Returns ------- edges: (2, N) ndarray List of vertex codes that form outer edges of a group of natural neighbor triangles.
[ "r", "Find", "and", "return", "the", "outside", "edges", "of", "a", "collection", "of", "natural", "neighbor", "triangles", "." ]
python
train
openstack/horizon
horizon/tabs/base.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/base.py#L170-L178
def load_tab_data(self):
    """Preload data for each tab that will be displayed."""
    request = self.request
    for tab in self._tabs.values():
        # Skip tabs that opted out of loading or are already loaded.
        if not tab.load or tab.data_loaded:
            continue
        try:
            tab._data = tab.get_context_data(request)
        except Exception:
            # Mark the tab as failed and let the standard handler report it.
            tab._data = False
            exceptions.handle(request)
[ "def", "load_tab_data", "(", "self", ")", ":", "for", "tab", "in", "self", ".", "_tabs", ".", "values", "(", ")", ":", "if", "tab", ".", "load", "and", "not", "tab", ".", "data_loaded", ":", "try", ":", "tab", ".", "_data", "=", "tab", ".", "get_...
Preload all data that for the tabs that will be displayed.
[ "Preload", "all", "data", "that", "for", "the", "tabs", "that", "will", "be", "displayed", "." ]
python
train
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L1307-L1370
def add_source(self, name, src_dict, free=None, init_source=True,
               save_source_maps=True, use_pylike=True,
               use_single_psf=False, **kwargs):
    """Add a source to the ROI model.  This function may be
    called either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.

    Parameters
    ----------
    name : str
        Source name.

    src_dict : dict or `~fermipy.roi_model.Source` object
        Dictionary or source object defining the source properties
        (coordinates, spectral parameters, etc.).

    free : bool
        Initialize the source with a free normalization parameter.

    use_pylike : bool
        Create source maps with pyLikelihood.

    use_single_psf : bool
        Use the PSF model calculated for the ROI center.  If false
        then a new model will be generated using the position of
        the source.
    """
    # Duplicate source names are an error rather than a silent overwrite.
    if self.roi.has_source(name):
        msg = 'Source %s already exists.' % name
        self.logger.error(msg)
        raise Exception(msg)

    loglevel = kwargs.pop('loglevel', self.loglevel)

    self.logger.log(loglevel, 'Adding source ' + name)

    src = self.roi.create_source(name, src_dict, rescale=True)
    self.make_template(src)

    # Propagate the new source to each analysis component.
    for c in self.components:
        c.add_source(name, src_dict, free=free,
                     save_source_maps=save_source_maps,
                     use_pylike=use_pylike,
                     use_single_psf=use_single_psf)

    # NOTE(review): _like is presumably None before setup() has been run;
    # in that case there is no likelihood object to synchronize yet.
    if self._like is None:
        return

    # Enable energy dispersion for this source unless explicitly disabled.
    if self.config['gtlike']['edisp'] and src.name not in \
            self.config['gtlike']['edisp_disable']:
        self.set_edisp_flag(src.name, True)

    self.like.syncSrcParams(str(name))
    self.like.model = self.like.components[0].model

    # if free is not None:
    #    self.free_norm(name, free, loglevel=logging.DEBUG)

    if init_source:
        self._init_source(name)
        self._update_roi()

    # Keep any active fit cache consistent with the updated ROI.
    if self._fitcache is not None:
        self._fitcache.update_source(name)
[ "def", "add_source", "(", "self", ",", "name", ",", "src_dict", ",", "free", "=", "None", ",", "init_source", "=", "True", ",", "save_source_maps", "=", "True", ",", "use_pylike", "=", "True", ",", "use_single_psf", "=", "False", ",", "*", "*", "kwargs",...
Add a source to the ROI model. This function may be called either before or after `~fermipy.gtanalysis.GTAnalysis.setup`. Parameters ---------- name : str Source name. src_dict : dict or `~fermipy.roi_model.Source` object Dictionary or source object defining the source properties (coordinates, spectral parameters, etc.). free : bool Initialize the source with a free normalization parameter. use_pylike : bool Create source maps with pyLikelihood. use_single_psf : bool Use the PSF model calculated for the ROI center. If false then a new model will be generated using the position of the source.
[ "Add", "a", "source", "to", "the", "ROI", "model", ".", "This", "function", "may", "be", "called", "either", "before", "or", "after", "~fermipy", ".", "gtanalysis", ".", "GTAnalysis", ".", "setup", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/slack.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/slack.py#L435-L457
def setup_cmd_parser(cls):
    """Build and return the Slack argument parser."""
    parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
                                          from_date=True,
                                          token_auth=True,
                                          archive=True)

    # An API token is mandatory for this backend.
    parser.parser._option_string_actions['--api-token'].required = True

    # Slack-specific options
    group = parser.parser.add_argument_group('Slack arguments')
    group.add_argument('--max-items', dest='max_items',
                       type=int, default=MAX_ITEMS,
                       help="Maximum number of items requested on the same query")

    # Positional (required) arguments
    parser.parser.add_argument('channel',
                               help="Slack channel identifier")

    return parser
[ "def", "setup_cmd_parser", "(", "cls", ")", ":", "parser", "=", "BackendCommandArgumentParser", "(", "cls", ".", "BACKEND", ".", "CATEGORIES", ",", "from_date", "=", "True", ",", "token_auth", "=", "True", ",", "archive", "=", "True", ")", "# Backend token is ...
Returns the Slack argument parser.
[ "Returns", "the", "Slack", "argument", "parser", "." ]
python
test
bioidiap/bob.ip.facedetect
bob/ip/facedetect/detector/cascade.py
https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/detector/cascade.py#L44-L65
def add(self, classifier, threshold, begin=None, end=None):
    """Append a strong classifier with the given threshold to the cascade.

    **Parameters:**

    classifier : :py:class:`bob.learn.boosting.BoostedMachine`
      A strong classifier to add

    ``threshold`` : float
      The classification threshold for this cascade step

    ``begin``, ``end`` : int or ``None``
      If specified, only the weak machines with the indices
      ``range(begin,end)`` will be added.
    """
    machine = bob.learn.boosting.BoostedMachine()
    start = 0 if begin is None else begin
    stop = len(classifier.weak_machines) if end is None else end

    # Copy the selected weak machines (and their weights) into the new step.
    for index in range(start, stop):
        machine.add_weak_machine(classifier.weak_machines[index],
                                 classifier.weights[index])

    self.cascade.append(machine)
    self.thresholds.append(threshold)
    # Refresh the cached feature indices for the extended cascade.
    self._indices()
[ "def", "add", "(", "self", ",", "classifier", ",", "threshold", ",", "begin", "=", "None", ",", "end", "=", "None", ")", ":", "boosted_machine", "=", "bob", ".", "learn", ".", "boosting", ".", "BoostedMachine", "(", ")", "if", "begin", "is", "None", ...
Adds a new strong classifier with the given threshold to the cascade. **Parameters:** classifier : :py:class:`bob.learn.boosting.BoostedMachine` A strong classifier to add ``threshold`` : float The classification threshold for this cascade step ``begin``, ``end`` : int or ``None`` If specified, only the weak machines with the indices ``range(begin,end)`` will be added.
[ "Adds", "a", "new", "strong", "classifier", "with", "the", "given", "threshold", "to", "the", "cascade", "." ]
python
train
timstaley/voevent-parse
src/voeventparse/convenience.py
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/convenience.py#L186-L198
def pull_astro_coords(voevent, index=0):
    """
    Deprecated alias of :func:`.get_event_position`
    """
    # Imported locally so the warning machinery is only touched on the
    # deprecated path.
    import warnings
    warnings.warn(
        """
        The function `pull_astro_coords` has been renamed to
        `get_event_position`. This alias is preserved for backwards
        compatibility, and may be removed in a future release.
        """,
        FutureWarning)
    # Delegate unchanged to the renamed implementation.
    return get_event_position(voevent, index)
[ "def", "pull_astro_coords", "(", "voevent", ",", "index", "=", "0", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"\"\"\n The function `pull_astro_coords` has been renamed to\n `get_event_position`. This alias is preserved for backwards\n compat...
Deprecated alias of :func:`.get_event_position`
[ "Deprecated", "alias", "of", ":", "func", ":", ".", "get_event_position" ]
python
train
inasafe/inasafe
safe/datastore/geopackage.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/datastore/geopackage.py#L182-L228
def _add_vector_layer(self, vector_layer, layer_name, save_style=False):
    """Add a vector layer to the geopackage.

    :param vector_layer: The layer to add.
    :type vector_layer: QgsVectorLayer

    :param layer_name: The name of the layer in the datastore.
    :type layer_name: str

    :param save_style: If we have to save a QML too. Default to False.
        Not implemented in geopackage !
    :type save_style: bool

    :returns: A two-tuple. The first element will be True if we could add
        the layer to the datastore. The second element will be the layer
        name which has been used or the error message.
    :rtype: (bool, str)

    .. versionadded:: 4.0
    """
    # Fixme
    # if not self.is_writable():
    #     return False, 'The destination is not writable.'

    geometry = QGIS_OGR_GEOMETRY_MAP[vector_layer.wkbType()]
    spatial_reference = osr.SpatialReference()
    qgis_spatial_reference = vector_layer.crs().authid()

    # Use 4326 as default if the spatial reference is not found
    epsg = 4326
    if qgis_spatial_reference:
        epsg = int(qgis_spatial_reference.split(':')[1])
    spatial_reference.ImportFromEPSG(epsg)

    vector_datasource = self.vector_driver.Open(
        self.uri.absoluteFilePath(), True)
    vector_datasource.CreateLayer(layer_name, spatial_reference, geometry)

    # BUG FIX: the original rebound `vector_layer` to the freshly created
    # (empty) destination layer and then iterated *its* features, so no
    # features were ever copied from the source layer.  Keep the destination
    # in a separate variable and iterate the source layer instead.
    uri = '{}|layerid=0'.format(self.uri.absoluteFilePath())
    destination_layer = QgsVectorLayer(uri, layer_name, 'ogr')
    data_provider = destination_layer.dataProvider()

    for feature in vector_layer.getFeatures():
        data_provider.addFeatures([feature])

    return True, layer_name
[ "def", "_add_vector_layer", "(", "self", ",", "vector_layer", ",", "layer_name", ",", "save_style", "=", "False", ")", ":", "# Fixme", "# if not self.is_writable():", "# return False, 'The destination is not writable.'", "geometry", "=", "QGIS_OGR_GEOMETRY_MAP", "[", "ve...
Add a vector layer to the geopackage. :param vector_layer: The layer to add. :type vector_layer: QgsVectorLayer :param layer_name: The name of the layer in the datastore. :type layer_name: str :param save_style: If we have to save a QML too. Default to False. Not implemented in geopackage ! :type save_style: bool :returns: A two-tuple. The first element will be True if we could add the layer to the datastore. The second element will be the layer name which has been used or the error message. :rtype: (bool, str) .. versionadded:: 4.0
[ "Add", "a", "vector", "layer", "to", "the", "geopackage", "." ]
python
train
klahnakoski/pyLibrary
jx_base/expressions.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/expressions.py#L157-L201
def define(cls, expr):
    """
    GENERAL SUPPORT FOR BUILDING EXPRESSIONS FROM JSON EXPRESSIONS
    OVERRIDE THIS IF AN OPERATOR EXPECTS COMPLICATED PARAMETERS
    :param expr: Data representing a JSON Expression
    :return: parse tree
    """
    try:
        lang = cls.lang
        items = items_(expr)
        # Scan the expression's keys for the first one that names a known
        # operator; its value becomes the operator term and every other
        # key/value pair is parsed as a clause.
        for item in items:
            op, term = item
            full_op = operators.get(op)
            if full_op:
                class_ = lang.ops[full_op.id]
                clauses = {k: jx_expression(v) for k, v in expr.items() if k != op}
                break
        else:
            # No operator key matched: an empty expression maps to NULL,
            # anything else is a genuine unknown-operator error.
            if not items:
                return NULL
            raise Log.error("{{operator|quote}} is not a known operator", operator=expr)

        # Dispatch on the shape of the operator's term.
        if term == None:
            return class_([], **clauses)
        elif is_list(term):
            terms = [jx_expression(t) for t in term]
            return class_(terms, **clauses)
        elif is_data(term):
            items = items_(term)
            if class_.has_simple_form:
                if len(items) == 1:
                    # Simple form: a single {variable: literal} pair.
                    k, v = items[0]
                    return class_([Variable(k), Literal(v)], **clauses)
                else:
                    return class_({k: Literal(v) for k, v in items}, **clauses)
            else:
                return class_(_jx_expression(term, lang), **clauses)
        else:
            if op in ["literal", "date", "offset"]:
                # These operators take their term verbatim, unparsed.
                return class_(term, **clauses)
            else:
                return class_(_jx_expression(term, lang), **clauses)
    except Exception as e:
        Log.error("programmer error expr = {{value|quote}}", value=expr, cause=e)
[ "def", "define", "(", "cls", ",", "expr", ")", ":", "try", ":", "lang", "=", "cls", ".", "lang", "items", "=", "items_", "(", "expr", ")", "for", "item", "in", "items", ":", "op", ",", "term", "=", "item", "full_op", "=", "operators", ".", "get",...
GENERAL SUPPORT FOR BUILDING EXPRESSIONS FROM JSON EXPRESSIONS OVERRIDE THIS IF AN OPERATOR EXPECTS COMPLICATED PARAMETERS :param expr: Data representing a JSON Expression :return: parse tree
[ "GENERAL", "SUPPORT", "FOR", "BUILDING", "EXPRESSIONS", "FROM", "JSON", "EXPRESSIONS", "OVERRIDE", "THIS", "IF", "AN", "OPERATOR", "EXPECTS", "COMPLICATED", "PARAMETERS", ":", "param", "expr", ":", "Data", "representing", "a", "JSON", "Expression", ":", "return", ...
python
train
napalm-automation/napalm
napalm/base/helpers.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/base/helpers.py#L307-L333
def ip(addr, version=None):
    """
    Converts a raw string to a valid IP address. Optional version argument will detect that \
    object matches specified version.

    Motivation: the groups of the IP addreses may contain leading zeros. IPv6 addresses can \
    contain sometimes uppercase characters. E.g.: 2001:0dB8:85a3:0000:0000:8A2e:0370:7334 has \
    the same logical value as 2001:db8:85a3::8a2e:370:7334. However, their values as strings are \
    not the same.

    :param addr: the raw string containing the value of the IP Address
    :param version: (optional) insist on a specific IP address version.
    :type version: int.
    :return: a string containing the IP Address in a standard format (no leading zeros, \
    zeros-grouping, lowercase)

    Example:

    .. code-block:: python

        >>> ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334')
        u'2001:db8:85a3::8a2e:370:7334'
    """
    parsed = IPAddress(addr)
    # Enforce the requested IP version, if any was given.
    if version and parsed.version != version:
        raise ValueError("{} is not an ipv{} address".format(addr, version))
    return py23_compat.text_type(parsed)
[ "def", "ip", "(", "addr", ",", "version", "=", "None", ")", ":", "addr_obj", "=", "IPAddress", "(", "addr", ")", "if", "version", "and", "addr_obj", ".", "version", "!=", "version", ":", "raise", "ValueError", "(", "\"{} is not an ipv{} address\"", ".", "f...
Converts a raw string to a valid IP address. Optional version argument will detect that \ object matches specified version. Motivation: the groups of the IP addreses may contain leading zeros. IPv6 addresses can \ contain sometimes uppercase characters. E.g.: 2001:0dB8:85a3:0000:0000:8A2e:0370:7334 has \ the same logical value as 2001:db8:85a3::8a2e:370:7334. However, their values as strings are \ not the same. :param raw: the raw string containing the value of the IP Address :param version: (optional) insist on a specific IP address version. :type version: int. :return: a string containing the IP Address in a standard format (no leading zeros, \ zeros-grouping, lowercase) Example: .. code-block:: python >>> ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334') u'2001:db8:85a3::8a2e:370:7334'
[ "Converts", "a", "raw", "string", "to", "a", "valid", "IP", "address", ".", "Optional", "version", "argument", "will", "detect", "that", "\\", "object", "matches", "specified", "version", "." ]
python
train
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L1733-L1757
def set(self, value):
    """
    Sets the value of the string

    :param value:
        A unicode string
    """

    if not isinstance(value, str_cls):
        raise TypeError(unwrap(
            '''
            %s value must be a unicode string, not %s
            ''',
            type_name(self),
            type_name(value)
        ))

    self._unicode = value
    self.contents = value.encode(self._encoding)
    # Invalidate the cached header: the contents (and its length) changed.
    self._header = None
    # Switch from indefinite-length back to definite-length encoding, since
    # the new contents are fully known.
    if self._indefinite:
        self._indefinite = False
        self.method = 0
    if self._trailer != b'':
        self._trailer = b''
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be a unicode string, not %s\n '''", ",", "type_name", "...
Sets the value of the string :param value: A unicode string
[ "Sets", "the", "value", "of", "the", "string" ]
python
train
spacetelescope/drizzlepac
drizzlepac/util.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L1021-L1031
def getRotatedSize(corners, angle):
    """ Determine the size of a rotated (meta)image."""
    if not angle:
        # No rotation: measure the corners exactly as given.
        return computeRange(corners)

    # Rotate the corner positions about the center, then measure.
    rotation_matrix = fileutil.buildRotMatrix(angle)
    rotated_corners = np.dot(corners, rotation_matrix)
    return computeRange(rotated_corners)
[ "def", "getRotatedSize", "(", "corners", ",", "angle", ")", ":", "if", "angle", ":", "_rotm", "=", "fileutil", ".", "buildRotMatrix", "(", "angle", ")", "# Rotate about the center", "_corners", "=", "np", ".", "dot", "(", "corners", ",", "_rotm", ")", "els...
Determine the size of a rotated (meta)image.
[ "Determine", "the", "size", "of", "a", "rotated", "(", "meta", ")", "image", "." ]
python
train
zhihu/redis-shard
redis_shard/shard.py
https://github.com/zhihu/redis-shard/blob/57c166ef7d55f7272b50efc1dedc1f6ed4691137/redis_shard/shard.py#L159-L170
def mset(self, mapping):
    """
    Sets each key in the ``mapping`` dict to its corresponding value.

    Keys are grouped by the shard (server) they hash to, so each backend
    connection receives a single ``mset`` call with its own subset of the
    mapping.

    :param mapping: dict of key -> value pairs to store.
    :returns: True (mirroring redis ``mset``).
    """
    # Build per-server sub-mappings directly instead of accumulating
    # (key, value) tuples and converting with dict() afterwards.
    grouped = {}
    for key, value in mapping.items():
        server_name = self.get_server_name(key)
        grouped.setdefault(server_name, {})[key] = value
    for name, items in grouped.items():
        self.connections[name].mset(items)
    return True
[ "def", "mset", "(", "self", ",", "mapping", ")", ":", "servers", "=", "{", "}", "for", "key", ",", "value", "in", "mapping", ".", "items", "(", ")", ":", "server_name", "=", "self", ".", "get_server_name", "(", "key", ")", "servers", ".", "setdefault...
Sets each key in the ``mapping`` dict to its corresponding value
[ "Sets", "each", "key", "in", "the", "mapping", "dict", "to", "its", "corresponding", "value" ]
python
valid
pyviz/holoviews
holoviews/core/accessors.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/accessors.py#L170-L204
def replace_dimensions(cls, dimensions, overrides): """Replaces dimensions in list with dictionary of overrides. Args: dimensions: List of dimensions overrides: Dictionary of dimension specs indexed by name Returns: list: List of dimensions with replacements applied """ from .dimension import Dimension replaced = [] for d in dimensions: if d.name in overrides: override = overrides[d.name] elif d.label in overrides: override = overrides[d.label] else: override = None if override is None: replaced.append(d) elif isinstance(override, (util.basestring, tuple)): replaced.append(d.clone(override)) elif isinstance(override, Dimension): replaced.append(override) elif isinstance(override, dict): replaced.append(d.clone(override.get('name',None), **{k:v for k,v in override.items() if k != 'name'})) else: raise ValueError('Dimension can only be overridden ' 'with another dimension or a dictionary ' 'of attributes') return replaced
[ "def", "replace_dimensions", "(", "cls", ",", "dimensions", ",", "overrides", ")", ":", "from", ".", "dimension", "import", "Dimension", "replaced", "=", "[", "]", "for", "d", "in", "dimensions", ":", "if", "d", ".", "name", "in", "overrides", ":", "over...
Replaces dimensions in list with dictionary of overrides. Args: dimensions: List of dimensions overrides: Dictionary of dimension specs indexed by name Returns: list: List of dimensions with replacements applied
[ "Replaces", "dimensions", "in", "list", "with", "dictionary", "of", "overrides", "." ]
python
train
log2timeline/plaso
plaso/parsers/sqlite_plugins/imessage.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/sqlite_plugins/imessage.py#L130-L156
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs): """Parses a message row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = IMessageEventData() event_data.attachment_location = self._GetRowValue( query_hash, row, 'attachment_location') event_data.imessage_id = self._GetRowValue(query_hash, row, 'imessage_id') event_data.message_type = self._GetRowValue(query_hash, row, 'message_type') event_data.offset = self._GetRowValue(query_hash, row, 'ROWID') event_data.query = query event_data.read_receipt = self._GetRowValue(query_hash, row, 'read_receipt') event_data.service = self._GetRowValue(query_hash, row, 'service') event_data.text = self._GetRowValue(query_hash, row, 'text') timestamp = self._GetRowValue(query_hash, row, 'date') date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ParseMessageRow", "(", "self", ",", "parser_mediator", ",", "query", ",", "row", ",", "*", "*", "unused_kwargs", ")", ":", "query_hash", "=", "hash", "(", "query", ")", "event_data", "=", "IMessageEventData", "(", ")", "event_data", ".", "attachment_...
Parses a message row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
[ "Parses", "a", "message", "row", "." ]
python
train
dagwieers/vmguestlib
vmguestlib.py
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L393-L399
def GetMemSharedSavedMB(self): '''Retrieves the estimated amount of physical memory on the host saved from copy-on-write (COW) shared guest physical memory.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemSharedSavedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
[ "def", "GetMemSharedSavedMB", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemSharedSavedMB", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", ...
Retrieves the estimated amount of physical memory on the host saved from copy-on-write (COW) shared guest physical memory.
[ "Retrieves", "the", "estimated", "amount", "of", "physical", "memory", "on", "the", "host", "saved", "from", "copy", "-", "on", "-", "write", "(", "COW", ")", "shared", "guest", "physical", "memory", "." ]
python
train
alexhayes/django-toolkit
django_toolkit/templatetags/actions.py
https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/templatetags/actions.py#L8-L18
def actions(obj, **kwargs): """ Return actions available for an object """ if 'exclude' in kwargs: kwargs['exclude'] = kwargs['exclude'].split(',') actions = obj.get_actions(**kwargs) if isinstance(actions, dict): actions = actions.values() buttons = "".join("%s" % action.render() for action in actions) return '<div class="actions">%s</div>' % buttons
[ "def", "actions", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "'exclude'", "in", "kwargs", ":", "kwargs", "[", "'exclude'", "]", "=", "kwargs", "[", "'exclude'", "]", ".", "split", "(", "','", ")", "actions", "=", "obj", ".", "get_actions", ...
Return actions available for an object
[ "Return", "actions", "available", "for", "an", "object" ]
python
train
pescadores/pescador
pescador/mux.py
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/mux.py#L932-L947
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Don't activate the stream if the weight is 0 or None if self.stream_weights_[idx]: self.streams_[idx] = self.streamers[idx].iterate() else: self.streams_[idx] = None # Reset the sample count to zero self.stream_counts_[idx] = 0
[ "def", "_new_stream", "(", "self", ",", "idx", ")", ":", "# Don't activate the stream if the weight is 0 or None", "if", "self", ".", "stream_weights_", "[", "idx", "]", ":", "self", ".", "streams_", "[", "idx", "]", "=", "self", ".", "streamers", "[", "idx", ...
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
[ "Randomly", "select", "and", "create", "a", "new", "stream", "." ]
python
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L331-L339
def after_processing(eng, objects): """Process to update status.""" super(InvenioProcessingFactory, InvenioProcessingFactory)\ .after_processing(eng, objects) if eng.has_completed: eng.save(WorkflowStatus.COMPLETED) else: eng.save(WorkflowStatus.HALTED) db.session.commit()
[ "def", "after_processing", "(", "eng", ",", "objects", ")", ":", "super", "(", "InvenioProcessingFactory", ",", "InvenioProcessingFactory", ")", ".", "after_processing", "(", "eng", ",", "objects", ")", "if", "eng", ".", "has_completed", ":", "eng", ".", "save...
Process to update status.
[ "Process", "to", "update", "status", "." ]
python
train
numberoverzero/bloop
bloop/models.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L591-L608
def unpack_from_dynamodb(*, attrs, expected, model=None, obj=None, engine=None, context=None, **kwargs): """Push values by dynamo_name into an object""" context = context or {"engine": engine} engine = engine or context.get("engine", None) if not engine: raise ValueError("You must provide engine or a context with an engine.") if model is None and obj is None: raise ValueError("You must provide a model or obj to unpack.") if model is not None and obj is not None: raise ValueError("Only specify model or obj.") if model: obj = model.Meta.init() for column in expected: value = attrs.get(column.dynamo_name, None) value = engine._load(column.typedef, value, context=context, **kwargs) setattr(obj, column.name, value) return obj
[ "def", "unpack_from_dynamodb", "(", "*", ",", "attrs", ",", "expected", ",", "model", "=", "None", ",", "obj", "=", "None", ",", "engine", "=", "None", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "context", "=", "context", "or", ...
Push values by dynamo_name into an object
[ "Push", "values", "by", "dynamo_name", "into", "an", "object" ]
python
train
tanghaibao/jcvi
jcvi/compara/catalog.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/catalog.py#L587-L708
def ortholog(args): """ %prog ortholog species_a species_b Run a sensitive pipeline to find orthologs between two species a and b. The pipeline runs LAST and generate .lifted.anchors. `--full` mode would assume 1-to-1 quota synteny blocks as the backbone of such predictions. Extra orthologs will be recruited from reciprocal best match (RBH). """ from jcvi.apps.align import last as last_main from jcvi.compara.blastfilter import main as blastfilter_main from jcvi.compara.quota import main as quota_main from jcvi.compara.synteny import scan, mcscan, liftover from jcvi.formats.blast import cscore, filter p = OptionParser(ortholog.__doc__) p.add_option("--dbtype", default="nucl", choices=("nucl", "prot"), help="Molecule type of subject database") p.add_option("--full", default=False, action="store_true", help="Run in full mode, including blocks and RBH") p.add_option("--cscore", default=0.7, type="float", help="C-score cutoff [default: %default]") p.add_option("--dist", default=20, type="int", help="Extent of flanking regions to search") p.add_option("--quota", help="Quota align parameter") p.add_option("--nostdpf", default=False, action="store_true", help="Do not standardize contig names") p.add_option("--no_strip_names", default=False, action="store_true", help="Do not strip alternative splicing " "(e.g. 
At5g06540.1 -> At5g06540)") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) a, b = args dbtype = opts.dbtype suffix = ".cds" if dbtype == "nucl" else ".pep" abed, afasta = a + ".bed", a + suffix bbed, bfasta = b + ".bed", b + suffix ccscore = opts.cscore quota = opts.quota dist = "--dist={0}".format(opts.dist) aprefix = afasta.split(".")[0] bprefix = bfasta.split(".")[0] pprefix = ".".join((aprefix, bprefix)) qprefix = ".".join((bprefix, aprefix)) last = pprefix + ".last" if need_update((afasta, bfasta), last): last_main([bfasta, afasta], dbtype) if a == b: lastself = last + ".P98L0.inverse" if need_update(last, lastself): filter([last, "--hitlen=0", "--pctid=98", "--inverse", "--noself"]) last = lastself filtered_last = last + ".filtered" if need_update(last, filtered_last): if opts.no_strip_names: blastfilter_main([last, "--cscore={0}".format(ccscore), "--no_strip_names"]) else: blastfilter_main([last, "--cscore={0}".format(ccscore)]) anchors = pprefix + ".anchors" lifted_anchors = pprefix + ".lifted.anchors" pdf = pprefix + ".pdf" if not opts.full: if need_update(filtered_last, lifted_anchors): if opts.no_strip_names: scan([filtered_last, anchors, dist, "--liftover={0}".format(last), "--no_strip_names"]) else: scan([filtered_last, anchors, dist, "--liftover={0}".format(last)]) if quota: quota_main([lifted_anchors, "--quota={0}".format(quota), "--screen"]) if need_update(anchors, pdf): from jcvi.graphics.dotplot import dotplot_main dargs = [anchors] if opts.nostdpf: dargs += ["--nostdpf", "--skipempty"] dotplot_main(dargs) return if need_update(filtered_last, anchors): if opts.no_strip_names: scan([filtered_last, anchors, dist, "--no_strip_names"]) else: scan([filtered_last, anchors, dist]) ooanchors = pprefix + ".1x1.anchors" if need_update(anchors, ooanchors): quota_main([anchors, "--quota=1:1", "--screen"]) lifted_anchors = pprefix + ".1x1.lifted.anchors" if need_update((last, ooanchors), lifted_anchors): if opts.no_strip_names: 
liftover([last, ooanchors, dist, "--no_strip_names"]) else: liftover([last, ooanchors, dist]) pblocks = pprefix + ".1x1.blocks" qblocks = qprefix + ".1x1.blocks" if need_update(lifted_anchors, [pblocks, qblocks]): mcscan([abed, lifted_anchors, "--iter=1", "-o", pblocks]) mcscan([bbed, lifted_anchors, "--iter=1", "-o", qblocks]) rbh = pprefix + ".rbh" if need_update(last, rbh): cscore([last, "-o", rbh]) portho = pprefix + ".ortholog" qortho = qprefix + ".ortholog" if need_update([pblocks, qblocks, rbh], [portho, qortho]): make_ortholog(pblocks, rbh, portho) make_ortholog(qblocks, rbh, qortho)
[ "def", "ortholog", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "align", "import", "last", "as", "last_main", "from", "jcvi", ".", "compara", ".", "blastfilter", "import", "main", "as", "blastfilter_main", "from", "jcvi", ".", "compara", ".", ...
%prog ortholog species_a species_b Run a sensitive pipeline to find orthologs between two species a and b. The pipeline runs LAST and generate .lifted.anchors. `--full` mode would assume 1-to-1 quota synteny blocks as the backbone of such predictions. Extra orthologs will be recruited from reciprocal best match (RBH).
[ "%prog", "ortholog", "species_a", "species_b" ]
python
train
spyder-ide/spyder
spyder/config/user.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/user.py#L263-L272
def _load_old_defaults(self, old_version): """Read old defaults""" old_defaults = cp.ConfigParser() if check_version(old_version, '3.0.0', '<='): path = get_module_source_path('spyder') else: path = osp.dirname(self.filename()) path = osp.join(path, 'defaults') old_defaults.read(osp.join(path, 'defaults-'+old_version+'.ini')) return old_defaults
[ "def", "_load_old_defaults", "(", "self", ",", "old_version", ")", ":", "old_defaults", "=", "cp", ".", "ConfigParser", "(", ")", "if", "check_version", "(", "old_version", ",", "'3.0.0'", ",", "'<='", ")", ":", "path", "=", "get_module_source_path", "(", "'...
Read old defaults
[ "Read", "old", "defaults" ]
python
train
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L614-L638
def IsDir(directory): ''' :param unicode directory: A path :rtype: bool :returns: Returns whether the given path points to an existent directory. :raises NotImplementedProtocol: If the path protocol is not local or ftp .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ''' from six.moves.urllib.parse import urlparse directory_url = urlparse(directory) if _UrlIsLocal(directory_url): return os.path.isdir(directory) elif directory_url.scheme == 'ftp': from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) else: from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(directory_url.scheme)
[ "def", "IsDir", "(", "directory", ")", ":", "from", "six", ".", "moves", ".", "urllib", ".", "parse", "import", "urlparse", "directory_url", "=", "urlparse", "(", "directory", ")", "if", "_UrlIsLocal", "(", "directory_url", ")", ":", "return", "os", ".", ...
:param unicode directory: A path :rtype: bool :returns: Returns whether the given path points to an existent directory. :raises NotImplementedProtocol: If the path protocol is not local or ftp .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
[ ":", "param", "unicode", "directory", ":", "A", "path" ]
python
valid
hobson/aima
aima/learning.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/learning.py#L168-L174
def smooth_for(self, o): """Include o among the possible observations, whether or not it's been observed yet.""" if o not in self.dictionary: self.dictionary[o] = self.default self.n_obs += self.default self.sampler = None
[ "def", "smooth_for", "(", "self", ",", "o", ")", ":", "if", "o", "not", "in", "self", ".", "dictionary", ":", "self", ".", "dictionary", "[", "o", "]", "=", "self", ".", "default", "self", ".", "n_obs", "+=", "self", ".", "default", "self", ".", ...
Include o among the possible observations, whether or not it's been observed yet.
[ "Include", "o", "among", "the", "possible", "observations", "whether", "or", "not", "it", "s", "been", "observed", "yet", "." ]
python
valid
GoogleCloudPlatform/psq
psq/psqworker.py
https://github.com/GoogleCloudPlatform/psq/blob/3c5130731d72b6c32d09a6a5d478f3580ff36d50/psq/psqworker.py#L72-L103
def main(path, pid, queue): """ Standalone PSQ worker. The queue argument must be the full importable path to a psq.Queue instance. Example usage: psqworker config.q psqworker --path /opt/app queues.fast """ setup_logging() if pid: with open(os.path.expanduser(pid), "w") as f: f.write(str(os.getpid())) if not path: path = os.getcwd() sys.path.insert(0, path) queue = import_queue(queue) import psq worker = psq.Worker(queue=queue) worker.listen()
[ "def", "main", "(", "path", ",", "pid", ",", "queue", ")", ":", "setup_logging", "(", ")", "if", "pid", ":", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "pid", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", ...
Standalone PSQ worker. The queue argument must be the full importable path to a psq.Queue instance. Example usage: psqworker config.q psqworker --path /opt/app queues.fast
[ "Standalone", "PSQ", "worker", "." ]
python
valid
noahbenson/neuropythy
neuropythy/vision/cmag.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/cmag.py#L545-L645
def cmag(mesh, retinotopy='any', surface=None, to='vertices'): ''' cmag(mesh) yields the neighborhood-based cortical magnification for the given mesh. cmag(mesh, retinotopy) uses the given retinotopy argument; this must be interpretable by the as_retinotopy function, or should be the name of a source (such as 'empirical' or 'any'). The neighborhood-based cortical magnification data is yielded as a map whose keys are 'radial', 'tangential', 'areal', and 'field_sign'; the units of 'radial' and 'tangential' magnifications are cortical-distance/degree and the units on the 'areal' magnification is (cortical-distance/degree)^2; the field sign has no unit. Note that if the retinotopy source is not given, this function will by default search for any source using the retinotopy_data function. The option surface (default None) can be provided to indicate that while the retinotopy and results should be formatted for the given mesh (i.e., the result should have a value for each vertex in mesh), the surface coordinates used to calculate areas on the cortical surface should come from the given surface. The surface option may be a super-mesh of mesh. The option to='faces' or to='vertices' (the default) specifies whether the return-values should be for the vertices or the faces of the given mesh. Vertex data are calculated from the face data by summing and averaging. 
''' # First, find the retino data if pimms.is_str(retinotopy): retino = retinotopy_data(mesh, retinotopy) else: retino = retinotopy # If this is a topology, we want to change to white surface if isinstance(mesh, geo.Topology): mesh = mesh.white_surface # Convert from polar angle/eccen to longitude/latitude vcoords = np.asarray(as_retinotopy(retino, 'geographical')) # note the surface coordinates if surface is None: scoords = mesh.coordinates else: scoords = surface.coordinates if scoords.shape[1] > mesh.vertex_count: scoords = scoords[:, surface.index(mesh.labels)] faces = mesh.tess.indexed_faces sx = mesh.face_coordinates # to understand this calculation, see this stack exchange question: # https://math.stackexchange.com/questions/2431913/gradient-of-angle-between-scalar-fields # each face has a directional magnification; we need to start with the face side lengths (s0,s1,s2) = np.sqrt(np.sum((np.roll(sx, -1, axis=0) - sx)**2, axis=1)) # we want a couple other handy values: (s0_2,s1_2,s2_2) = (s0**2, s1**2, s2**2) s0_inv = zinv(s0) b = 0.5 * (s0_2 - s1_2 + s2_2) * s0_inv h = 0.5 * np.sqrt(2*s0_2*(s1_2 + s2_2) - s0_2**2 - (s1_2 - s2_2)**2) * s0_inv h_inv = zinv(h) # get the visual coordinates at each face also vx = np.asarray([vcoords[:,f] for f in faces]) # we already have enough data to calculate areal magnification s_areas = geo.triangle_area(*sx) v_areas = geo.triangle_area(*vx) arl_mag = s_areas * zinv(v_areas) # calculate the gradient at each triangle; this array is dimension 2 x 2 x m where m is the # number of triangles; the first dimension is (vx,vy) and the second dimension is (fx,fy); fx # and fy are the coordinates in an arbitrary coordinate system built for each face. 
# So, to reiterate, grad is ((d(vx0)/d(fx0), d(vx0)/d(fx1)) (d(vx1)/d(fx0), d(vx1)/d(fx1))) dvx0_dfx = (vx[2] - vx[1]) * s0_inv dvx1_dfx = (vx[0] - (vx[1] + b*dvx0_dfx)) * h_inv grad = np.asarray([dvx0_dfx, dvx1_dfx]) # Okay, we want to know the field signs; this is just whether the cross product of the two grad # vectors (dvx0/dfx and dvx1/dfx) has a positive z fsgn = np.sign(grad[0,0]*grad[1,1] - grad[0,1]*grad[1,0]) # We can calculate the angle too, which is just the arccos of the normalized dot-product grad_norms_2 = np.sum(grad**2, axis=1) grad_norms = np.sqrt(grad_norms_2) (dvx_norms_inv, dvy_norms_inv) = zinv(grad_norms) ngrad = grad * ((dvx_norms_inv, dvx_norms_inv), (dvy_norms_inv, dvy_norms_inv)) dp = np.clip(np.sum(ngrad[0] * ngrad[1], axis=0), -1, 1) fang = fsgn * np.arccos(dp) # Great; now we can calculate the drad and dtan; we have dx and dy, so we just need to # calculate the jacobian of ((drad/dvx, drad/dvy), (dtan/dvx, dtan/dvy)) vx_ctr = np.mean(vx, axis=0) (x0, y0) = vx_ctr den_inv = zinv(np.sqrt(x0**2 + y0**2)) drad_dvx = np.asarray([x0, y0]) * den_inv dtan_dvx = np.asarray([-y0, x0]) * den_inv # get dtan and drad drad_dfx = np.asarray([np.sum(drad_dvx[i]*grad[:,i], axis=0) for i in [0,1]]) dtan_dfx = np.asarray([np.sum(dtan_dvx[i]*grad[:,i], axis=0) for i in [0,1]]) # we can now turn these into the magnitudes plus the field sign rad_mag = zinv(np.sqrt(np.sum(drad_dfx**2, axis=0))) tan_mag = zinv(np.sqrt(np.sum(dtan_dfx**2, axis=0))) # this is the entire result if we are doing faces only if to == 'faces': return {'radial': rad_mag, 'tangential': tan_mag, 'areal': arl_mag, 'field_sign': fsgn} # okay, we need to do some averaging! 
mtx = simplex_summation_matrix(mesh.tess.indexed_faces) cols = np.asarray(mtx.sum(axis=1), dtype=np.float)[:,0] cols_inv = zinv(cols) # for areal magnification, we want to do summation over the s and v areas then divide s_areas = mtx.dot(s_areas) v_areas = mtx.dot(v_areas) arl_mag = s_areas * zinv(v_areas) # for the others, we just average (rad_mag, tan_mag, fsgn) = [cols_inv * mtx.dot(x) for x in (rad_mag, tan_mag, fsgn)] return {'radial': rad_mag, 'tangential': tan_mag, 'areal': arl_mag, 'field_sign': fsgn}
[ "def", "cmag", "(", "mesh", ",", "retinotopy", "=", "'any'", ",", "surface", "=", "None", ",", "to", "=", "'vertices'", ")", ":", "# First, find the retino data", "if", "pimms", ".", "is_str", "(", "retinotopy", ")", ":", "retino", "=", "retinotopy_data", ...
cmag(mesh) yields the neighborhood-based cortical magnification for the given mesh. cmag(mesh, retinotopy) uses the given retinotopy argument; this must be interpretable by the as_retinotopy function, or should be the name of a source (such as 'empirical' or 'any'). The neighborhood-based cortical magnification data is yielded as a map whose keys are 'radial', 'tangential', 'areal', and 'field_sign'; the units of 'radial' and 'tangential' magnifications are cortical-distance/degree and the units on the 'areal' magnification is (cortical-distance/degree)^2; the field sign has no unit. Note that if the retinotopy source is not given, this function will by default search for any source using the retinotopy_data function. The option surface (default None) can be provided to indicate that while the retinotopy and results should be formatted for the given mesh (i.e., the result should have a value for each vertex in mesh), the surface coordinates used to calculate areas on the cortical surface should come from the given surface. The surface option may be a super-mesh of mesh. The option to='faces' or to='vertices' (the default) specifies whether the return-values should be for the vertices or the faces of the given mesh. Vertex data are calculated from the face data by summing and averaging.
[ "cmag", "(", "mesh", ")", "yields", "the", "neighborhood", "-", "based", "cortical", "magnification", "for", "the", "given", "mesh", ".", "cmag", "(", "mesh", "retinotopy", ")", "uses", "the", "given", "retinotopy", "argument", ";", "this", "must", "be", "...
python
train
h2oai/h2o-3
scripts/run.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/run.py#L2614-L2733
def main(argv): """ Main program. :param argv Command-line arguments :return none """ global g_script_name global g_num_clouds global g_nodes_per_cloud global g_output_dir global g_test_to_run global g_test_list_file global g_exclude_list_file global g_test_group global g_runner global g_nopass global g_nointernal global g_path_to_tar global g_path_to_whl global g_perf global g_git_hash global g_git_branch global g_machine_ip global g_date global g_build_id global g_ncpu global g_os global g_job_name global g_test_ssl g_script_name = os.path.basename(argv[0]) # Calculate test_root_dir. test_root_dir = os.path.realpath(os.getcwd()) # Calculate global variables. g_output_dir = os.path.join(test_root_dir, str("results")) g_failed_output_dir = os.path.join(g_output_dir, str("failed")) testreport_dir = os.path.join(test_root_dir, str("../build/test-results")) # Override any defaults with the user's choices. parse_args(argv) # Look for h2o jar file. h2o_jar = g_path_to_h2o_jar if h2o_jar is None: possible_h2o_jar_parent_dir = test_root_dir while True: possible_h2o_jar_dir = os.path.join(possible_h2o_jar_parent_dir, "build") possible_h2o_jar = os.path.join(possible_h2o_jar_dir, "h2o.jar") if os.path.exists(possible_h2o_jar): h2o_jar = possible_h2o_jar break next_possible_h2o_jar_parent_dir = os.path.dirname(possible_h2o_jar_parent_dir) if next_possible_h2o_jar_parent_dir == possible_h2o_jar_parent_dir: break possible_h2o_jar_parent_dir = next_possible_h2o_jar_parent_dir # Wipe output directory if requested. if g_wipe_output_dir: wipe_output_dir() # Wipe persistent test state if requested. if g_wipe_test_state: wipe_test_state(test_root_dir) # Create runner object. # Just create one cloud if we're only running one test, even if the user specified more. 
if g_test_to_run is not None: g_num_clouds = 1 g_runner = TestRunner(test_root_dir, g_use_cloud, g_use_cloud2, g_use_client, g_config, g_use_ip, g_use_port, g_num_clouds, g_nodes_per_cloud, h2o_jar, g_base_port, g_jvm_xmx, g_jvm_cp, g_output_dir, g_failed_output_dir, g_path_to_tar, g_path_to_whl, g_produce_unit_reports, testreport_dir, g_r_pkg_ver_chk, g_hadoop_namenode, g_on_hadoop, g_perf, g_test_ssl, g_ldap_config, g_jvm_opts) # Build test list. if g_exclude_list_file is not None: g_runner.read_exclude_list_file(g_exclude_list_file) if g_test_to_run is not None: g_runner.add_test(g_test_to_run) elif g_test_list_file is not None: g_runner.read_test_list_file(g_test_list_file) else: # Test group can be None or not. g_runner.build_test_list(g_test_group, g_run_small, g_run_medium, g_run_large, g_run_xlarge, g_nopass, g_nointernal) # If no run is specified, then do an early exit here. if g_no_run: sys.exit(0) # Handle killing the runner. signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) # Sanity check existence of H2O jar file before starting the cloud. if not (h2o_jar and os.path.exists(h2o_jar)): print("") print("ERROR: H2O jar not found") print("") sys.exit(1) # Run. try: g_runner.start_clouds() g_runner.run_tests(g_nopass) finally: g_runner.check_clouds() g_runner.stop_clouds() g_runner.report_summary(g_nopass) # If the overall regression did not pass then exit with a failure status code. if not g_runner.get_regression_passed(): sys.exit(1)
[ "def", "main", "(", "argv", ")", ":", "global", "g_script_name", "global", "g_num_clouds", "global", "g_nodes_per_cloud", "global", "g_output_dir", "global", "g_test_to_run", "global", "g_test_list_file", "global", "g_exclude_list_file", "global", "g_test_group", "global"...
Main program. :param argv Command-line arguments :return none
[ "Main", "program", ".", ":", "param", "argv", "Command", "-", "line", "arguments", ":", "return", "none" ]
python
test
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L8642-L8659
def mxm(m1, m2): """ Multiply two 3x3 matrices. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxm_c.html :param m1: 3x3 double precision matrix. :type m1: 3x3-Element Array of floats :param m2: 3x3 double precision matrix. :type m2: 3x3-Element Array of floats :return: 3x3 double precision matrix. :rtype: 3x3-Element Array of floats """ m1 = stypes.toDoubleMatrix(m1) m2 = stypes.toDoubleMatrix(m2) mout = stypes.emptyDoubleMatrix() libspice.mxm_c(m1, m2, mout) return stypes.cMatrixToNumpy(mout)
[ "def", "mxm", "(", "m1", ",", "m2", ")", ":", "m1", "=", "stypes", ".", "toDoubleMatrix", "(", "m1", ")", "m2", "=", "stypes", ".", "toDoubleMatrix", "(", "m2", ")", "mout", "=", "stypes", ".", "emptyDoubleMatrix", "(", ")", "libspice", ".", "mxm_c",...
Multiply two 3x3 matrices. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxm_c.html :param m1: 3x3 double precision matrix. :type m1: 3x3-Element Array of floats :param m2: 3x3 double precision matrix. :type m2: 3x3-Element Array of floats :return: 3x3 double precision matrix. :rtype: 3x3-Element Array of floats
[ "Multiply", "two", "3x3", "matrices", "." ]
python
train
TaurusOlson/incisive
incisive/core.py
https://github.com/TaurusOlson/incisive/blob/25bb9f53495985c1416c82e26f54158df4050cb0/incisive/core.py#L91-L152
def write_csv(filename, header, data=None, rows=None, mode="w"): """Write the data to the specified filename Usage ----- >>> write_csv(filename, header, data, mode=mode) Parameters ---------- filename : str The name of the file header : list of strings The names of the columns (or fields): (fieldname1, fieldname2, ...) data : list of dictionaries (optional) [ {fieldname1: a1, fieldname2: a2}, {fieldname1: b1, fieldname2: b2}, ... ] rows : list of lists (optional) [ (a1, a2), (b1, b2), ... ] mode : str (optional) "w": write the data to the file by overwriting it "a": write the data to the file by appending them Returns ------- None. A CSV file is written. """ if data == rows == None: msg = "You must specify either data or rows" raise ValueError(msg) elif data != None and rows != None: msg = "You must specify either data or rows. Not both" raise ValueError(msg) data_header = dict((x, x) for x in header) with open(filename, mode) as f: if data: writer = csv.DictWriter(f, fieldnames=header) if mode == "w": writer.writerow(data_header) writer.writerows(data) elif rows: writer = csv.writer(f) if mode == "w": writer.writerow(header) writer.writerows(rows) print "Saved %s." % filename
[ "def", "write_csv", "(", "filename", ",", "header", ",", "data", "=", "None", ",", "rows", "=", "None", ",", "mode", "=", "\"w\"", ")", ":", "if", "data", "==", "rows", "==", "None", ":", "msg", "=", "\"You must specify either data or rows\"", "raise", "...
Write the data to the specified filename Usage ----- >>> write_csv(filename, header, data, mode=mode) Parameters ---------- filename : str The name of the file header : list of strings The names of the columns (or fields): (fieldname1, fieldname2, ...) data : list of dictionaries (optional) [ {fieldname1: a1, fieldname2: a2}, {fieldname1: b1, fieldname2: b2}, ... ] rows : list of lists (optional) [ (a1, a2), (b1, b2), ... ] mode : str (optional) "w": write the data to the file by overwriting it "a": write the data to the file by appending them Returns ------- None. A CSV file is written.
[ "Write", "the", "data", "to", "the", "specified", "filename", "Usage", "-----", ">>>", "write_csv", "(", "filename", "header", "data", "mode", "=", "mode", ")" ]
python
valid
briancappello/flask-unchained
flask_unchained/bundles/controller/utils.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/controller/utils.py#L52-L67
def controller_name(cls, _remove_suffixes=None) -> str: """ Returns the snake-cased name for a controller/resource class. Automatically strips ``Controller``, ``View``, and ``MethodView`` suffixes, eg:: SiteController -> site FooBarBazView -> foo_bar_baz UsersMethodView -> users """ name = cls if isinstance(cls, str) else cls.__name__ remove_suffixes = _remove_suffixes or getattr(cls, REMOVE_SUFFIXES_ATTR) for suffix in remove_suffixes: if name.endswith(suffix): name = right_replace(name, suffix, '') break return snake_case(name)
[ "def", "controller_name", "(", "cls", ",", "_remove_suffixes", "=", "None", ")", "->", "str", ":", "name", "=", "cls", "if", "isinstance", "(", "cls", ",", "str", ")", "else", "cls", ".", "__name__", "remove_suffixes", "=", "_remove_suffixes", "or", "getat...
Returns the snake-cased name for a controller/resource class. Automatically strips ``Controller``, ``View``, and ``MethodView`` suffixes, eg:: SiteController -> site FooBarBazView -> foo_bar_baz UsersMethodView -> users
[ "Returns", "the", "snake", "-", "cased", "name", "for", "a", "controller", "/", "resource", "class", ".", "Automatically", "strips", "Controller", "View", "and", "MethodView", "suffixes", "eg", "::" ]
python
train
KnowledgeLinks/rdfframework
rdfframework/search/esloaders_temp.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/esloaders_temp.py#L59-L65
def _index_item(self, uri, num, batch_num): """ queries the triplestore for an item sends it to elasticsearch """ data = RdfDataset(get_all_item_data(uri, self.namespace), uri).base_class.es_json() self.batch_data[batch_num].append(data) self.count += 1
[ "def", "_index_item", "(", "self", ",", "uri", ",", "num", ",", "batch_num", ")", ":", "data", "=", "RdfDataset", "(", "get_all_item_data", "(", "uri", ",", "self", ".", "namespace", ")", ",", "uri", ")", ".", "base_class", ".", "es_json", "(", ")", ...
queries the triplestore for an item sends it to elasticsearch
[ "queries", "the", "triplestore", "for", "an", "item", "sends", "it", "to", "elasticsearch" ]
python
train
awslabs/serverless-application-model
samtranslator/intrinsics/actions.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/actions.py#L196-L253
def resolve_resource_refs(self, input_dict, supported_resource_refs): """ Resolves reference to some property of a resource. Inside string to be substituted, there could be either a "Ref" or a "GetAtt" usage of this property. They have to be handled differently. Ref usages are directly converted to a Ref on the resolved value. GetAtt usages are split under the assumption that there can be only one property of resource referenced here. Everything else is an attribute reference. Example: Let's say `LogicalId.Property` will be resolved to `ResolvedValue` Ref usage: ${LogicalId.Property} => ${ResolvedValue} GetAtt usage: ${LogicalId.Property.Arn} => ${ResolvedValue.Arn} ${LogicalId.Property.Attr1.Attr2} => {ResolvedValue.Attr1.Attr2} :param input_dict: Dictionary to be resolved :param samtranslator.intrinsics.resource_refs.SupportedResourceReferences supported_resource_refs: Instance of an `SupportedResourceReferences` object that contain value of the property. :return: Resolved dictionary """ def do_replacement(full_ref, ref_value): """ Perform the appropriate replacement to handle ${LogicalId.Property} type references inside a Sub. This method is called to get the replacement string for each reference within Sub's value :param full_ref: Entire reference string such as "${LogicalId.Property}" :param ref_value: Just the value of the reference such as "LogicalId.Property" :return: Resolved reference of the structure "${SomeOtherLogicalId}". 
Result should always include the ${} structure since we are not resolving to final value, but just converting one reference to another """ # Split the value by separator, expecting to separate out LogicalId.Property splits = ref_value.split(self._resource_ref_separator) # If we don't find at least two parts, there is nothing to resolve if len(splits) < 2: return full_ref logical_id = splits[0] property = splits[1] resolved_value = supported_resource_refs.get(logical_id, property) if not resolved_value: # This ID/property combination is not in the supported references return full_ref # We found a LogicalId.Property combination that can be resolved. Construct the output by replacing # the part of the reference string and not constructing a new ref. This allows us to support GetAtt-like # syntax and retain other attributes. Ex: ${LogicalId.Property.Arn} => ${SomeOtherLogicalId.Arn} replacement = self._resource_ref_separator.join([logical_id, property]) return full_ref.replace(replacement, resolved_value) return self._handle_sub_action(input_dict, do_replacement)
[ "def", "resolve_resource_refs", "(", "self", ",", "input_dict", ",", "supported_resource_refs", ")", ":", "def", "do_replacement", "(", "full_ref", ",", "ref_value", ")", ":", "\"\"\"\n Perform the appropriate replacement to handle ${LogicalId.Property} type references...
Resolves reference to some property of a resource. Inside string to be substituted, there could be either a "Ref" or a "GetAtt" usage of this property. They have to be handled differently. Ref usages are directly converted to a Ref on the resolved value. GetAtt usages are split under the assumption that there can be only one property of resource referenced here. Everything else is an attribute reference. Example: Let's say `LogicalId.Property` will be resolved to `ResolvedValue` Ref usage: ${LogicalId.Property} => ${ResolvedValue} GetAtt usage: ${LogicalId.Property.Arn} => ${ResolvedValue.Arn} ${LogicalId.Property.Attr1.Attr2} => {ResolvedValue.Attr1.Attr2} :param input_dict: Dictionary to be resolved :param samtranslator.intrinsics.resource_refs.SupportedResourceReferences supported_resource_refs: Instance of an `SupportedResourceReferences` object that contain value of the property. :return: Resolved dictionary
[ "Resolves", "reference", "to", "some", "property", "of", "a", "resource", ".", "Inside", "string", "to", "be", "substituted", "there", "could", "be", "either", "a", "Ref", "or", "a", "GetAtt", "usage", "of", "this", "property", ".", "They", "have", "to", ...
python
train
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2583-L2611
def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
[ "def", "_handle_tag_master_connected", "(", "self", ",", "tag", ",", "data", ")", ":", "# handle this event only once. otherwise it will pollute the log", "# also if master type is failover all the reconnection work is done", "# by `disconnected` event handler and this event must never happe...
Handle a master_connected event
[ "Handle", "a", "master_connected", "event" ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L6094-L6101
def SetWindowText(self, text: str) -> bool: """ Call native SetWindowText if control has a valid native handle. """ handle = self.NativeWindowHandle if handle: return SetWindowText(handle, text) return False
[ "def", "SetWindowText", "(", "self", ",", "text", ":", "str", ")", "->", "bool", ":", "handle", "=", "self", ".", "NativeWindowHandle", "if", "handle", ":", "return", "SetWindowText", "(", "handle", ",", "text", ")", "return", "False" ]
Call native SetWindowText if control has a valid native handle.
[ "Call", "native", "SetWindowText", "if", "control", "has", "a", "valid", "native", "handle", "." ]
python
valid
pokerregion/poker
poker/card.py
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/card.py#L42-L48
def difference(cls, first, second): """Tells the numerical difference between two ranks.""" # so we always get a Rank instance even if string were passed in first, second = cls(first), cls(second) rank_list = list(cls) return abs(rank_list.index(first) - rank_list.index(second))
[ "def", "difference", "(", "cls", ",", "first", ",", "second", ")", ":", "# so we always get a Rank instance even if string were passed in", "first", ",", "second", "=", "cls", "(", "first", ")", ",", "cls", "(", "second", ")", "rank_list", "=", "list", "(", "c...
Tells the numerical difference between two ranks.
[ "Tells", "the", "numerical", "difference", "between", "two", "ranks", "." ]
python
train
googlefonts/glyphsLib
Lib/glyphsLib/builder/kerning.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/kerning.py#L27-L46
def _to_ufo_kerning(self, ufo, kerning_data): """Add .glyphs kerning to an UFO.""" warning_msg = "Non-existent glyph class %s found in kerning rules." for left, pairs in kerning_data.items(): match = re.match(r"@MMK_L_(.+)", left) left_is_class = bool(match) if left_is_class: left = "public.kern1.%s" % match.group(1) if left not in ufo.groups: self.logger.warning(warning_msg % left) for right, kerning_val in pairs.items(): match = re.match(r"@MMK_R_(.+)", right) right_is_class = bool(match) if right_is_class: right = "public.kern2.%s" % match.group(1) if right not in ufo.groups: self.logger.warning(warning_msg % right) ufo.kerning[left, right] = kerning_val
[ "def", "_to_ufo_kerning", "(", "self", ",", "ufo", ",", "kerning_data", ")", ":", "warning_msg", "=", "\"Non-existent glyph class %s found in kerning rules.\"", "for", "left", ",", "pairs", "in", "kerning_data", ".", "items", "(", ")", ":", "match", "=", "re", "...
Add .glyphs kerning to an UFO.
[ "Add", ".", "glyphs", "kerning", "to", "an", "UFO", "." ]
python
train
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L879-L905
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"): """ Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns: """ merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"], on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"]) all_members = self.data[mode]["combo"]["Ensemble_Member"] members = np.unique(all_members) all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"]) run_dates = pd.DatetimeIndex(np.unique(all_run_dates)) print(run_dates) for member in members: for run_date in run_dates: mem_run_index = (all_run_dates == run_date) & (all_members == member) member_forecast = merged_forecasts.loc[mem_run_index] member_forecast.to_csv(join(csv_path, "hail_forecasts_{0}_{1}_{2}.csv".format(self.ensemble_name, member, run_date.strftime (run_date_format)))) return
[ "def", "output_forecasts_csv", "(", "self", ",", "forecasts", ",", "mode", ",", "csv_path", ",", "run_date_format", "=", "\"%Y%m%d-%H%M\"", ")", ":", "merged_forecasts", "=", "pd", ".", "merge", "(", "forecasts", "[", "\"condition\"", "]", ",", "forecasts", "[...
Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns:
[ "Output", "hail", "forecast", "values", "to", "csv", "files", "by", "run", "date", "and", "ensemble", "member", "." ]
python
train
pandas-dev/pandas
pandas/core/resample.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L391-L406
def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: obj = self.obj if isinstance(obj.index, PeriodIndex): result.index = obj.index.asfreq(self.freq) else: result.index = obj.index._shallow_copy(freq=self.freq) result.name = getattr(obj, 'name', None) return result
[ "def", "_wrap_result", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "ABCSeries", ")", "and", "self", ".", "_selection", "is", "not", "None", ":", "result", ".", "name", "=", "self", ".", "_selection", "if", "isinstance", ...
Potentially wrap any results.
[ "Potentially", "wrap", "any", "results", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/ImSim/MultiBand/multiband.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/MultiBand/multiband.py#L54-L71
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False): """ computes the likelihood of the data given a model This is specified with the non-linear parameters and a linear inversion and prior marginalisation. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images) """ # generate image logL = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: logL += self._imageModel_list[i].likelihood_data_given_model(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=source_marg) return logL
[ "def", "likelihood_data_given_model", "(", "self", ",", "kwargs_lens", ",", "kwargs_source", ",", "kwargs_lens_light", ",", "kwargs_ps", ",", "source_marg", "=", "False", ")", ":", "# generate image", "logL", "=", "0", "for", "i", "in", "range", "(", "self", "...
computes the likelihood of the data given a model This is specified with the non-linear parameters and a linear inversion and prior marginalisation. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
[ "computes", "the", "likelihood", "of", "the", "data", "given", "a", "model", "This", "is", "specified", "with", "the", "non", "-", "linear", "parameters", "and", "a", "linear", "inversion", "and", "prior", "marginalisation", ".", ":", "param", "kwargs_lens", ...
python
train
saltstack/salt
salt/states/win_dacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_dacl.py#L191-L222
def disinherit(name, objectType, copy_inherited_acl=True): ''' Ensure an object is not inheriting ACLs from its parent ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} tRet = __salt__['win_dacl.check_inheritance'](name, objectType) if tRet['result']: if tRet['Inheritance']: if __opts__['test']: ret['result'] = None ret['changes']['Inheritance'] = "Disabled" ret['comment'] = 'Inheritance is set to be disabled.' ret['changes']['Inherited ACLs'] = ( 'Are set to be kept' if copy_inherited_acl else 'Are set to be removed') return ret eRet = __salt__['win_dacl.disable_inheritance'](name, objectType, copy_inherited_acl) ret['result'] = eRet['result'] if eRet['result']: ret['changes'] = dict(ret['changes'], **eRet['changes']) else: ret['comment'] = ' '.join([ret['comment'], eRet['comment']]) else: if __opts__['test']: ret['result'] = None ret['comment'] = 'Inheritance is disabled.' else: ret['result'] = False ret['comment'] = tRet['comment'] return ret
[ "def", "disinherit", "(", "name", ",", "objectType", ",", "copy_inherited_acl", "=", "True", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "tRet", "=...
Ensure an object is not inheriting ACLs from its parent
[ "Ensure", "an", "object", "is", "not", "inheriting", "ACLs", "from", "its", "parent" ]
python
train
cihai/cihai
examples/variants.py
https://github.com/cihai/cihai/blob/43b0c2931da18c1ef1ff1cdd71e4b1c5eca24a41/examples/variants.py#L15-L34
def run(unihan_options={}): """Wrapped so we can test in tests/test_examples.py""" print("This example prints variant character data.") c = Cihai() if not c.unihan.is_bootstrapped: # download and install Unihan to db c.unihan.bootstrap(unihan_options) c.unihan.add_plugin( 'cihai.data.unihan.dataset.UnihanVariants', namespace='variants' ) print("## ZVariants") variant_list(c.unihan, "kZVariant") print("## kSemanticVariant") variant_list(c.unihan, "kSemanticVariant") print("## kSpecializedSemanticVariant") variant_list(c.unihan, "kSpecializedSemanticVariant")
[ "def", "run", "(", "unihan_options", "=", "{", "}", ")", ":", "print", "(", "\"This example prints variant character data.\"", ")", "c", "=", "Cihai", "(", ")", "if", "not", "c", ".", "unihan", ".", "is_bootstrapped", ":", "# download and install Unihan to db", ...
Wrapped so we can test in tests/test_examples.py
[ "Wrapped", "so", "we", "can", "test", "in", "tests", "/", "test_examples", ".", "py" ]
python
train
googlemaps/google-maps-services-python
googlemaps/convert.py
https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/convert.py#L156-L164
def _is_list(arg): """Checks if arg is list-like. This excludes strings and dicts.""" if isinstance(arg, dict): return False if isinstance(arg, str): # Python 3-only, as str has __iter__ return False return (not _has_method(arg, "strip") and _has_method(arg, "__getitem__") or _has_method(arg, "__iter__"))
[ "def", "_is_list", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "dict", ")", ":", "return", "False", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "# Python 3-only, as str has __iter__", "return", "False", "return", "(", "not", "_has_...
Checks if arg is list-like. This excludes strings and dicts.
[ "Checks", "if", "arg", "is", "list", "-", "like", ".", "This", "excludes", "strings", "and", "dicts", "." ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L922-L942
def paste(self, *args): """ Usage: paste([PSMRL], text) If a pattern is specified, the pattern is clicked first. Doesn't support text paths. ``text`` is pasted as is using the OS paste shortcut (Ctrl+V for Windows/Linux, Cmd+V for OS X). Note that `paste()` does NOT use special formatting like `type()`. """ target = None text = "" if len(args) == 1 and isinstance(args[0], basestring): text = args[0] elif len(args) == 2 and isinstance(args[1], basestring): self.click(target) text = args[1] else: raise TypeError("paste method expected [PSMRL], text") pyperclip.copy(text) # Triggers OS paste for foreground window PlatformManager.osPaste() time.sleep(0.2)
[ "def", "paste", "(", "self", ",", "*", "args", ")", ":", "target", "=", "None", "text", "=", "\"\"", "if", "len", "(", "args", ")", "==", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "basestring", ")", ":", "text", "=", "args", "[...
Usage: paste([PSMRL], text) If a pattern is specified, the pattern is clicked first. Doesn't support text paths. ``text`` is pasted as is using the OS paste shortcut (Ctrl+V for Windows/Linux, Cmd+V for OS X). Note that `paste()` does NOT use special formatting like `type()`.
[ "Usage", ":", "paste", "(", "[", "PSMRL", "]", "text", ")" ]
python
train
kivy/python-for-android
pythonforandroid/build.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/build.py#L596-L806
def run_pymodules_install(ctx, modules, project_dir, ignore_setup_py=False): """ This function will take care of all non-recipe things, by: 1. Processing them from --requirements (the modules argument) and installing them 2. Installing the user project/app itself via setup.py if ignore_setup_py=True """ info('*** PYTHON PACKAGE / PROJECT INSTALL STAGE ***') modules = list(filter(ctx.not_has_package, modules)) # Bail out if no python deps and no setup.py to process: if not modules and ( ignore_setup_py or not project_has_setup_py(project_dir) ): info('No Python modules and no setup.py to process, skipping') return # Output messages about what we're going to do: if modules: info('The requirements ({}) don\'t have recipes, attempting to ' 'install them with pip'.format(', '.join(modules))) info('If this fails, it may mean that the module has compiled ' 'components and needs a recipe.') if project_has_setup_py(project_dir) and not ignore_setup_py: info('Will process project install, if it fails then the ' 'project may not be compatible for Android install.') venv = sh.Command(ctx.virtualenv) with current_directory(join(ctx.build_dir)): shprint(venv, '--python=python{}'.format( ctx.python_recipe.major_minor_version_string. partition(".")[0] ), 'venv' ) # Prepare base environment and upgrade pip: base_env = copy.copy(os.environ) base_env["PYTHONPATH"] = ctx.get_site_packages_dir() info('Upgrade pip to latest version') shprint(sh.bash, '-c', ( "source venv/bin/activate && pip install -U pip" ), _env=copy.copy(base_env)) # Install Cython in case modules need it to build: info('Install Cython in case one of the modules needs it to build') shprint(sh.bash, '-c', ( "venv/bin/pip install Cython" ), _env=copy.copy(base_env)) # Get environment variables for build (with CC/compiler set): standard_recipe = CythonRecipe() standard_recipe.ctx = ctx # (note: following line enables explicit -lpython... 
linker options) standard_recipe.call_hostpython_via_targetpython = False recipe_env = standard_recipe.get_recipe_env(ctx.archs[0]) env = copy.copy(base_env) env.update(recipe_env) # Make sure our build package dir is available, and the virtualenv # site packages come FIRST (so the proper pip version is used): env["PYTHONPATH"] += ":" + ctx.get_site_packages_dir() env["PYTHONPATH"] = os.path.abspath(join( ctx.build_dir, "venv", "lib", "python" + ctx.python_recipe.major_minor_version_string, "site-packages")) + ":" + env["PYTHONPATH"] # Install the manually specified requirements first: if not modules: info('There are no Python modules to install, skipping') else: info('Creating a requirements.txt file for the Python modules') with open('requirements.txt', 'w') as fileh: for module in modules: key = 'VERSION_' + module if key in environ: line = '{}=={}\n'.format(module, environ[key]) else: line = '{}\n'.format(module) fileh.write(line) info('Installing Python modules with pip') info('IF THIS FAILS, THE MODULES MAY NEED A RECIPE. ' 'A reason for this is often modules compiling ' 'native code that is unaware of Android cross-compilation ' 'and does not work without additional ' 'changes / workarounds.') shprint(sh.bash, '-c', ( "venv/bin/pip " + "install -v --target '{0}' --no-deps -r requirements.txt" ).format(ctx.get_site_packages_dir().replace("'", "'\"'\"'")), _env=copy.copy(env)) # Afterwards, run setup.py if present: if project_has_setup_py(project_dir) and not ignore_setup_py: with current_directory(project_dir): info('got setup.py or similar, running project install. 
' + '(disable this behavior with --ignore-setup-py)') # Compute & output the constraints we will use: info('Contents that will be used for constraints.txt:') constraints = subprocess.check_output([ join( ctx.build_dir, "venv", "bin", "pip" ), "freeze" ], env=copy.copy(env)) try: constraints = constraints.decode("utf-8", "replace") except AttributeError: pass info(constraints) # Make sure all packages found are fixed in version # by writing a constraint file, to avoid recipes being # upgraded & reinstalled: with open('constraints.txt', 'wb') as fileh: fileh.write(constraints.encode("utf-8", "replace")) info('Populating venv\'s site-packages with ' 'ctx.get_site_packages_dir()...') # Copy dist contents into site-packages for discovery. # Why this is needed: # --target is somewhat evil and messes with discovery of # packages in PYTHONPATH if that also includes the target # folder. So we need to use the regular virtualenv # site-packages folder instead. # Reference: # https://github.com/pypa/pip/issues/6223 ctx_site_packages_dir = os.path.normpath( os.path.abspath(ctx.get_site_packages_dir()) ) venv_site_packages_dir = os.path.normpath(os.path.join( ctx.build_dir, "venv", "lib", [ f for f in os.listdir(os.path.join( ctx.build_dir, "venv", "lib" )) if f.startswith("python") ][0], "site-packages" )) copied_over_contents = [] for f in os.listdir(ctx_site_packages_dir): full_path = os.path.join(ctx_site_packages_dir, f) if not os.path.exists(os.path.join( venv_site_packages_dir, f )): if os.path.isdir(full_path): shutil.copytree(full_path, os.path.join( venv_site_packages_dir, f )) else: shutil.copy2(full_path, os.path.join( venv_site_packages_dir, f )) copied_over_contents.append(f) # Get listing of virtualenv's site-packages, to see the # newly added things afterwards & copy them back into # the distribution folder / build context site-packages: previous_venv_contents = os.listdir(venv_site_packages_dir) # Actually run setup.py: info('Launching package install...') 
shprint(sh.bash, '-c', ( "'" + join( ctx.build_dir, "venv", "bin", "pip" ).replace("'", "'\"'\"'") + "' " + "install -c constraints.txt -v ." ).format(ctx.get_site_packages_dir().replace("'", "'\"'\"'")), _env=copy.copy(env)) # Go over all new additions and copy them back: info('Copying additions resulting from setup.py back ' + 'into ctx.get_site_packages_dir()...') new_venv_additions = [] for f in (set(os.listdir(venv_site_packages_dir)) - set(previous_venv_contents)): new_venv_additions.append(f) full_path = os.path.join(venv_site_packages_dir, f) if os.path.isdir(full_path): shutil.copytree(full_path, os.path.join( ctx_site_packages_dir, f )) else: shutil.copy2(full_path, os.path.join( ctx_site_packages_dir, f )) # Undo all the changes we did to the venv-site packages: info('Reverting additions to virtualenv\'s site-packages...') for f in set(copied_over_contents + new_venv_additions): full_path = os.path.join(venv_site_packages_dir, f) if os.path.isdir(full_path): shutil.rmtree(full_path) else: os.remove(full_path) elif not ignore_setup_py: info("No setup.py found in project directory: " + str(project_dir) ) # Strip object files after potential Cython or native code builds: standard_recipe.strip_object_files(ctx.archs[0], env, build_dir=ctx.build_dir)
[ "def", "run_pymodules_install", "(", "ctx", ",", "modules", ",", "project_dir", ",", "ignore_setup_py", "=", "False", ")", ":", "info", "(", "'*** PYTHON PACKAGE / PROJECT INSTALL STAGE ***'", ")", "modules", "=", "list", "(", "filter", "(", "ctx", ".", "not_has_p...
This function will take care of all non-recipe things, by: 1. Processing them from --requirements (the modules argument) and installing them 2. Installing the user project/app itself via setup.py if ignore_setup_py=True
[ "This", "function", "will", "take", "care", "of", "all", "non", "-", "recipe", "things", "by", ":" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/genesis/genesis.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/genesis/genesis.py#L173-L189
def shot_taskfile_sel_changed(self, tf): """Callback for when the version selection has changed :param tf: the selected taskfileinfo :type tf: :class:`TaskFileInfo` | None :returns: None :rtype: None :raises: None """ self.shot_open_pb.setEnabled(bool(tf)) # only allow new, if the releasetype is work # only allow new, if there is a shot. if there is a shot, there should always be a task. enablenew = bool(self.browser.shotbrws.selected_indexes(1)) and self.browser.get_releasetype() == djadapter.RELEASETYPES['work'] self.shot_save_pb.setEnabled(enablenew) self.shot_descriptor_le.setEnabled(enablenew) self.shot_comment_pte.setEnabled(enablenew) self.update_descriptor_le(self.shot_descriptor_le, tf)
[ "def", "shot_taskfile_sel_changed", "(", "self", ",", "tf", ")", ":", "self", ".", "shot_open_pb", ".", "setEnabled", "(", "bool", "(", "tf", ")", ")", "# only allow new, if the releasetype is work", "# only allow new, if there is a shot. if there is a shot, there should alwa...
Callback for when the version selection has changed :param tf: the selected taskfileinfo :type tf: :class:`TaskFileInfo` | None :returns: None :rtype: None :raises: None
[ "Callback", "for", "when", "the", "version", "selection", "has", "changed" ]
python
train
onelogin/python3-saml
src/onelogin/saml2/utils.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/utils.py#L986-L1009
def sign_binary(msg, key, algorithm=xmlsec.Transform.RSA_SHA1, debug=False): """ Sign binary message :param msg: The element we should validate :type: bytes :param key: The private key :type: string :param debug: Activate the xmlsec debug :type: bool :return signed message :rtype str """ if isinstance(msg, str): msg = msg.encode('utf8') xmlsec.enable_debug_trace(debug) dsig_ctx = xmlsec.SignatureContext() dsig_ctx.key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None) return dsig_ctx.sign_binary(compat.to_bytes(msg), algorithm)
[ "def", "sign_binary", "(", "msg", ",", "key", ",", "algorithm", "=", "xmlsec", ".", "Transform", ".", "RSA_SHA1", ",", "debug", "=", "False", ")", ":", "if", "isinstance", "(", "msg", ",", "str", ")", ":", "msg", "=", "msg", ".", "encode", "(", "'u...
Sign binary message :param msg: The element we should validate :type: bytes :param key: The private key :type: string :param debug: Activate the xmlsec debug :type: bool :return signed message :rtype str
[ "Sign", "binary", "message" ]
python
train
libtcod/python-tcod
tdl/__init__.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tdl/__init__.py#L342-L376
def write(self, string): """This method mimics basic file-like behaviour. Because of this method you can replace sys.stdout or sys.stderr with a :any:`Console` or :any:`Window` instance. This is a convoluted process and behaviour seen now can be excepted to change on later versions. Args: string (Text): The text to write out. .. seealso:: :any:`set_colors`, :any:`set_mode`, :any:`Window` """ # some 'basic' line buffer stuff. # there must be an easier way to do this. The textwrap module didn't # help much. x, y = self._normalizeCursor(*self._cursor) width, height = self.get_size() wrapper = _textwrap.TextWrapper(initial_indent=(' '*x), width=width) writeLines = [] for line in string.split('\n'): if line: writeLines += wrapper.wrap(line) wrapper.initial_indent = '' else: writeLines.append([]) for line in writeLines: x, y = self._normalizeCursor(x, y) self.draw_str(x, y, line[x:], self._fg, self._bg) y += 1 x = 0 y -= 1 self._cursor = (x, y)
[ "def", "write", "(", "self", ",", "string", ")", ":", "# some 'basic' line buffer stuff.", "# there must be an easier way to do this. The textwrap module didn't", "# help much.", "x", ",", "y", "=", "self", ".", "_normalizeCursor", "(", "*", "self", ".", "_cursor", ")"...
This method mimics basic file-like behaviour. Because of this method you can replace sys.stdout or sys.stderr with a :any:`Console` or :any:`Window` instance. This is a convoluted process and behaviour seen now can be excepted to change on later versions. Args: string (Text): The text to write out. .. seealso:: :any:`set_colors`, :any:`set_mode`, :any:`Window`
[ "This", "method", "mimics", "basic", "file", "-", "like", "behaviour", "." ]
python
train
jd/tenacity
tenacity/compat.py
https://github.com/jd/tenacity/blob/354c40b7dc8e728c438668100dd020b65c84dfc6/tenacity/compat.py#L82-L94
def func_takes_last_result(waiter): """Check if function has a "last_result" parameter. Needed to provide backward compatibility for wait functions that didn't take "last_result" in the beginning. """ if not six.callable(waiter): return False if not inspect.isfunction(waiter) and not inspect.ismethod(waiter): # waiter is a class, check dunder-call rather than dunder-init. waiter = waiter.__call__ waiter_spec = _utils.getargspec(waiter) return 'last_result' in waiter_spec.args
[ "def", "func_takes_last_result", "(", "waiter", ")", ":", "if", "not", "six", ".", "callable", "(", "waiter", ")", ":", "return", "False", "if", "not", "inspect", ".", "isfunction", "(", "waiter", ")", "and", "not", "inspect", ".", "ismethod", "(", "wait...
Check if function has a "last_result" parameter. Needed to provide backward compatibility for wait functions that didn't take "last_result" in the beginning.
[ "Check", "if", "function", "has", "a", "last_result", "parameter", "." ]
python
train
MKLab-ITI/reveal-user-annotation
reveal_user_annotation/text/clean_text.py
https://github.com/MKLab-ITI/reveal-user-annotation/blob/ed019c031857b091e5601f53ba3f01a499a0e3ef/reveal_user_annotation/text/clean_text.py#L324-L334
def separate_camel_case(word, first_cap_re, all_cap_re): """ What it says on the tin. Input: - word: A string that may be in camelCase. Output: - separated_word: A list of strings with camel case separated. """ s1 = first_cap_re.sub(r'\1 \2', word) separated_word = all_cap_re.sub(r'\1 \2', s1) return separated_word
[ "def", "separate_camel_case", "(", "word", ",", "first_cap_re", ",", "all_cap_re", ")", ":", "s1", "=", "first_cap_re", ".", "sub", "(", "r'\\1 \\2'", ",", "word", ")", "separated_word", "=", "all_cap_re", ".", "sub", "(", "r'\\1 \\2'", ",", "s1", ")", "re...
What it says on the tin. Input: - word: A string that may be in camelCase. Output: - separated_word: A list of strings with camel case separated.
[ "What", "it", "says", "on", "the", "tin", "." ]
python
train
nsqio/pynsq
nsq/event.py
https://github.com/nsqio/pynsq/blob/48bf62d65ea63cddaa401efb23187b95511dbc84/nsq/event.py#L73-L84
def trigger(self, name, *args, **kwargs): """ Execute the callbacks for the listeners on the specified event with the supplied arguments. All extra arguments are passed through to each callback. :param name: the name of the event :type name: string """ for ev in self.__listeners[name]: ev(*args, **kwargs)
[ "def", "trigger", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "ev", "in", "self", ".", "__listeners", "[", "name", "]", ":", "ev", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Execute the callbacks for the listeners on the specified event with the supplied arguments. All extra arguments are passed through to each callback. :param name: the name of the event :type name: string
[ "Execute", "the", "callbacks", "for", "the", "listeners", "on", "the", "specified", "event", "with", "the", "supplied", "arguments", "." ]
python
test
euske/pdfminer
pdfminer/utils.py
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L107-L110
def csort(objs, key): """Order-preserving sorting function.""" idxs = dict((obj, i) for (i, obj) in enumerate(objs)) return sorted(objs, key=lambda obj: (key(obj), idxs[obj]))
[ "def", "csort", "(", "objs", ",", "key", ")", ":", "idxs", "=", "dict", "(", "(", "obj", ",", "i", ")", "for", "(", "i", ",", "obj", ")", "in", "enumerate", "(", "objs", ")", ")", "return", "sorted", "(", "objs", ",", "key", "=", "lambda", "o...
Order-preserving sorting function.
[ "Order", "-", "preserving", "sorting", "function", "." ]
python
train
ceph/ceph-deploy
vendor.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/vendor.py#L83-L90
def clean_vendor(name): """ Ensure that vendored code/dirs are removed, possibly when packaging when the environment flag is set to avoid vendoring. """ this_dir = path.dirname(path.abspath(__file__)) vendor_dest = path.join(this_dir, 'ceph_deploy/lib/vendor/%s' % name) run(['rm', '-rf', vendor_dest])
[ "def", "clean_vendor", "(", "name", ")", ":", "this_dir", "=", "path", ".", "dirname", "(", "path", ".", "abspath", "(", "__file__", ")", ")", "vendor_dest", "=", "path", ".", "join", "(", "this_dir", ",", "'ceph_deploy/lib/vendor/%s'", "%", "name", ")", ...
Ensure that vendored code/dirs are removed, possibly when packaging when the environment flag is set to avoid vendoring.
[ "Ensure", "that", "vendored", "code", "/", "dirs", "are", "removed", "possibly", "when", "packaging", "when", "the", "environment", "flag", "is", "set", "to", "avoid", "vendoring", "." ]
python
train
peterdemin/pip-compile-multi
pipcompilemulti/cli_v2.py
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v2.py#L38-L46
def verify(ctx): """Upgrade locked dependency versions""" oks = run_configurations( skipper(verify_environments), read_sections, ) ctx.exit(0 if False not in oks else 1)
[ "def", "verify", "(", "ctx", ")", ":", "oks", "=", "run_configurations", "(", "skipper", "(", "verify_environments", ")", ",", "read_sections", ",", ")", "ctx", ".", "exit", "(", "0", "if", "False", "not", "in", "oks", "else", "1", ")" ]
Upgrade locked dependency versions
[ "Upgrade", "locked", "dependency", "versions" ]
python
train
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1339-L1356
def set_init_score(self, init_score): """Set init score of Booster to start from. Parameters ---------- init_score : list, numpy 1-D array, pandas Series or None Init score for Booster. Returns ------- self : Dataset Dataset with set init score. """ self.init_score = init_score if self.handle is not None and init_score is not None: init_score = list_to_1d_numpy(init_score, np.float64, name='init_score') self.set_field('init_score', init_score) return self
[ "def", "set_init_score", "(", "self", ",", "init_score", ")", ":", "self", ".", "init_score", "=", "init_score", "if", "self", ".", "handle", "is", "not", "None", "and", "init_score", "is", "not", "None", ":", "init_score", "=", "list_to_1d_numpy", "(", "i...
Set init score of Booster to start from. Parameters ---------- init_score : list, numpy 1-D array, pandas Series or None Init score for Booster. Returns ------- self : Dataset Dataset with set init score.
[ "Set", "init", "score", "of", "Booster", "to", "start", "from", "." ]
python
train
b-ryan/powerline-shell
powerline_shell/__init__.py
https://github.com/b-ryan/powerline-shell/blob/a9b8c9bb39dbfb7ec3c639e497b5a76fa6dcb8cc/powerline_shell/__init__.py#L30-L54
def get_valid_cwd(): """Determine and check the current working directory for validity. Typically, an directory arises when you checkout a different branch on git that doesn't have this directory. When an invalid directory is found, a warning is printed to the screen, but the directory is still returned as-is, since this is what the shell considers to be the cwd.""" try: cwd = _current_dir() except: warn("Your current directory is invalid. If you open a ticket at " + "https://github.com/milkbikis/powerline-shell/issues/new " + "we would love to help fix the issue.") sys.stdout.write("> ") sys.exit(1) parts = cwd.split(os.sep) up = cwd while parts and not os.path.exists(up): parts.pop() up = os.sep.join(parts) if cwd != up: warn("Your current directory is invalid. Lowest valid directory: " + up) return cwd
[ "def", "get_valid_cwd", "(", ")", ":", "try", ":", "cwd", "=", "_current_dir", "(", ")", "except", ":", "warn", "(", "\"Your current directory is invalid. If you open a ticket at \"", "+", "\"https://github.com/milkbikis/powerline-shell/issues/new \"", "+", "\"we would love t...
Determine and check the current working directory for validity. Typically, an directory arises when you checkout a different branch on git that doesn't have this directory. When an invalid directory is found, a warning is printed to the screen, but the directory is still returned as-is, since this is what the shell considers to be the cwd.
[ "Determine", "and", "check", "the", "current", "working", "directory", "for", "validity", "." ]
python
train
michaelpb/omnic
omnic/web/viewer.py
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/viewer.py#L40-L56
def get_resource(self): ''' Returns a BytesResource to build the viewers JavaScript ''' # Basename could be used for controlling caching # basename = 'viewers_%s' % settings.get_cache_string() node_packages = self.get_node_packages() # sort_keys is essential to ensure resulting string is # deterministic (and thus hashable) viewers_data_str = json.dumps(node_packages, sort_keys=True) viewers_data = viewers_data_str.encode('utf8') viewers_resource = ForeignBytesResource( viewers_data, extension=VIEWER_EXT, # basename=basename, ) return viewers_resource
[ "def", "get_resource", "(", "self", ")", ":", "# Basename could be used for controlling caching", "# basename = 'viewers_%s' % settings.get_cache_string()", "node_packages", "=", "self", ".", "get_node_packages", "(", ")", "# sort_keys is essential to ensure resulting string is", "# ...
Returns a BytesResource to build the viewers JavaScript
[ "Returns", "a", "BytesResource", "to", "build", "the", "viewers", "JavaScript" ]
python
train
PiotrDabkowski/Js2Py
js2py/legecy_translators/translator.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/legecy_translators/translator.py#L10-L70
def translate_js(js, top=TOP_GLOBAL): """js has to be a javascript source code. returns equivalent python code.""" # Remove constant literals no_const, constants = remove_constants(js) #print 'const count', len(constants) # Remove object literals no_obj, objects, obj_count = remove_objects(no_const) #print 'obj count', len(objects) # Remove arrays no_arr, arrays, arr_count = remove_arrays(no_obj) #print 'arr count', len(arrays) # Here remove and replace functions reset_inline_count() no_func, hoisted, inline = remove_functions(no_arr) #translate flow and expressions py_seed, to_register = translate_flow(no_func) # register variables and hoisted functions #top += '# register variables\n' top += 'var.registers(%s)\n' % str(to_register + hoisted.keys()) #Recover functions # hoisted functions recovery defs = '' #defs += '# define hoisted functions\n' #print len(hoisted) , 'HH'*40 for nested_name, nested_info in hoisted.iteritems(): nested_block, nested_args = nested_info new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args) new_code += 'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name) defs += new_code + '\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr( nested_name) #defs += '# Everting ready!\n' # inline functions recovery for nested_name, nested_info in inline.iteritems(): nested_block, nested_args = nested_info new_code = translate_func(nested_name, nested_block, nested_args) py_seed = inject_before_lval(py_seed, nested_name.split('@')[0], new_code) # add hoisted definitiond - they have literals that have to be recovered py_seed = defs + py_seed #Recover arrays for arr_lval, arr_code in arrays.iteritems(): translation, obj_count, arr_count = translate_array( arr_code, arr_lval, obj_count, arr_count) py_seed = inject_before_lval(py_seed, arr_lval, translation) #Recover objects for obj_lval, obj_code in objects.iteritems(): translation, obj_count, arr_count = translate_object( obj_code, obj_lval, obj_count, arr_count) py_seed = 
inject_before_lval(py_seed, obj_lval, translation) #Recover constants py_code = recover_constants(py_seed, constants) return top + py_code
[ "def", "translate_js", "(", "js", ",", "top", "=", "TOP_GLOBAL", ")", ":", "# Remove constant literals", "no_const", ",", "constants", "=", "remove_constants", "(", "js", ")", "#print 'const count', len(constants)", "# Remove object literals", "no_obj", ",", "objects", ...
js has to be a javascript source code. returns equivalent python code.
[ "js", "has", "to", "be", "a", "javascript", "source", "code", ".", "returns", "equivalent", "python", "code", "." ]
python
valid
markovmodel/PyEMMA
pyemma/coordinates/data/featurization/featurizer.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/featurization/featurizer.py#L586-L622
def add_group_mindist(self, group_definitions, group_pairs='all', threshold=None, periodic=True): r""" Adds the minimum distance between groups of atoms to the feature list. If the groups of atoms are identical to residues, use :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`. Parameters ---------- group_definitions : list of 1D-arrays/iterables containing the group definitions via atom indices. If there is only one group_definition, it is assumed the minimum distance within this group (excluding the self-distance) is wanted. In this case, :py:obj:`group_pairs` is ignored. group_pairs : Can be of two types: 'all' Computes minimum distances between all pairs of groups contained in the group definitions ndarray((n, 2), dtype=int): n x 2 array with the pairs of groups for which the minimum distances will be computed. threshold : float, optional, default is None distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. If left to None, the numerical value will be returned periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention. """ from .distances import GroupMinDistanceFeature # Some thorough input checking and reformatting group_definitions, group_pairs, distance_list, group_identifiers = \ _parse_groupwise_input(group_definitions, group_pairs, self.logger, 'add_group_mindist') distance_list = self._check_indices(distance_list) f = GroupMinDistanceFeature(self.topology, group_definitions, group_pairs, distance_list, group_identifiers, threshold, periodic) self.__add_feature(f)
[ "def", "add_group_mindist", "(", "self", ",", "group_definitions", ",", "group_pairs", "=", "'all'", ",", "threshold", "=", "None", ",", "periodic", "=", "True", ")", ":", "from", ".", "distances", "import", "GroupMinDistanceFeature", "# Some thorough input checking...
r""" Adds the minimum distance between groups of atoms to the feature list. If the groups of atoms are identical to residues, use :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`. Parameters ---------- group_definitions : list of 1D-arrays/iterables containing the group definitions via atom indices. If there is only one group_definition, it is assumed the minimum distance within this group (excluding the self-distance) is wanted. In this case, :py:obj:`group_pairs` is ignored. group_pairs : Can be of two types: 'all' Computes minimum distances between all pairs of groups contained in the group definitions ndarray((n, 2), dtype=int): n x 2 array with the pairs of groups for which the minimum distances will be computed. threshold : float, optional, default is None distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. If left to None, the numerical value will be returned periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
[ "r", "Adds", "the", "minimum", "distance", "between", "groups", "of", "atoms", "to", "the", "feature", "list", ".", "If", "the", "groups", "of", "atoms", "are", "identical", "to", "residues", "use", ":", "py", ":", "obj", ":", "add_residue_mindist", "<pyem...
python
train
architv/soccer-cli
soccer/writers.py
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L220-L228
def live_scores(self, live_scores): """Store output of live scores to a CSV file""" headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([game['league'], game['homeTeamName'], game['goalsHomeTeam'], game['goalsAwayTeam'], game['awayTeamName']] for game in live_scores['games']) self.generate_output(result)
[ "def", "live_scores", "(", "self", ",", "live_scores", ")", ":", "headers", "=", "[", "'League'", ",", "'Home Team Name'", ",", "'Home Team Goals'", ",", "'Away Team Goals'", ",", "'Away Team Name'", "]", "result", "=", "[", "headers", "]", "result", ".", "ext...
Store output of live scores to a CSV file
[ "Store", "output", "of", "live", "scores", "to", "a", "CSV", "file" ]
python
train
IdentityPython/pysaml2
src/saml2/ident.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/ident.py#L49-L56
def code_binary(item): """ Return a binary 'code' suitable for hashing. """ code_str = code(item) if isinstance(code_str, six.string_types): return code_str.encode('utf-8') return code_str
[ "def", "code_binary", "(", "item", ")", ":", "code_str", "=", "code", "(", "item", ")", "if", "isinstance", "(", "code_str", ",", "six", ".", "string_types", ")", ":", "return", "code_str", ".", "encode", "(", "'utf-8'", ")", "return", "code_str" ]
Return a binary 'code' suitable for hashing.
[ "Return", "a", "binary", "code", "suitable", "for", "hashing", "." ]
python
train
klen/pyserve
pyserve/bottle.py
https://github.com/klen/pyserve/blob/5942ff2eb41566fd39d73abbd3e5c7caa7366aa8/pyserve/bottle.py#L968-L974
def cookies(self): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() if len(cookies) > self.MAX_PARAMS: raise HTTPError(413, 'Too many cookies') return FormsDict((c.key, c.value) for c in cookies)
[ "def", "cookies", "(", "self", ")", ":", "cookies", "=", "SimpleCookie", "(", "self", ".", "environ", ".", "get", "(", "'HTTP_COOKIE'", ",", "''", ")", ")", ".", "values", "(", ")", "if", "len", "(", "cookies", ")", ">", "self", ".", "MAX_PARAMS", ...
Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies.
[ "Cookies", "parsed", "into", "a", ":", "class", ":", "FormsDict", ".", "Signed", "cookies", "are", "NOT", "decoded", ".", "Use", ":", "meth", ":", "get_cookie", "if", "you", "expect", "signed", "cookies", "." ]
python
train
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py#L1304-L1389
def plot_resp_slow(signal, rect_signal, sample_rate): """ ----- Brief ----- Figure intended to represent the acquired RIP signal together with a rectangular signal defining inhalation and exhalation periods. ----------- Description ----------- Function design to generate a Bokeh figure containing the evolution of RIP signal, when slow respiration cycles occur, and the rectangular signal that defines the stages of inhalation and exhalation. Applied in the Notebook "Particularities of Inductive Respiration (RIP) Sensor ". ---------- Parameters ---------- signal : list List with the acquired RIP signal. rect_signal : list Data samples of the rectangular signal that identifies inhalation and exhalation segments. sample_rate : int Sampling rate of acquisition. """ signal = numpy.array(signal) - numpy.average(signal) rect_signal = numpy.array(rect_signal) time = numpy.linspace(0, len(signal) / sample_rate, len(signal)) # Inhalation and Exhalation time segments. # [Signal Binarisation] rect_signal_rev = rect_signal - numpy.average(rect_signal) inhal_segments = numpy.where(rect_signal_rev >= 0)[0] exhal_segments = numpy.where(rect_signal_rev < 0)[0] rect_signal_rev[inhal_segments] = numpy.max(rect_signal_rev) rect_signal_rev[exhal_segments] = numpy.min(rect_signal_rev) # [Signal Differentiation] diff_rect_signal = numpy.diff(rect_signal_rev) inhal_begin = numpy.where(diff_rect_signal > 0)[0] inhal_end = numpy.where(diff_rect_signal < 0)[0] exhal_begin = inhal_end exhal_end = inhal_begin[1:] # Generation of a Bokeh figure where data will be plotted. plot_aux = plot(list([0]), list([0]), showPlot=False)[0] # Edition of Bokeh figure (title, axes labels...) 
# [title] title = Title() title.text = "RIP Signal with slow cycles" plot_aux.title = title # [plot] plot_aux.line(time, signal, **opensignals_kwargs("line")) inhal_color = opensignals_color_pallet() exhal_color = opensignals_color_pallet() for inhal_exhal in range(0, len(inhal_begin)): if inhal_exhal == 0: legend = ["Inhalation", "Exhalation"] else: legend = [None, None] plot_aux.line(time[inhal_begin[inhal_exhal]:inhal_end[inhal_exhal]], rect_signal_rev[inhal_begin[inhal_exhal]:inhal_end[inhal_exhal]], line_width=2, line_color=inhal_color, legend=legend[0]) if inhal_exhal != len(inhal_begin) - 1: plot_aux.line(time[exhal_begin[inhal_exhal]:exhal_end[inhal_exhal]], rect_signal_rev[exhal_begin[inhal_exhal]:exhal_end[inhal_exhal]], line_width=2, line_color=exhal_color, legend=legend[1]) else: plot_aux.line(time[exhal_begin[inhal_exhal]:], rect_signal_rev[exhal_begin[inhal_exhal]:], line_width=2, line_color=exhal_color, legend=legend[1]) # [axes labels] plot_aux.xaxis.axis_label = "Time (s)" plot_aux.yaxis.axis_label = "Raw Data (without DC component)" show(plot_aux)
[ "def", "plot_resp_slow", "(", "signal", ",", "rect_signal", ",", "sample_rate", ")", ":", "signal", "=", "numpy", ".", "array", "(", "signal", ")", "-", "numpy", ".", "average", "(", "signal", ")", "rect_signal", "=", "numpy", ".", "array", "(", "rect_si...
----- Brief ----- Figure intended to represent the acquired RIP signal together with a rectangular signal defining inhalation and exhalation periods. ----------- Description ----------- Function design to generate a Bokeh figure containing the evolution of RIP signal, when slow respiration cycles occur, and the rectangular signal that defines the stages of inhalation and exhalation. Applied in the Notebook "Particularities of Inductive Respiration (RIP) Sensor ". ---------- Parameters ---------- signal : list List with the acquired RIP signal. rect_signal : list Data samples of the rectangular signal that identifies inhalation and exhalation segments. sample_rate : int Sampling rate of acquisition.
[ "-----", "Brief", "-----", "Figure", "intended", "to", "represent", "the", "acquired", "RIP", "signal", "together", "with", "a", "rectangular", "signal", "defining", "inhalation", "and", "exhalation", "periods", "." ]
python
train
PyCQA/astroid
astroid/builder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/builder.py#L193-L214
def add_from_names_to_locals(self, node): """Store imported names to the locals Resort the locals if coming from a delayed node """ _key_func = lambda node: node.fromlineno def sort_locals(my_list): my_list.sort(key=_key_func) for (name, asname) in node.names: if name == "*": try: imported = node.do_import_module() except exceptions.AstroidBuildingError: continue for name in imported.public_names(): node.parent.set_local(name, node) sort_locals(node.parent.scope().locals[name]) else: node.parent.set_local(asname or name, node) sort_locals(node.parent.scope().locals[asname or name])
[ "def", "add_from_names_to_locals", "(", "self", ",", "node", ")", ":", "_key_func", "=", "lambda", "node", ":", "node", ".", "fromlineno", "def", "sort_locals", "(", "my_list", ")", ":", "my_list", ".", "sort", "(", "key", "=", "_key_func", ")", "for", "...
Store imported names to the locals Resort the locals if coming from a delayed node
[ "Store", "imported", "names", "to", "the", "locals" ]
python
train
quantumlib/Cirq
cirq/circuits/_bucket_priority_queue.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/_bucket_priority_queue.py#L86-L130
def enqueue(self, priority: int, item: TItem) -> bool: """Adds an entry to the priority queue. If drop_duplicate_entries is set and there is already a (priority, item) entry in the queue, then the enqueue is ignored. Check the return value to determine if an enqueue was kept or dropped. Args: priority: The priority of the item. Lower priorities dequeue before higher priorities. item: The item associated with the given priority. Returns: True if the item was enqueued. False if drop_duplicate_entries is set and the item is already in the queue. """ if self._drop_set is not None: if (priority, item) in self._drop_set: return False self._drop_set.add((priority, item)) # First enqueue initializes self._offset. if not self._buckets: self._buckets.append([item]) self._offset = priority self._len = 1 return True # Where is the bucket this item is supposed to go into? i = priority - self._offset # Extend bucket list backwards if needed. if i < 0: self._buckets[:0] = [[] for _ in range(-i)] self._offset = priority i = 0 # Extend bucket list forwards if needed. while i >= len(self._buckets): self._buckets.append([]) # Finish by adding item to the intended bucket's list. self._buckets[i].append(item) self._len += 1 return True
[ "def", "enqueue", "(", "self", ",", "priority", ":", "int", ",", "item", ":", "TItem", ")", "->", "bool", ":", "if", "self", ".", "_drop_set", "is", "not", "None", ":", "if", "(", "priority", ",", "item", ")", "in", "self", ".", "_drop_set", ":", ...
Adds an entry to the priority queue. If drop_duplicate_entries is set and there is already a (priority, item) entry in the queue, then the enqueue is ignored. Check the return value to determine if an enqueue was kept or dropped. Args: priority: The priority of the item. Lower priorities dequeue before higher priorities. item: The item associated with the given priority. Returns: True if the item was enqueued. False if drop_duplicate_entries is set and the item is already in the queue.
[ "Adds", "an", "entry", "to", "the", "priority", "queue", "." ]
python
train
dslackw/slpkg
slpkg/init.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/init.py#L712-L716
def file_remove(self, path, filename): """Check if filename exists and remove """ if os.path.isfile(path + filename): os.remove(path + filename)
[ "def", "file_remove", "(", "self", ",", "path", ",", "filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", "+", "filename", ")", ":", "os", ".", "remove", "(", "path", "+", "filename", ")" ]
Check if filename exists and remove
[ "Check", "if", "filename", "exists", "and", "remove" ]
python
train
suds-community/suds
suds/xsd/sxbasic.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/xsd/sxbasic.py#L443-L459
def namespace(self, prefix=None): """ Get this schema element's target namespace. In case of reference elements, the target namespace is defined by the referenced and not the referencing element node. @param prefix: The default prefix. @type prefix: str @return: The schema element's target namespace @rtype: (I{prefix},I{URI}) """ e = self.__deref() if e is not None: return e.namespace(prefix) return super(Element, self).namespace()
[ "def", "namespace", "(", "self", ",", "prefix", "=", "None", ")", ":", "e", "=", "self", ".", "__deref", "(", ")", "if", "e", "is", "not", "None", ":", "return", "e", ".", "namespace", "(", "prefix", ")", "return", "super", "(", "Element", ",", "...
Get this schema element's target namespace. In case of reference elements, the target namespace is defined by the referenced and not the referencing element node. @param prefix: The default prefix. @type prefix: str @return: The schema element's target namespace @rtype: (I{prefix},I{URI})
[ "Get", "this", "schema", "element", "s", "target", "namespace", "." ]
python
train
kolypto/py-good
good/schema/compiler.py
https://github.com/kolypto/py-good/blob/192ef19e79f6fd95c1cbd7c378a3074c7ad7a6d4/good/schema/compiler.py#L173-L190
def sub_compile(self, schema, path=None, matcher=False): """ Compile a sub-schema :param schema: Validation schema :type schema: * :param path: Path to this schema, if any :type path: list|None :param matcher: Compile a matcher? :type matcher: bool :rtype: CompiledSchema """ return type(self)( schema, self.path + (path or []), None, None, matcher )
[ "def", "sub_compile", "(", "self", ",", "schema", ",", "path", "=", "None", ",", "matcher", "=", "False", ")", ":", "return", "type", "(", "self", ")", "(", "schema", ",", "self", ".", "path", "+", "(", "path", "or", "[", "]", ")", ",", "None", ...
Compile a sub-schema :param schema: Validation schema :type schema: * :param path: Path to this schema, if any :type path: list|None :param matcher: Compile a matcher? :type matcher: bool :rtype: CompiledSchema
[ "Compile", "a", "sub", "-", "schema" ]
python
train
loganasherjones/yapconf
yapconf/spec.py
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L211-L225
def get_item(self, name, bootstrap=False): """Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns (YapconfItem): A YapconfItem if it is found, None otherwise. """ for item in self._get_items(bootstrap): if item.name == name: return item return None
[ "def", "get_item", "(", "self", ",", "name", ",", "bootstrap", "=", "False", ")", ":", "for", "item", "in", "self", ".", "_get_items", "(", "bootstrap", ")", ":", "if", "item", ".", "name", "==", "name", ":", "return", "item", "return", "None" ]
Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns (YapconfItem): A YapconfItem if it is found, None otherwise.
[ "Get", "a", "particular", "item", "in", "the", "specification", "." ]
python
train
rocky/python-uncompyle6
uncompyle6/verify.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/verify.py#L417-L425
def compare_files(pyc_filename1, pyc_filename2, verify): """Compare two .pyc files.""" (version1, timestamp, magic_int1, code_obj1, is_pypy, source_size) = uncompyle6.load_module(pyc_filename1) (version2, timestamp, magic_int2, code_obj2, is_pypy, source_size) = uncompyle6.load_module(pyc_filename2) if (magic_int1 != magic_int2) and verify == 'verify': verify = 'weak_verify' cmp_code_objects(version1, is_pypy, code_obj1, code_obj2, verify)
[ "def", "compare_files", "(", "pyc_filename1", ",", "pyc_filename2", ",", "verify", ")", ":", "(", "version1", ",", "timestamp", ",", "magic_int1", ",", "code_obj1", ",", "is_pypy", ",", "source_size", ")", "=", "uncompyle6", ".", "load_module", "(", "pyc_filen...
Compare two .pyc files.
[ "Compare", "two", ".", "pyc", "files", "." ]
python
train
sanoma/django-arctic
arctic/generics.py
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/generics.py#L563-L575
def get_ordering(self): """Ordering used for queryset filtering (should not contain prefix).""" if self.sorting_field: return [self.sorting_field] prefix = self.get_prefix() fields = self.get_ordering_with_prefix() if self.prefix: fields = [f.replace(prefix, "", 1) for f in fields] return [ f for f in fields if f.lstrip("-") in self.get_ordering_fields_lookups() ]
[ "def", "get_ordering", "(", "self", ")", ":", "if", "self", ".", "sorting_field", ":", "return", "[", "self", ".", "sorting_field", "]", "prefix", "=", "self", ".", "get_prefix", "(", ")", "fields", "=", "self", ".", "get_ordering_with_prefix", "(", ")", ...
Ordering used for queryset filtering (should not contain prefix).
[ "Ordering", "used", "for", "queryset", "filtering", "(", "should", "not", "contain", "prefix", ")", "." ]
python
train
crytic/slither
slither/__main__.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/__main__.py#L38-L53
def process(filename, args, detector_classes, printer_classes): """ The core high-level code for running Slither static analysis. Returns: list(result), int: Result list and number of contracts analyzed """ ast = '--ast-compact-json' if args.legacy_ast: ast = '--ast-json' args.filter_paths = parse_filter_paths(args) slither = Slither(filename, ast_format=ast, **vars(args)) return _process(slither, detector_classes, printer_classes)
[ "def", "process", "(", "filename", ",", "args", ",", "detector_classes", ",", "printer_classes", ")", ":", "ast", "=", "'--ast-compact-json'", "if", "args", ".", "legacy_ast", ":", "ast", "=", "'--ast-json'", "args", ".", "filter_paths", "=", "parse_filter_paths...
The core high-level code for running Slither static analysis. Returns: list(result), int: Result list and number of contracts analyzed
[ "The", "core", "high", "-", "level", "code", "for", "running", "Slither", "static", "analysis", "." ]
python
train
numenta/nupic
src/nupic/data/utils.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/utils.py#L207-L215
def parseStringList(s): """ Parse a string of space-separated numbers, returning a Python list. :param s: (string) to parse :returns: (list) binary SDR """ assert isinstance(s, basestring) return [int(i) for i in s.split()]
[ "def", "parseStringList", "(", "s", ")", ":", "assert", "isinstance", "(", "s", ",", "basestring", ")", "return", "[", "int", "(", "i", ")", "for", "i", "in", "s", ".", "split", "(", ")", "]" ]
Parse a string of space-separated numbers, returning a Python list. :param s: (string) to parse :returns: (list) binary SDR
[ "Parse", "a", "string", "of", "space", "-", "separated", "numbers", "returning", "a", "Python", "list", "." ]
python
valid
pyviz/holoviews
holoviews/util/parser.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/parser.py#L349-L362
def parse_options(cls, line, ns={}): """ Similar to parse but returns a list of Options objects instead of the dictionary format. """ parsed = cls.parse(line, ns=ns) options_list = [] for spec in sorted(parsed.keys()): options = parsed[spec] merged = {} for group in options.values(): merged = dict(group.kwargs, **merged) options_list.append(Options(spec, **merged)) return options_list
[ "def", "parse_options", "(", "cls", ",", "line", ",", "ns", "=", "{", "}", ")", ":", "parsed", "=", "cls", ".", "parse", "(", "line", ",", "ns", "=", "ns", ")", "options_list", "=", "[", "]", "for", "spec", "in", "sorted", "(", "parsed", ".", "...
Similar to parse but returns a list of Options objects instead of the dictionary format.
[ "Similar", "to", "parse", "but", "returns", "a", "list", "of", "Options", "objects", "instead", "of", "the", "dictionary", "format", "." ]
python
train
trailofbits/manticore
manticore/platforms/decree.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/decree.py#L798-L831
def wait(self, readfds, writefds, timeout): """ Wait for filedescriptors or timeout. Adds the current process to the corresponding waiting list and yields the cpu to another running process. """ logger.info("WAIT:") logger.info("\tProcess %d is going to wait for [ %r %r %r ]", self._current, readfds, writefds, timeout) logger.info("\tProcess: %r", self.procs) logger.info("\tRunning: %r", self.running) logger.info("\tRWait: %r", self.rwait) logger.info("\tTWait: %r", self.twait) logger.info("\tTimers: %r", self.timers) for fd in readfds: self.rwait[fd].add(self._current) for fd in writefds: self.twait[fd].add(self._current) if timeout is not None: self.timers[self._current] = self.clocks + timeout else: self.timers[self._current] = None procid = self._current # self.sched() next_index = (self.running.index(procid) + 1) % len(self.running) self._current = self.running[next_index] logger.info("\tTransfer control from process %d to %d", procid, self._current) logger.info("\tREMOVING %r from %r. Current: %r", procid, self.running, self._current) self.running.remove(procid) if self._current not in self.running: logger.info("\tCurrent not running. Checking for timers...") self._current = None if all([x is None for x in self.timers]): raise Deadlock() self.check_timers()
[ "def", "wait", "(", "self", ",", "readfds", ",", "writefds", ",", "timeout", ")", ":", "logger", ".", "info", "(", "\"WAIT:\"", ")", "logger", ".", "info", "(", "\"\\tProcess %d is going to wait for [ %r %r %r ]\"", ",", "self", ".", "_current", ",", "readfds"...
Wait for filedescriptors or timeout. Adds the current process to the corresponding waiting list and yields the cpu to another running process.
[ "Wait", "for", "filedescriptors", "or", "timeout", ".", "Adds", "the", "current", "process", "to", "the", "corresponding", "waiting", "list", "and", "yields", "the", "cpu", "to", "another", "running", "process", "." ]
python
valid
maximkulkin/lollipop
lollipop/types.py
https://github.com/maximkulkin/lollipop/blob/042e8a24508cc3b28630863253c38ffbfc52c882/lollipop/types.py#L1534-L1550
def validate_for(self, obj, data, *args, **kwargs): """Takes target object and serialized data, tries to update that object with data and validate result. Returns validation errors or None. Object is not updated. :param obj: Object to check data validity against. In case the data is partial object is used to get the rest of data from. :param data: Data to validate. Can be partial (not all schema field data is present). :param kwargs: Same keyword arguments as for :meth:`Type.load`. :returns: validation errors or None """ try: self.load_into(obj, data, inplace=False, *args, **kwargs) return None except ValidationError as ve: return ve.messages
[ "def", "validate_for", "(", "self", ",", "obj", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "load_into", "(", "obj", ",", "data", ",", "inplace", "=", "False", ",", "*", "args", ",", "*", "*", "kwa...
Takes target object and serialized data, tries to update that object with data and validate result. Returns validation errors or None. Object is not updated. :param obj: Object to check data validity against. In case the data is partial object is used to get the rest of data from. :param data: Data to validate. Can be partial (not all schema field data is present). :param kwargs: Same keyword arguments as for :meth:`Type.load`. :returns: validation errors or None
[ "Takes", "target", "object", "and", "serialized", "data", "tries", "to", "update", "that", "object", "with", "data", "and", "validate", "result", ".", "Returns", "validation", "errors", "or", "None", ".", "Object", "is", "not", "updated", "." ]
python
train
neurodata/ndio
ndio/remote/ndingest.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L220-L236
def dataset_dict( self, dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling): """Generate the dataset dictionary""" dataset_dict = {} dataset_dict['dataset_name'] = dataset_name dataset_dict['imagesize'] = imagesize dataset_dict['voxelres'] = voxelres if offset is not None: dataset_dict['offset'] = offset if timerange is not None: dataset_dict['timerange'] = timerange if scalinglevels is not None: dataset_dict['scalinglevels'] = scalinglevels if scaling is not None: dataset_dict['scaling'] = scaling return dataset_dict
[ "def", "dataset_dict", "(", "self", ",", "dataset_name", ",", "imagesize", ",", "voxelres", ",", "offset", ",", "timerange", ",", "scalinglevels", ",", "scaling", ")", ":", "dataset_dict", "=", "{", "}", "dataset_dict", "[", "'dataset_name'", "]", "=", "data...
Generate the dataset dictionary
[ "Generate", "the", "dataset", "dictionary" ]
python
test
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L2308-L2369
def screenshot(self, filename=None, transparent_background=None, return_img=None, window_size=None): """ Takes screenshot at current camera position Parameters ---------- filename : str, optional Location to write image to. If None, no image is written. transparent_background : bool, optional Makes the background transparent. Default False. return_img : bool, optional If a string filename is given and this is true, a NumPy array of the image will be returned. Returns ------- img : numpy.ndarray Array containing pixel RGB and alpha. Sized: [Window height x Window width x 3] for transparent_background=False [Window height x Window width x 4] for transparent_background=True Examples -------- >>> import vtki >>> sphere = vtki.Sphere() >>> plotter = vtki.Plotter() >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP """ if window_size is not None: self.window_size = window_size # configure image filter if transparent_background is None: transparent_background = rcParams['transparent_background'] self.image_transparent_background = transparent_background # This if statement allows you to save screenshots of closed plotters # This is needed for the sphinx-gallery work if not hasattr(self, 'ren_win'): # If plotter has been closed... # check if last_image exists if hasattr(self, 'last_image'): # Save last image return self._save_image(self.last_image, filename, return_img) # Plotter hasn't been rendered or was improperly closed raise AttributeError('This plotter is unable to save a screenshot.') if isinstance(self, Plotter): # TODO: we need a consistent rendering function self.render() else: self._render() # debug: this needs to be called twice for some reason, img = self.image img = self.image return self._save_image(img, filename, return_img)
[ "def", "screenshot", "(", "self", ",", "filename", "=", "None", ",", "transparent_background", "=", "None", ",", "return_img", "=", "None", ",", "window_size", "=", "None", ")", ":", "if", "window_size", "is", "not", "None", ":", "self", ".", "window_size"...
Takes screenshot at current camera position Parameters ---------- filename : str, optional Location to write image to. If None, no image is written. transparent_background : bool, optional Makes the background transparent. Default False. return_img : bool, optional If a string filename is given and this is true, a NumPy array of the image will be returned. Returns ------- img : numpy.ndarray Array containing pixel RGB and alpha. Sized: [Window height x Window width x 3] for transparent_background=False [Window height x Window width x 4] for transparent_background=True Examples -------- >>> import vtki >>> sphere = vtki.Sphere() >>> plotter = vtki.Plotter() >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP
[ "Takes", "screenshot", "at", "current", "camera", "position" ]
python
train
pvlib/pvlib-python
pvlib/tracking.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/tracking.py#L260-L580
def singleaxis(apparent_zenith, apparent_azimuth, axis_tilt=0, axis_azimuth=0, max_angle=90, backtrack=True, gcr=2.0/7.0): """ Determine the rotation angle of a single axis tracker using the equations in [1] when given a particular sun zenith and azimuth angle. backtracking may be specified, and if so, a ground coverage ratio is required. Rotation angle is determined in a panel-oriented coordinate system. The tracker azimuth axis_azimuth defines the positive y-axis; the positive x-axis is 90 degress clockwise from the y-axis and parallel to the earth surface, and the positive z-axis is normal and oriented towards the sun. Rotation angle tracker_theta indicates tracker position relative to horizontal: tracker_theta = 0 is horizontal, and positive tracker_theta is a clockwise rotation around the y axis in the x, y, z coordinate system. For example, if tracker azimuth axis_azimuth is 180 (oriented south), tracker_theta = 30 is a rotation of 30 degrees towards the west, and tracker_theta = -90 is a rotation to the vertical plane facing east. Parameters ---------- apparent_zenith : float, 1d array, or Series Solar apparent zenith angles in decimal degrees. apparent_azimuth : float, 1d array, or Series Solar apparent azimuth angles in decimal degrees. axis_tilt : float, default 0 The tilt of the axis of rotation (i.e, the y-axis defined by axis_azimuth) with respect to horizontal, in decimal degrees. axis_azimuth : float, default 0 A value denoting the compass direction along which the axis of rotation lies. Measured in decimal degrees East of North. max_angle : float, default 90 A value denoting the maximum rotation angle, in decimal degrees, of the one-axis tracker from its horizontal position (horizontal if axis_tilt = 0). A max_angle of 90 degrees allows the tracker to rotate to a vertical position to point the panel towards a horizon. max_angle of 180 degrees allows for full rotation. 
backtrack : bool, default True Controls whether the tracker has the capability to "backtrack" to avoid row-to-row shading. False denotes no backtrack capability. True denotes backtrack capability. gcr : float, default 2.0/7.0 A value denoting the ground coverage ratio of a tracker system which utilizes backtracking; i.e. the ratio between the PV array surface area to total ground area. A tracker system with modules 2 meters wide, centered on the tracking axis, with 6 meters between the tracking axes has a gcr of 2/6=0.333. If gcr is not provided, a gcr of 2/7 is default. gcr must be <=1. Returns ------- dict or DataFrame with the following columns: * tracker_theta: The rotation angle of the tracker. tracker_theta = 0 is horizontal, and positive rotation angles are clockwise. * aoi: The angle-of-incidence of direct irradiance onto the rotated panel surface. * surface_tilt: The angle between the panel surface and the earth surface, accounting for panel rotation. * surface_azimuth: The azimuth of the rotated panel, determined by projecting the vector normal to the panel's surface to the earth's surface. References ---------- [1] Lorenzo, E et al., 2011, "Tracking and back-tracking", Prog. in Photovoltaics: Research and Applications, v. 19, pp. 747-753. """ # MATLAB to Python conversion by # Will Holmgren (@wholmgren), U. Arizona. March, 2015. if isinstance(apparent_zenith, pd.Series): index = apparent_zenith.index else: index = None # convert scalars to arrays apparent_azimuth = np.atleast_1d(apparent_azimuth) apparent_zenith = np.atleast_1d(apparent_zenith) if apparent_azimuth.ndim > 1 or apparent_zenith.ndim > 1: raise ValueError('Input dimensions must not exceed 1') # Calculate sun position x, y, z using coordinate system as in [1], Eq 2. 
# Positive y axis is oriented parallel to earth surface along tracking axis # (for the purpose of illustration, assume y is oriented to the south); # positive x axis is orthogonal, 90 deg clockwise from y-axis, and parallel # to the earth's surface (if y axis is south, x axis is west); # positive z axis is normal to x, y axes, pointed upward. # Equations in [1] assume solar azimuth is relative to reference vector # pointed south, with clockwise positive. # Here, the input solar azimuth is degrees East of North, # i.e., relative to a reference vector pointed # north with clockwise positive. # Rotate sun azimuth to coordinate system as in [1] # to calculate sun position. az = apparent_azimuth - 180 apparent_elevation = 90 - apparent_zenith x = cosd(apparent_elevation) * sind(az) y = cosd(apparent_elevation) * cosd(az) z = sind(apparent_elevation) # translate array azimuth from compass bearing to [1] coord system # wholmgren: strange to see axis_azimuth calculated differently from az, # (not that it matters, or at least it shouldn't...). axis_azimuth_south = axis_azimuth - 180 # translate input array tilt angle axis_tilt to [1] coordinate system. # In [1] coordinates, axis_tilt is a rotation about the x-axis. # For a system with array azimuth (y-axis) oriented south, # the x-axis is oriented west, and a positive axis_tilt is a # counterclockwise rotation, i.e, lifting the north edge of the panel. # Thus, in [1] coordinate system, in the northern hemisphere a positive # axis_tilt indicates a rotation toward the equator, # whereas in the southern hemisphere rotation toward the equator is # indicated by axis_tilt<0. Here, the input axis_tilt is # always positive and is a rotation toward the equator. # Calculate sun position (xp, yp, zp) in panel-oriented coordinate system: # positive y-axis is oriented along tracking axis at panel tilt; # positive x-axis is orthogonal, clockwise, parallel to earth surface; # positive z-axis is normal to x-y axes, pointed upward. 
# Calculate sun position (xp,yp,zp) in panel coordinates using [1] Eq 11 # note that equation for yp (y' in Eq. 11 of Lorenzo et al 2011) is # corrected, after conversation with paper's authors. xp = x*cosd(axis_azimuth_south) - y*sind(axis_azimuth_south) yp = (x*cosd(axis_tilt)*sind(axis_azimuth_south) + y*cosd(axis_tilt)*cosd(axis_azimuth_south) - z*sind(axis_tilt)) zp = (x*sind(axis_tilt)*sind(axis_azimuth_south) + y*sind(axis_tilt)*cosd(axis_azimuth_south) + z*cosd(axis_tilt)) # The ideal tracking angle wid is the rotation to place the sun position # vector (xp, yp, zp) in the (y, z) plane; i.e., normal to the panel and # containing the axis of rotation. wid = 0 indicates that the panel is # horizontal. Here, our convention is that a clockwise rotation is # positive, to view rotation angles in the same frame of reference as # azimuth. For example, for a system with tracking axis oriented south, # a rotation toward the east is negative, and a rotation to the west is # positive. # Use arctan2 and avoid the tmp corrections. # angle from x-y plane to projection of sun vector onto x-z plane # tmp = np.degrees(np.arctan(zp/xp)) # Obtain wid by translating tmp to convention for rotation angles. # Have to account for which quadrant of the x-z plane in which the sun # vector lies. Complete solution here but probably not necessary to # consider QIII and QIV. # wid = pd.Series(index=times) # wid[(xp>=0) & (zp>=0)] = 90 - tmp[(xp>=0) & (zp>=0)] # QI # wid[(xp<0) & (zp>=0)] = -90 - tmp[(xp<0) & (zp>=0)] # QII # wid[(xp<0) & (zp<0)] = -90 - tmp[(xp<0) & (zp<0)] # QIII # wid[(xp>=0) & (zp<0)] = 90 - tmp[(xp>=0) & (zp<0)] # QIV # Calculate angle from x-y plane to projection of sun vector onto x-z plane # and then obtain wid by translating tmp to convention for rotation angles. 
wid = 90 - np.degrees(np.arctan2(zp, xp)) # filter for sun above panel horizon zen_gt_90 = apparent_zenith > 90 wid[zen_gt_90] = np.nan # Account for backtracking; modified from [1] to account for rotation # angle convention being used here. if backtrack: axes_distance = 1/gcr temp = np.minimum(axes_distance*cosd(wid), 1) # backtrack angle # (always positive b/c acosd returns values between 0 and 180) wc = np.degrees(np.arccos(temp)) # Eq 4 applied when wid in QIV (wid < 0 evalulates True), QI tracker_theta = np.where(wid < 0, wid + wc, wid - wc) else: tracker_theta = wid tracker_theta[tracker_theta > max_angle] = max_angle tracker_theta[tracker_theta < -max_angle] = -max_angle # calculate panel normal vector in panel-oriented x, y, z coordinates. # y-axis is axis of tracker rotation. tracker_theta is a compass angle # (clockwise is positive) rather than a trigonometric angle. # the *0 is a trick to preserve NaN values. panel_norm = np.array([sind(tracker_theta), tracker_theta*0, cosd(tracker_theta)]) # sun position in vector format in panel-oriented x, y, z coordinates sun_vec = np.array([xp, yp, zp]) # calculate angle-of-incidence on panel aoi = np.degrees(np.arccos(np.abs(np.sum(sun_vec*panel_norm, axis=0)))) # calculate panel tilt and azimuth # in a coordinate system where the panel tilt is the # angle from horizontal, and the panel azimuth is # the compass angle (clockwise from north) to the projection # of the panel's normal to the earth's surface. # These outputs are provided for convenience and comparison # with other PV software which use these angle conventions. # project normal vector to earth surface. # First rotate about x-axis by angle -axis_tilt so that y-axis is # also parallel to earth surface, then project. 
# Calculate standard rotation matrix rot_x = np.array([[1, 0, 0], [0, cosd(-axis_tilt), -sind(-axis_tilt)], [0, sind(-axis_tilt), cosd(-axis_tilt)]]) # panel_norm_earth contains the normal vector # expressed in earth-surface coordinates # (z normal to surface, y aligned with tracker axis parallel to earth) panel_norm_earth = np.dot(rot_x, panel_norm).T # projection to plane tangent to earth surface, # in earth surface coordinates projected_normal = np.array([panel_norm_earth[:, 0], panel_norm_earth[:, 1], panel_norm_earth[:, 2]*0]).T # calculate vector magnitudes projected_normal_mag = np.sqrt(np.nansum(projected_normal**2, axis=1)) # renormalize the projected vector # avoid creating nan values. non_zeros = projected_normal_mag != 0 projected_normal[non_zeros] = (projected_normal[non_zeros].T / projected_normal_mag[non_zeros]).T # calculation of surface_azimuth # 1. Find the angle. # surface_azimuth = pd.Series( # np.degrees(np.arctan(projected_normal[:,1]/projected_normal[:,0])), # index=times) surface_azimuth = \ np.degrees(np.arctan2(projected_normal[:, 1], projected_normal[:, 0])) # 2. Clean up atan when x-coord or y-coord is zero # surface_azimuth[(projected_normal[:,0]==0) & (projected_normal[:,1]>0)] = 90 # surface_azimuth[(projected_normal[:,0]==0) & (projected_normal[:,1]<0)] = -90 # surface_azimuth[(projected_normal[:,1]==0) & (projected_normal[:,0]>0)] = 0 # surface_azimuth[(projected_normal[:,1]==0) & (projected_normal[:,0]<0)] = 180 # 3. Correct atan for QII and QIII # surface_azimuth[(projected_normal[:,0]<0) & (projected_normal[:,1]>0)] += 180 # QII # surface_azimuth[(projected_normal[:,0]<0) & (projected_normal[:,1]<0)] += 180 # QIII # 4. Skip to below # at this point surface_azimuth contains angles between -90 and +270, # where 0 is along the positive x-axis, # the y-axis is in the direction of the tracker azimuth, # and positive angles are rotations from the positive x axis towards # the positive y-axis. 
# Adjust to compass angles # (clockwise rotation from 0 along the positive y-axis) # surface_azimuth[surface_azimuth<=90] = 90 - surface_azimuth[surface_azimuth<=90] # surface_azimuth[surface_azimuth>90] = 450 - surface_azimuth[surface_azimuth>90] # finally rotate to align y-axis with true north # PVLIB_MATLAB has this latitude correction, # but I don't think it's latitude dependent if you always # specify axis_azimuth with respect to North. # if latitude > 0 or True: # surface_azimuth = surface_azimuth - axis_azimuth # else: # surface_azimuth = surface_azimuth - axis_azimuth - 180 # surface_azimuth[surface_azimuth<0] = 360 + surface_azimuth[surface_azimuth<0] # the commented code above is mostly part of PVLIB_MATLAB. # My (wholmgren) take is that it can be done more simply. # Say that we're pointing along the postive x axis (likely west). # We just need to rotate 90 degrees to get from the x axis # to the y axis (likely south), # and then add the axis_azimuth to get back to North. # Anything left over is the azimuth that we want, # and we can map it into the [0,360) domain. # 4. Rotate 0 reference from panel's x axis to it's y axis and # then back to North. surface_azimuth = 90 - surface_azimuth + axis_azimuth # 5. Map azimuth into [0,360) domain. surface_azimuth[surface_azimuth < 0] += 360 surface_azimuth[surface_azimuth >= 360] -= 360 # Calculate surface_tilt dotproduct = (panel_norm_earth * projected_normal).sum(axis=1) surface_tilt = 90 - np.degrees(np.arccos(dotproduct)) # Bundle DataFrame for return values and filter for sun below horizon. out = {'tracker_theta': tracker_theta, 'aoi': aoi, 'surface_azimuth': surface_azimuth, 'surface_tilt': surface_tilt} if index is not None: out = pd.DataFrame(out, index=index) out = out[['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']] out[zen_gt_90] = np.nan else: out = {k: np.where(zen_gt_90, np.nan, v) for k, v in out.items()} return out
[ "def", "singleaxis", "(", "apparent_zenith", ",", "apparent_azimuth", ",", "axis_tilt", "=", "0", ",", "axis_azimuth", "=", "0", ",", "max_angle", "=", "90", ",", "backtrack", "=", "True", ",", "gcr", "=", "2.0", "/", "7.0", ")", ":", "# MATLAB to Python c...
Determine the rotation angle of a single axis tracker using the equations in [1] when given a particular sun zenith and azimuth angle. backtracking may be specified, and if so, a ground coverage ratio is required. Rotation angle is determined in a panel-oriented coordinate system. The tracker azimuth axis_azimuth defines the positive y-axis; the positive x-axis is 90 degress clockwise from the y-axis and parallel to the earth surface, and the positive z-axis is normal and oriented towards the sun. Rotation angle tracker_theta indicates tracker position relative to horizontal: tracker_theta = 0 is horizontal, and positive tracker_theta is a clockwise rotation around the y axis in the x, y, z coordinate system. For example, if tracker azimuth axis_azimuth is 180 (oriented south), tracker_theta = 30 is a rotation of 30 degrees towards the west, and tracker_theta = -90 is a rotation to the vertical plane facing east. Parameters ---------- apparent_zenith : float, 1d array, or Series Solar apparent zenith angles in decimal degrees. apparent_azimuth : float, 1d array, or Series Solar apparent azimuth angles in decimal degrees. axis_tilt : float, default 0 The tilt of the axis of rotation (i.e, the y-axis defined by axis_azimuth) with respect to horizontal, in decimal degrees. axis_azimuth : float, default 0 A value denoting the compass direction along which the axis of rotation lies. Measured in decimal degrees East of North. max_angle : float, default 90 A value denoting the maximum rotation angle, in decimal degrees, of the one-axis tracker from its horizontal position (horizontal if axis_tilt = 0). A max_angle of 90 degrees allows the tracker to rotate to a vertical position to point the panel towards a horizon. max_angle of 180 degrees allows for full rotation. backtrack : bool, default True Controls whether the tracker has the capability to "backtrack" to avoid row-to-row shading. False denotes no backtrack capability. True denotes backtrack capability. 
gcr : float, default 2.0/7.0 A value denoting the ground coverage ratio of a tracker system which utilizes backtracking; i.e. the ratio between the PV array surface area to total ground area. A tracker system with modules 2 meters wide, centered on the tracking axis, with 6 meters between the tracking axes has a gcr of 2/6=0.333. If gcr is not provided, a gcr of 2/7 is default. gcr must be <=1. Returns ------- dict or DataFrame with the following columns: * tracker_theta: The rotation angle of the tracker. tracker_theta = 0 is horizontal, and positive rotation angles are clockwise. * aoi: The angle-of-incidence of direct irradiance onto the rotated panel surface. * surface_tilt: The angle between the panel surface and the earth surface, accounting for panel rotation. * surface_azimuth: The azimuth of the rotated panel, determined by projecting the vector normal to the panel's surface to the earth's surface. References ---------- [1] Lorenzo, E et al., 2011, "Tracking and back-tracking", Prog. in Photovoltaics: Research and Applications, v. 19, pp. 747-753.
[ "Determine", "the", "rotation", "angle", "of", "a", "single", "axis", "tracker", "using", "the", "equations", "in", "[", "1", "]", "when", "given", "a", "particular", "sun", "zenith", "and", "azimuth", "angle", ".", "backtracking", "may", "be", "specified", ...
python
train
sixty-north/asq
asq/queryables.py
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L1537-L1567
def first(self, predicate=None): '''The first element in a sequence (optionally satisfying a predicate). If the predicate is omitted or is None this query returns the first element in the sequence; otherwise, it returns the first element in the sequence for which the predicate evaluates to True. Exceptions are raised if there is no such element. Note: This method uses immediate execution. Args: predicate: An optional unary predicate function, the only argument to which is the element. The return value should be True for matching elements, otherwise False. If the predicate is omitted or None the first element of the source sequence will be returned. Returns: The first element of the sequence if predicate is None, otherwise the first element for which the predicate returns True. Raises: ValueError: If the Queryable is closed. ValueError: If the source sequence is empty. ValueError: If there are no elements matching the predicate. TypeError: If the predicate is not callable. ''' if self.closed(): raise ValueError("Attempt to call first() on a closed Queryable.") return self._first() if predicate is None else self._first_predicate(predicate)
[ "def", "first", "(", "self", ",", "predicate", "=", "None", ")", ":", "if", "self", ".", "closed", "(", ")", ":", "raise", "ValueError", "(", "\"Attempt to call first() on a closed Queryable.\"", ")", "return", "self", ".", "_first", "(", ")", "if", "predica...
The first element in a sequence (optionally satisfying a predicate). If the predicate is omitted or is None this query returns the first element in the sequence; otherwise, it returns the first element in the sequence for which the predicate evaluates to True. Exceptions are raised if there is no such element. Note: This method uses immediate execution. Args: predicate: An optional unary predicate function, the only argument to which is the element. The return value should be True for matching elements, otherwise False. If the predicate is omitted or None the first element of the source sequence will be returned. Returns: The first element of the sequence if predicate is None, otherwise the first element for which the predicate returns True. Raises: ValueError: If the Queryable is closed. ValueError: If the source sequence is empty. ValueError: If there are no elements matching the predicate. TypeError: If the predicate is not callable.
[ "The", "first", "element", "in", "a", "sequence", "(", "optionally", "satisfying", "a", "predicate", ")", "." ]
python
train
erijo/tellcore-py
tellcore/telldus.py
https://github.com/erijo/tellcore-py/blob/7a1eb53e12ef039a2350933e502633df7560f6a8/tellcore/telldus.py#L478-L486
def value(self, datatype): """Return the :class:`SensorValue` for the given data type. sensor.value(TELLSTICK_TEMPERATURE) is identical to calling sensor.temperature(). """ value = self.lib.tdSensorValue( self.protocol, self.model, self.id, datatype) return SensorValue(datatype, value['value'], value['timestamp'])
[ "def", "value", "(", "self", ",", "datatype", ")", ":", "value", "=", "self", ".", "lib", ".", "tdSensorValue", "(", "self", ".", "protocol", ",", "self", ".", "model", ",", "self", ".", "id", ",", "datatype", ")", "return", "SensorValue", "(", "data...
Return the :class:`SensorValue` for the given data type. sensor.value(TELLSTICK_TEMPERATURE) is identical to calling sensor.temperature().
[ "Return", "the", ":", "class", ":", "SensorValue", "for", "the", "given", "data", "type", "." ]
python
train
acutesoftware/AIKIF
aikif/ontology/cyc_extract.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/cyc_extract.py#L75-L80
def create_sample_file(ip, op, num_lines): """ make a short version of an RDF file """ with open(ip, "rb") as f: with open(op, "wb") as fout: for _ in range(num_lines): fout.write(f.readline() )
[ "def", "create_sample_file", "(", "ip", ",", "op", ",", "num_lines", ")", ":", "with", "open", "(", "ip", ",", "\"rb\"", ")", "as", "f", ":", "with", "open", "(", "op", ",", "\"wb\"", ")", "as", "fout", ":", "for", "_", "in", "range", "(", "num_l...
make a short version of an RDF file
[ "make", "a", "short", "version", "of", "an", "RDF", "file" ]
python
train
helixyte/everest
everest/repositories/memory/cache.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/repositories/memory/cache.py#L118-L133
def retrieve(self, filter_expression=None, order_expression=None, slice_key=None): """ Retrieve entities from this cache, possibly after filtering, ordering and slicing. """ ents = iter(self.__entities) if not filter_expression is None: ents = filter_expression(ents) if not order_expression is None: # Ordering always involves a copy and conversion to a list, so # we have to wrap in an iterator. ents = iter(order_expression(ents)) if not slice_key is None: ents = islice(ents, slice_key.start, slice_key.stop) return ents
[ "def", "retrieve", "(", "self", ",", "filter_expression", "=", "None", ",", "order_expression", "=", "None", ",", "slice_key", "=", "None", ")", ":", "ents", "=", "iter", "(", "self", ".", "__entities", ")", "if", "not", "filter_expression", "is", "None", ...
Retrieve entities from this cache, possibly after filtering, ordering and slicing.
[ "Retrieve", "entities", "from", "this", "cache", "possibly", "after", "filtering", "ordering", "and", "slicing", "." ]
python
train
gwpy/gwpy
gwpy/types/io/hdf5.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L226-L233
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs): """Write a Series to HDF5. See :func:`write_hdf5_array` for details of arguments and keywords. """ if attrs is None: attrs = format_index_array_attrs(series) return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs)
[ "def", "write_hdf5_series", "(", "series", ",", "output", ",", "path", "=", "None", ",", "attrs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "format_index_array_attrs", "(", "series", ")", "return", ...
Write a Series to HDF5. See :func:`write_hdf5_array` for details of arguments and keywords.
[ "Write", "a", "Series", "to", "HDF5", "." ]
python
train
odlgroup/odl
odl/util/vectorization.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/vectorization.py#L62-L68
def out_shape_from_array(arr): """Get the output shape from an array.""" arr = np.asarray(arr) if arr.ndim == 1: return arr.shape else: return (arr.shape[1],)
[ "def", "out_shape_from_array", "(", "arr", ")", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "if", "arr", ".", "ndim", "==", "1", ":", "return", "arr", ".", "shape", "else", ":", "return", "(", "arr", ".", "shape", "[", "1", "]", ",", ...
Get the output shape from an array.
[ "Get", "the", "output", "shape", "from", "an", "array", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9149-L9163
def attitude_send(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, force_mavlink1=False): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right). time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) roll : Roll angle (rad, -pi..+pi) (float) pitch : Pitch angle (rad, -pi..+pi) (float) yaw : Yaw angle (rad, -pi..+pi) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) ''' return self.send(self.attitude_encode(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed), force_mavlink1=force_mavlink1)
[ "def", "attitude_send", "(", "self", ",", "time_boot_ms", ",", "roll", ",", "pitch", ",", "yaw", ",", "rollspeed", ",", "pitchspeed", ",", "yawspeed", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "attitu...
The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right). time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) roll : Roll angle (rad, -pi..+pi) (float) pitch : Pitch angle (rad, -pi..+pi) (float) yaw : Yaw angle (rad, -pi..+pi) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float)
[ "The", "attitude", "in", "the", "aeronautical", "frame", "(", "right", "-", "handed", "Z", "-", "down", "X", "-", "front", "Y", "-", "right", ")", "." ]
python
train
ungarj/mapchete
mapchete/formats/default/gtiff.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L271-L281
def open(self, tile, process, **kwargs): """ Open process output as input for other process. Parameters ---------- tile : ``Tile`` process : ``MapcheteProcess`` kwargs : keyword arguments """ return InputTile(tile, process, kwargs.get("resampling", None))
[ "def", "open", "(", "self", ",", "tile", ",", "process", ",", "*", "*", "kwargs", ")", ":", "return", "InputTile", "(", "tile", ",", "process", ",", "kwargs", ".", "get", "(", "\"resampling\"", ",", "None", ")", ")" ]
Open process output as input for other process. Parameters ---------- tile : ``Tile`` process : ``MapcheteProcess`` kwargs : keyword arguments
[ "Open", "process", "output", "as", "input", "for", "other", "process", "." ]
python
valid
Hackerfleet/hfos
hfos/tool/templates.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/templates.py#L27-L39
def format_template(template, content): """Render a given pystache template with given content""" import pystache result = u"" if True: # try: result = pystache.render(template, content, string_encoding='utf-8') # except (ValueError, KeyError) as e: # print("Templating error: %s %s" % (e, type(e))) # pprint(result) return result
[ "def", "format_template", "(", "template", ",", "content", ")", ":", "import", "pystache", "result", "=", "u\"\"", "if", "True", ":", "# try:", "result", "=", "pystache", ".", "render", "(", "template", ",", "content", ",", "string_encoding", "=", "'utf-8'",...
Render a given pystache template with given content
[ "Render", "a", "given", "pystache", "template", "with", "given", "content" ]
python
train
HDI-Project/MLBlocks
mlblocks/mlpipeline.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L276-L323
def to_dict(self): """Return all the details of this MLPipeline in a dict. The dict structure contains all the `__init__` arguments of the MLPipeline, as well as the current hyperparameter values and the specification of the tunable_hyperparameters:: { "primitives": [ "a_primitive", "another_primitive" ], "init_params": { "a_primitive": { "an_argument": "a_value" } }, "hyperparameters": { "a_primitive#1": { "an_argument": "a_value", "another_argument": "another_value", }, "another_primitive#1": { "yet_another_argument": "yet_another_value" } }, "tunable_hyperparameters": { "another_primitive#1": { "yet_another_argument": { "type": "str", "default": "a_default_value", "values": [ "a_default_value", "yet_another_value" ] } } } } """ return { 'primitives': self.primitives, 'init_params': self.init_params, 'input_names': self.input_names, 'output_names': self.output_names, 'hyperparameters': self.get_hyperparameters(), 'tunable_hyperparameters': self._tunable_hyperparameters }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'primitives'", ":", "self", ".", "primitives", ",", "'init_params'", ":", "self", ".", "init_params", ",", "'input_names'", ":", "self", ".", "input_names", ",", "'output_names'", ":", "self", ".", "...
Return all the details of this MLPipeline in a dict. The dict structure contains all the `__init__` arguments of the MLPipeline, as well as the current hyperparameter values and the specification of the tunable_hyperparameters:: { "primitives": [ "a_primitive", "another_primitive" ], "init_params": { "a_primitive": { "an_argument": "a_value" } }, "hyperparameters": { "a_primitive#1": { "an_argument": "a_value", "another_argument": "another_value", }, "another_primitive#1": { "yet_another_argument": "yet_another_value" } }, "tunable_hyperparameters": { "another_primitive#1": { "yet_another_argument": { "type": "str", "default": "a_default_value", "values": [ "a_default_value", "yet_another_value" ] } } } }
[ "Return", "all", "the", "details", "of", "this", "MLPipeline", "in", "a", "dict", "." ]
python
train
getpelican/pelican-plugins
slim/slim.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/slim/slim.py#L154-L173
def register(): """Plugin registration.""" if not plim: logger.warning('`slim` failed to load dependency `plim`. ' '`slim` plugin not loaded.') return if not mako: logger.warning('`slim` failed to load dependency `mako`. ' '`slim` plugin not loaded.') return if not bs: logger.warning('`slim` failed to load dependency `BeautifulSoup4`. ' '`slim` plugin not loaded.') return if not minify: logger.warning('`slim` failed to load dependency `htmlmin`. ' '`slim` plugin not loaded.') return signals.get_writer.connect(get_writer)
[ "def", "register", "(", ")", ":", "if", "not", "plim", ":", "logger", ".", "warning", "(", "'`slim` failed to load dependency `plim`. '", "'`slim` plugin not loaded.'", ")", "return", "if", "not", "mako", ":", "logger", ".", "warning", "(", "'`slim` failed to load d...
Plugin registration.
[ "Plugin", "registration", "." ]
python
train
v1k45/python-qBittorrent
qbittorrent/client.py
https://github.com/v1k45/python-qBittorrent/blob/04f9482a022dcc78c56b0b9acb9ca455f855ae24/qbittorrent/client.py#L609-L616
def toggle_sequential_download(self, infohash_list): """ Toggle sequential download in supplied torrents. :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) return self._post('command/toggleSequentialDownload', data=data)
[ "def", "toggle_sequential_download", "(", "self", ",", "infohash_list", ")", ":", "data", "=", "self", ".", "_process_infohash_list", "(", "infohash_list", ")", "return", "self", ".", "_post", "(", "'command/toggleSequentialDownload'", ",", "data", "=", "data", ")...
Toggle sequential download in supplied torrents. :param infohash_list: Single or list() of infohashes.
[ "Toggle", "sequential", "download", "in", "supplied", "torrents", "." ]
python
train
heikomuller/sco-datastore
scodata/__init__.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/__init__.py#L200-L226
def experiments_fmri_delete(self, experiment_id): """Delete fMRI data object associated with given experiment. Raises ValueError if an attempt is made to delete a read-only resource. Parameters ---------- experiment_id : string Unique experiment identifier Returns ------- FMRIDataHandle Handle for deleted data object or None if experiment is unknown or has no fMRI data object associated with it """ # Get experiment fMRI to ensure that it exists fmri = self.experiments_fmri_get(experiment_id) if fmri is None: return None # Delete reference fMRI data object and set reference in experiment to # None. If the result of delete fMRI object is None we return None. # Alternatively, throw an exception to signal invalid database state. fmri = self.funcdata.delete_object(fmri.identifier) if not fmri is None: self.experiments.update_fmri_data(experiment_id, None) return funcdata.FMRIDataHandle(fmri, experiment_id)
[ "def", "experiments_fmri_delete", "(", "self", ",", "experiment_id", ")", ":", "# Get experiment fMRI to ensure that it exists", "fmri", "=", "self", ".", "experiments_fmri_get", "(", "experiment_id", ")", "if", "fmri", "is", "None", ":", "return", "None", "# Delete r...
Delete fMRI data object associated with given experiment. Raises ValueError if an attempt is made to delete a read-only resource. Parameters ---------- experiment_id : string Unique experiment identifier Returns ------- FMRIDataHandle Handle for deleted data object or None if experiment is unknown or has no fMRI data object associated with it
[ "Delete", "fMRI", "data", "object", "associated", "with", "given", "experiment", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L2270-L2295
def _construct_w(self, inputs): """Construct the convolution weight matrix. Figures out the shape of the weight matrix, initialize it, and return it. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: w: A weight matrix of the same type as `inputs` and of shape [kernel_shape, 1, 1]. """ weight_shape = self._kernel_shape + (1, 1) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:2], dtype=inputs.dtype) w = tf.get_variable("w", shape=weight_shape, dtype=inputs.dtype, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) return w
[ "def", "_construct_w", "(", "self", ",", "inputs", ")", ":", "weight_shape", "=", "self", ".", "_kernel_shape", "+", "(", "1", ",", "1", ")", "if", "\"w\"", "not", "in", "self", ".", "_initializers", ":", "self", ".", "_initializers", "[", "\"w\"", "]"...
Construct the convolution weight matrix. Figures out the shape of the weight matrix, initialize it, and return it. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: w: A weight matrix of the same type as `inputs` and of shape [kernel_shape, 1, 1].
[ "Construct", "the", "convolution", "weight", "matrix", "." ]
python
train
aaugustin/django-sesame
sesame/backends.py
https://github.com/aaugustin/django-sesame/blob/4bf902c637b0674b8fc78bc95937271948c99282/sesame/backends.py#L58-L67
def unsign(self, token): """ Extract the data from a signed ``token``. """ if self.max_age is None: data = self.signer.unsign(token) else: data = self.signer.unsign(token, max_age=self.max_age) return signing.b64_decode(data.encode())
[ "def", "unsign", "(", "self", ",", "token", ")", ":", "if", "self", ".", "max_age", "is", "None", ":", "data", "=", "self", ".", "signer", ".", "unsign", "(", "token", ")", "else", ":", "data", "=", "self", ".", "signer", ".", "unsign", "(", "tok...
Extract the data from a signed ``token``.
[ "Extract", "the", "data", "from", "a", "signed", "token", "." ]
python
train
bwohlberg/sporco
docs/source/docntbk.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L914-L959
def substitute_ref_with_url(self, txt): """ In the string `txt`, replace sphinx references with corresponding links to online docs. """ # Find sphinx cross-references mi = re.finditer(r':([^:]+):`([^`]+)`', txt) if mi: # Iterate over match objects in iterator returned by re.finditer for mo in mi: # Initialize link label and url for substitution lbl = None url = None # Get components of current match: full matching text, the # role label in the reference, and the name of the # referenced type mtxt = mo.group(0) role = mo.group(1) name = mo.group(2) # If role is 'ref', the name component is in the form # label <name> if role == 'ref': ma = re.match(r'\s*([^\s<]+)\s*<([^>]+)+>', name) if ma: name = ma.group(2) lbl = ma.group(1) # Try to look up the current cross-reference. Issue a # warning if the lookup fails, and do the substitution # if it succeeds. try: url = self.get_docs_url(role, name) if role != 'ref': lbl = self.get_docs_label(role, name) except KeyError as ex: if len(ex.args) == 1 or ex.args[1] != 'role': print('Warning: %s' % ex.args[0]) else: # If the cross-reference lookup was successful, replace # it with an appropriate link to the online docs rtxt = '[%s](%s)' % (lbl, url) txt = re.sub(mtxt, rtxt, txt, flags=re.M) return txt
[ "def", "substitute_ref_with_url", "(", "self", ",", "txt", ")", ":", "# Find sphinx cross-references", "mi", "=", "re", ".", "finditer", "(", "r':([^:]+):`([^`]+)`'", ",", "txt", ")", "if", "mi", ":", "# Iterate over match objects in iterator returned by re.finditer", "...
In the string `txt`, replace sphinx references with corresponding links to online docs.
[ "In", "the", "string", "txt", "replace", "sphinx", "references", "with", "corresponding", "links", "to", "online", "docs", "." ]
python
train
SmokinCaterpillar/pypet
examples/example_17_wrapping_an_existing_project/pypetwrap.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_17_wrapping_an_existing_project/pypetwrap.py#L34-L49
def wrap_automaton(traj): """ Simple wrapper function for compatibility with *pypet*. We will call the original simulation functions with data extracted from ``traj``. The resulting automaton patterns wil also be stored into the trajectory. :param traj: Trajectory container for data """ # Make initial state initial_state = make_initial_state(traj.initial_name, traj.ncells, traj.seed) # Run simulation pattern = cellular_automaton_1D(initial_state, traj.rule_number, traj.steps) # Store the computed pattern traj.f_add_result('pattern', pattern, comment='Development of CA over time')
[ "def", "wrap_automaton", "(", "traj", ")", ":", "# Make initial state", "initial_state", "=", "make_initial_state", "(", "traj", ".", "initial_name", ",", "traj", ".", "ncells", ",", "traj", ".", "seed", ")", "# Run simulation", "pattern", "=", "cellular_automaton...
Simple wrapper function for compatibility with *pypet*. We will call the original simulation functions with data extracted from ``traj``. The resulting automaton patterns wil also be stored into the trajectory. :param traj: Trajectory container for data
[ "Simple", "wrapper", "function", "for", "compatibility", "with", "*", "pypet", "*", "." ]
python
test
wavycloud/pyboto3
pyboto3/rds.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/rds.py#L8639-L8941
def restore_db_instance_from_db_snapshot(DBInstanceIdentifier=None, DBSnapshotIdentifier=None, DBInstanceClass=None, Port=None, AvailabilityZone=None, DBSubnetGroupName=None, MultiAZ=None, PubliclyAccessible=None, AutoMinorVersionUpgrade=None, LicenseModel=None, DBName=None, Engine=None, Iops=None, OptionGroupName=None, Tags=None, StorageType=None, TdeCredentialArn=None, TdeCredentialPassword=None, Domain=None, CopyTagsToSnapshot=None, DomainIAMRoleName=None, EnableIAMDatabaseAuthentication=None): """ Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the most of original configuration with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment. If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. 
See also: AWS API Documentation :example: response = client.restore_db_instance_from_db_snapshot( DBInstanceIdentifier='string', DBSnapshotIdentifier='string', DBInstanceClass='string', Port=123, AvailabilityZone='string', DBSubnetGroupName='string', MultiAZ=True|False, PubliclyAccessible=True|False, AutoMinorVersionUpgrade=True|False, LicenseModel='string', DBName='string', Engine='string', Iops=123, OptionGroupName='string', Tags=[ { 'Key': 'string', 'Value': 'string' }, ], StorageType='string', TdeCredentialArn='string', TdeCredentialPassword='string', Domain='string', CopyTagsToSnapshot=True|False, DomainIAMRoleName='string', EnableIAMDatabaseAuthentication=True|False ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server) First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens Example: my-snapshot-id :type DBSnapshotIdentifier: string :param DBSnapshotIdentifier: [REQUIRED] The identifier for the DB snapshot to restore from. Constraints: Must contain from 1 to 255 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. :type DBInstanceClass: string :param DBInstanceClass: The compute and memory capacity of the Amazon RDS DB instance. 
Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large :type Port: integer :param Port: The port number on which the database accepts connections. Default: The same port as the original DB instance Constraints: Value must be 1150-65535 :type AvailabilityZone: string :param AvailabilityZone: The EC2 Availability Zone that the database instance will be created in. Default: A random, system-chosen Availability Zone. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true . Example: us-east-1a :type DBSubnetGroupName: string :param DBSubnetGroupName: The DB subnet group name to use for the new instance. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default. Example: mySubnetgroup :type MultiAZ: boolean :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true . :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. 
Default VPC: true VPC: false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. :type LicenseModel: string :param LicenseModel: License model information for the restored DB instance. Default: Same as source. Valid values: license-included | bring-your-own-license | general-public-license :type DBName: string :param DBName: The database name for the restored DB instance. Note This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines. :type Engine: string :param Engine: The database engine to use for the new instance. Default: The same as source Constraint: Must be compatible with the engine of the source. You can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora :type Iops: integer :param Iops: Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts. Constraints: Must be an integer greater than 1000. SQL Server Setting the IOPS value for the SQL Server database engine is not supported. :type OptionGroupName: string :param OptionGroupName: The name of the option group to be used for the restored DB instance. 
Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type Tags: list :param Tags: A list of tags. (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair. Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type StorageType: string :param StorageType: Specifies the storage type to be associated with the DB instance. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type TdeCredentialArn: string :param TdeCredentialArn: The ARN from the Key Store with which to associate the instance for TDE encryption. :type TdeCredentialPassword: string :param TdeCredentialPassword: The password for the given ARN from the Key Store in order to access the device. :type Domain: string :param Domain: Specify the Active Directory Domain to restore the instance in. :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false. 
:type DomainIAMRoleName: string :param DomainIAMRoleName: Specify the name of the IAM role to be used when making API calls to the Directory Service. :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher. Default: false :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': 
datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: CreateDBInstance DeleteDBInstance ModifyDBInstance """ pass
[ "def", "restore_db_instance_from_db_snapshot", "(", "DBInstanceIdentifier", "=", "None", ",", "DBSnapshotIdentifier", "=", "None", ",", "DBInstanceClass", "=", "None", ",", "Port", "=", "None", ",", "AvailabilityZone", "=", "None", ",", "DBSubnetGroupName", "=", "No...
Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the most of original configuration with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment. If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. 
See also: AWS API Documentation :example: response = client.restore_db_instance_from_db_snapshot( DBInstanceIdentifier='string', DBSnapshotIdentifier='string', DBInstanceClass='string', Port=123, AvailabilityZone='string', DBSubnetGroupName='string', MultiAZ=True|False, PubliclyAccessible=True|False, AutoMinorVersionUpgrade=True|False, LicenseModel='string', DBName='string', Engine='string', Iops=123, OptionGroupName='string', Tags=[ { 'Key': 'string', 'Value': 'string' }, ], StorageType='string', TdeCredentialArn='string', TdeCredentialPassword='string', Domain='string', CopyTagsToSnapshot=True|False, DomainIAMRoleName='string', EnableIAMDatabaseAuthentication=True|False ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server) First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens Example: my-snapshot-id :type DBSnapshotIdentifier: string :param DBSnapshotIdentifier: [REQUIRED] The identifier for the DB snapshot to restore from. Constraints: Must contain from 1 to 255 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. :type DBInstanceClass: string :param DBInstanceClass: The compute and memory capacity of the Amazon RDS DB instance. 
Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large :type Port: integer :param Port: The port number on which the database accepts connections. Default: The same port as the original DB instance Constraints: Value must be 1150-65535 :type AvailabilityZone: string :param AvailabilityZone: The EC2 Availability Zone that the database instance will be created in. Default: A random, system-chosen Availability Zone. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true . Example: us-east-1a :type DBSubnetGroupName: string :param DBSubnetGroupName: The DB subnet group name to use for the new instance. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default. Example: mySubnetgroup :type MultiAZ: boolean :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true . :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case. 
Default VPC: true VPC: false If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. :type LicenseModel: string :param LicenseModel: License model information for the restored DB instance. Default: Same as source. Valid values: license-included | bring-your-own-license | general-public-license :type DBName: string :param DBName: The database name for the restored DB instance. Note This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines. :type Engine: string :param Engine: The database engine to use for the new instance. Default: The same as source Constraint: Must be compatible with the engine of the source. You can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora :type Iops: integer :param Iops: Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts. Constraints: Must be an integer greater than 1000. SQL Server Setting the IOPS value for the SQL Server database engine is not supported. :type OptionGroupName: string :param OptionGroupName: The name of the option group to be used for the restored DB instance. 
Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type Tags: list :param Tags: A list of tags. (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair. Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type StorageType: string :param StorageType: Specifies the storage type to be associated with the DB instance. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type TdeCredentialArn: string :param TdeCredentialArn: The ARN from the Key Store with which to associate the instance for TDE encryption. :type TdeCredentialPassword: string :param TdeCredentialPassword: The password for the given ARN from the Key Store in order to access the device. :type Domain: string :param Domain: Specify the Active Directory Domain to restore the instance in. :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false. 
:type DomainIAMRoleName: string :param DomainIAMRoleName: Specify the name of the IAM role to be used when making API calls to the Directory Service. :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher. Default: false :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': 
datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: CreateDBInstance DeleteDBInstance ModifyDBInstance
[ "Creates", "a", "new", "DB", "instance", "from", "a", "DB", "snapshot", ".", "The", "target", "database", "is", "created", "from", "the", "source", "database", "restore", "point", "with", "the", "most", "of", "original", "configuration", "with", "the", "defa...
python
train
mwickert/scikit-dsp-comm
sk_dsp_comm/fir_design_helper.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L373-L396
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR bandstop filter using remez with order determination. The filter order is determined based on f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop # Initially make sure the number of taps is even so N_bump needs to be odd if np.mod(n,2) != 0: n += 1 N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2, maxiter = 25, grid_density = 16) print('N_bump must be odd to maintain odd filter length') print('Remez filter taps = %d.' % N_taps) return b
[ "def", "fir_remez_bsf", "(", "f_pass1", ",", "f_stop1", ",", "f_stop2", ",", "f_pass2", ",", "d_pass", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "5", ")", ":", "n", ",", "ff", ",", "aa", ",", "wts", "=", "bandstop_order", "(", "f_pa...
Design an FIR bandstop filter using remez with order determination. The filter order is determined based on f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018
[ "Design", "an", "FIR", "bandstop", "filter", "using", "remez", "with", "order", "determination", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass1", "Hz", "f_stop1", "Hz", "f_stop2", "Hz", "f_pass2", "Hz", "and", "the", "desired", "p...
python
valid
arista-eosplus/pyeapi
pyeapi/api/staticroute.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/staticroute.py#L182-L201
def create(self, ip_dest, next_hop, **kwargs): """Create a static route Args: ip_dest (string): The ip address of the destination in the form of A.B.C.D/E next_hop (string): The next hop interface or ip address **kwargs['next_hop_ip'] (string): The next hop address on destination interface **kwargs['distance'] (string): Administrative distance for this route **kwargs['tag'] (string): Route tag **kwargs['route_name'] (string): Route name Returns: True if the operation succeeds, otherwise False. """ # Call _set_route with delete and default set to False return self._set_route(ip_dest, next_hop, **kwargs)
[ "def", "create", "(", "self", ",", "ip_dest", ",", "next_hop", ",", "*", "*", "kwargs", ")", ":", "# Call _set_route with delete and default set to False", "return", "self", ".", "_set_route", "(", "ip_dest", ",", "next_hop", ",", "*", "*", "kwargs", ")" ]
Create a static route Args: ip_dest (string): The ip address of the destination in the form of A.B.C.D/E next_hop (string): The next hop interface or ip address **kwargs['next_hop_ip'] (string): The next hop address on destination interface **kwargs['distance'] (string): Administrative distance for this route **kwargs['tag'] (string): Route tag **kwargs['route_name'] (string): Route name Returns: True if the operation succeeds, otherwise False.
[ "Create", "a", "static", "route" ]
python
train