repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
jart/fabulous
fabulous/casts.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/casts.py#L32-L42
def file(value, **kwarg):
    """value should be a path to file in the filesystem.
    returns a file object
    """
    # Defaults (mode, buffering, ...) are deliberately delegated to open()
    # instead of being hard-coded here.
    try:
        return open(value, **kwarg)
    except IOError as e:
        raise ValueError("unable to open %s : %s" % (path.abspath(value), e))
[ "def", "file", "(", "value", ",", "*", "*", "kwarg", ")", ":", "#a bit weird, but I don't want to hard code default values", "try", ":", "f", "=", "open", "(", "value", ",", "*", "*", "kwarg", ")", "except", "IOError", "as", "e", ":", "raise", "ValueError", ...
value should be a path to file in the filesystem. returns a file object
[ "value", "should", "be", "a", "path", "to", "file", "in", "the", "filesystem", ".", "returns", "a", "file", "object" ]
python
train
COALAIP/pycoalaip
coalaip/model_validators.py
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L21-L42
def does_not_contain(*avoid_keys, error_cls=ValueError):
    """Decorator: value must not contain any of the :attr:`avoid_keys`.

    Args:
        *avoid_keys: keys that must not appear in the validated mapping
        error_cls: exception class raised on a violation (default: ValueError)
    """
    def decorator(func):
        def not_contains(instance, attribute, value):
            instance_name = instance.__class__.__name__
            num_matched_keys = len(set(avoid_keys) & value.keys())
            if num_matched_keys > 0:
                avoid_keys_str = ', '.join(avoid_keys)
                # Bug fix: '{{avoid_keys}}' was brace-escaped, so the message
                # printed the literal text '{avoid_keys}' instead of the
                # offending key list; the closing paren was also missing.
                err_str = ("Given keys ({num_matched} of {avoid_keys}) "
                           "that must not be given in the '{attr}' of a "
                           "'{cls}'").format(num_matched=num_matched_keys,
                                             avoid_keys=avoid_keys_str,
                                             attr=attribute.name,
                                             cls=instance_name)
                raise error_cls(err_str)
            return func(instance, attribute, value)
        return not_contains
    return decorator
[ "def", "does_not_contain", "(", "*", "avoid_keys", ",", "error_cls", "=", "ValueError", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "not_contains", "(", "instance", ",", "attribute", ",", "value", ")", ":", "instance_name", "=", "instance", ...
Decorator: value must not contain any of the :attr:`avoid_keys`.
[ "Decorator", ":", "value", "must", "not", "contain", "any", "of", "the", ":", "attr", ":", "avoid_keys", "." ]
python
train
rouk1/django-image-renderer
renderer/models.py
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L49-L64
def get_rendition_size(self, width=0, height=0):
    '''Return the (width, height) of a rendition, preserving aspect ratio.

    Passing 0 for both dimensions returns the master image's size; passing 0
    for exactly one dimension derives it from the master aspect ratio.

    Fix: the previous docstring ("returns real rendition URL") was wrong --
    this method computes a size tuple, not a URL.
    '''
    if width == 0 and height == 0:
        return (self.master_width, self.master_height)

    target_width = int(width)
    target_height = int(height)
    # Aspect ratio of the master image; float() keeps true division under
    # Python 2 as well.
    ratio = self.master_width / float(self.master_height)

    if target_height == 0 and target_width != 0:
        target_height = int(target_width / ratio)
    if target_height != 0 and target_width == 0:
        target_width = int(target_height * ratio)

    return target_width, target_height
[ "def", "get_rendition_size", "(", "self", ",", "width", "=", "0", ",", "height", "=", "0", ")", ":", "if", "width", "==", "0", "and", "height", "==", "0", ":", "return", "(", "self", ".", "master_width", ",", "self", ".", "master_height", ")", "targe...
returns real rendition URL
[ "returns", "real", "rendition", "URL" ]
python
train
python-odin/odinweb
odinweb/signing.py
https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/signing.py#L49-L67
def sign_url_path(url, secret_key, expire_in=None, digest=None):
    # type: (str, bytes, int, Callable) -> str
    """
    Sign a URL (excluding the domain and scheme).

    :param url: URL to sign
    :param secret_key: Secret key
    :param expire_in: Expiry time.
    :param digest: Specify the digest function to use; default is sha256 from hashlib
    :return: Signed URL
    """
    parsed = urlparse(url)
    args = MultiValueDict(parse_qs(parsed.query))
    # A random token salts every signature so identical URLs sign differently.
    args['_'] = token()
    if expire_in is not None:
        args['expires'] = int(time() + expire_in)
    args['signature'] = _generate_signature(parsed.path, secret_key, args, digest)
    signed_query = urlencode(list(args.sorteditems(True)))
    return "%s?%s" % (parsed.path, signed_query)
[ "def", "sign_url_path", "(", "url", ",", "secret_key", ",", "expire_in", "=", "None", ",", "digest", "=", "None", ")", ":", "# type: (str, bytes, int, Callable) -> str", "result", "=", "urlparse", "(", "url", ")", "query_args", "=", "MultiValueDict", "(", "parse...
Sign a URL (excluding the domain and scheme). :param url: URL to sign :param secret_key: Secret key :param expire_in: Expiry time. :param digest: Specify the digest function to use; default is sha256 from hashlib :return: Signed URL
[ "Sign", "a", "URL", "(", "excluding", "the", "domain", "and", "scheme", ")", "." ]
python
train
has2k1/plotnine
plotnine/aes.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/aes.py#L169-L177
def get_calculated_aes(aesthetics):
    """
    Return a list of the aesthetics that are calculated
    """
    return [name for name, value in aesthetics.items()
            if is_calculated_aes(value)]
[ "def", "get_calculated_aes", "(", "aesthetics", ")", ":", "calculated_aesthetics", "=", "[", "]", "for", "name", ",", "value", "in", "aesthetics", ".", "items", "(", ")", ":", "if", "is_calculated_aes", "(", "value", ")", ":", "calculated_aesthetics", ".", "...
Return a list of the aesthetics that are calculated
[ "Return", "a", "list", "of", "the", "aesthetics", "that", "are", "calculated" ]
python
train
saltstack/salt
salt/modules/opkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L1368-L1376
def _set_trusted_option_if_needed(repostr, trusted): ''' Set trusted option to repo if needed ''' if trusted is True: repostr += ' [trusted=yes]' elif trusted is False: repostr += ' [trusted=no]' return repostr
[ "def", "_set_trusted_option_if_needed", "(", "repostr", ",", "trusted", ")", ":", "if", "trusted", "is", "True", ":", "repostr", "+=", "' [trusted=yes]'", "elif", "trusted", "is", "False", ":", "repostr", "+=", "' [trusted=no]'", "return", "repostr" ]
Set trusted option to repo if needed
[ "Set", "trusted", "option", "to", "repo", "if", "needed" ]
python
train
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L3203-L3209
def updateObservers(self):
    """ Fire update method in all attached observers in order of attachment. """
    for observer in self.m_observers:
        try:
            observer.update(self.m_req)
        except Exception:
            # Bug fix: traceback.format_exc() takes an optional line *limit*,
            # not an exc_info tuple, so passing sys.exc_info() was wrong.
            # The bare ``except:`` also swallowed SystemExit/KeyboardInterrupt.
            ekm_log(traceback.format_exc())
[ "def", "updateObservers", "(", "self", ")", ":", "for", "observer", "in", "self", ".", "m_observers", ":", "try", ":", "observer", ".", "update", "(", "self", ".", "m_req", ")", "except", ":", "ekm_log", "(", "traceback", ".", "format_exc", "(", "sys", ...
Fire update method in all attached observers in order of attachment.
[ "Fire", "update", "method", "in", "all", "attached", "observers", "in", "order", "of", "attachment", "." ]
python
test
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L346-L375
def autoLayout(self,
               padX=None,
               padY=None,
               direction=Qt.Horizontal,
               layout='Layered',
               animate=0,
               centerOn=None,
               center=None,
               debug=False):
    """
    Automatically lays out all the nodes in the scene by delegating to
    the autoLayoutNodes method.

    :param      padX      | <int> || None | default is 2 * cell width
                padY      | <int> || None | default is 2 * cell height
                direction | <Qt.Direction>
                layout    | <str> | name of the layout plugin to use
                animate   | <int> | number of seconds to animate over

    :return     {<XNode>: <QRectF>, ..} | new rects per affected node
    """
    all_nodes = self.nodes()
    return self.autoLayoutNodes(all_nodes, padX, padY, direction,
                                layout, animate, centerOn, center, debug)
[ "def", "autoLayout", "(", "self", ",", "padX", "=", "None", ",", "padY", "=", "None", ",", "direction", "=", "Qt", ".", "Horizontal", ",", "layout", "=", "'Layered'", ",", "animate", "=", "0", ",", "centerOn", "=", "None", ",", "center", "=", "None",...
Automatically lays out all the nodes in the scene using the \ autoLayoutNodes method. :param padX | <int> || None | default is 2 * cell width padY | <int> || None | default is 2 * cell height direction | <Qt.Direction> layout | <str> | name of the layout plugin to use animate | <int> | number of seconds to animate over :return {<XNode>: <QRectF>, ..} | new rects per affected node
[ "Automatically", "lays", "out", "all", "the", "nodes", "in", "the", "scene", "using", "the", "\\", "autoLayoutNodes", "method", ".", ":", "param", "padX", "|", "<int", ">", "||", "None", "|", "default", "is", "2", "*", "cell", "width", "padY", "|", "<i...
python
train
defunkt/pystache
pystache/init.py
https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/init.py#L13-L19
def render(template, context=None, **kwargs):
    """
    Return the given template string rendered using the given context.
    """
    # Convenience wrapper: a throwaway default-configured Renderer suffices.
    return Renderer().render(template, context, **kwargs)
[ "def", "render", "(", "template", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "renderer", "=", "Renderer", "(", ")", "return", "renderer", ".", "render", "(", "template", ",", "context", ",", "*", "*", "kwargs", ")" ]
Return the given template string rendered using the given context.
[ "Return", "the", "given", "template", "string", "rendered", "using", "the", "given", "context", "." ]
python
train
oseledets/ttpy
tt/solvers.py
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/solvers.py#L10-L95
def GMRES(A, u_0, b, eps=1e-6, maxit=100, m=20, _iteration=0, callback=None, verbose=0):
    """ Flexible TT GMRES
    :param A: matvec(x[, eps])
    :param u_0: initial vector
    :param b: answer
    :param maxit: max number of iterations
    :param eps: required accuracy
    :param m: number of iteration without restart
    :param _iteration: iteration counter
    :param callback:
    :param verbose: to print debug info or not
    :return: answer, residual
    >>> from tt import GMRES
    >>> def matvec(x, eps):
    >>>     return tt.matvec(S, x).round(eps)
    >>> answer, res = GMRES(matvec, u_0, b, eps=1e-8)
    """
    maxitexceeded = False
    converged = False
    if verbose:
        print('GMRES(m=%d, _iteration=%d, maxit=%d)' % (m, _iteration, maxit))
    # Krylov basis vectors (object array whose entries are TT tensors) and the
    # small least-squares factor R built column by column; g is the rotated
    # right-hand side, (c, s) the accumulated rotation coefficients.
    v = np.ones((m + 1), dtype=object) * np.nan
    R = np.ones((m, m)) * np.nan
    g = np.zeros(m)
    s = np.ones(m) * np.nan
    c = np.ones(m) * np.nan
    # Initial residual r0 = b - A(u_0), rounded to the working accuracy.
    v[0] = b - A(u_0, eps=eps)
    v[0] = v[0].round(eps)
    resnorm = v[0].norm()
    curr_beta = resnorm
    bnorm = b.norm()
    wlen = resnorm
    q = m
    for j in range(m):
        _iteration += 1
        # Inexact-Krylov heuristic: relax the matvec/rounding tolerance as the
        # residual norm drops relative to the starting residual.
        delta = eps / (curr_beta / resnorm)
        if verbose:
            print("it = %d delta = " % _iteration, delta)
        v[j] *= 1.0 / wlen
        v[j + 1] = A(v[j], eps=delta)
        # Modified Gram-Schmidt: orthogonalise against all previous basis vectors.
        for i in range(j + 1):
            R[i, j] = tt.dot(v[j + 1], v[i])
            v[j + 1] = v[j + 1] - R[i, j] * v[i]
        v[j + 1] = v[j + 1].round(delta)
        wlen = v[j + 1].norm()
        # Apply the previously accumulated plane rotations to the new column.
        for i in range(j):
            r1 = R[i, j]
            r2 = R[i + 1, j]
            R[i, j] = c[i] * r1 - s[i] * r2
            R[i + 1, j] = c[i] * r2 + s[i] * r1
        # New rotation eliminating the subdiagonal entry (wlen); hypot avoids
        # overflow when forming the norm of the 2-vector.
        denom = np.hypot(wlen, R[j, j])
        s[j] = wlen / denom
        c[j] = -R[j, j] / denom
        R[j, j] = -denom
        g[j] = c[j] * curr_beta
        # curr_beta tracks the current residual norm for free via the rotations.
        curr_beta *= s[j]
        if verbose:
            print("it = {}, ||r|| = {}".format(_iteration, curr_beta / bnorm))
        converged = (curr_beta / bnorm) < eps or (curr_beta / resnorm) < eps
        maxitexceeded = _iteration >= maxit
        if converged or maxitexceeded:
            q = j + 1
            break
    # Solve the small triangular system and expand the update in the basis.
    y = la.solve_triangular(R[:q, :q], g[:q], check_finite=False)
    for idx in range(q):
        u_0 += v[idx] * y[idx]
    u_0 = u_0.round(eps)
    if callback is not None:
        callback(u_0)
    if converged or maxitexceeded:
        # NOTE(review): the returned residual is this cycle's *initial*
        # residual over ||b||, not the final one -- confirm intended.
        return u_0, resnorm / bnorm
    # Not converged within m steps: restart from the improved iterate.
    return GMRES(A, u_0, b, eps, maxit, m, _iteration, callback=callback, verbose=verbose)
[ "def", "GMRES", "(", "A", ",", "u_0", ",", "b", ",", "eps", "=", "1e-6", ",", "maxit", "=", "100", ",", "m", "=", "20", ",", "_iteration", "=", "0", ",", "callback", "=", "None", ",", "verbose", "=", "0", ")", ":", "maxitexceeded", "=", "False"...
Flexible TT GMRES :param A: matvec(x[, eps]) :param u_0: initial vector :param b: answer :param maxit: max number of iterations :param eps: required accuracy :param m: number of iteration without restart :param _iteration: iteration counter :param callback: :param verbose: to print debug info or not :return: answer, residual >>> from tt import GMRES >>> def matvec(x, eps): >>> return tt.matvec(S, x).round(eps) >>> answer, res = GMRES(matvec, u_0, b, eps=1e-8)
[ "Flexible", "TT", "GMRES", ":", "param", "A", ":", "matvec", "(", "x", "[", "eps", "]", ")", ":", "param", "u_0", ":", "initial", "vector", ":", "param", "b", ":", "answer", ":", "param", "maxit", ":", "max", "number", "of", "iterations", ":", "par...
python
train
quikmile/trellio
trellio/services.py
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/services.py#L105-L118
def api(func=None, timeout=API_TIMEOUT):  # incoming
    """
    provide a request/response api
    receives any requests here and return value is the response
    all functions must have the following signature
        - request_id
        - entity (partition/routing key)
        followed by kwargs
    """
    # Invoked without a function (e.g. ``@api(timeout=...)``): hand back a
    # partially-applied decorator that will receive the function later.
    if func is None:
        return partial(api, timeout=timeout)
    return _get_api_decorator(func=func, timeout=timeout)
[ "def", "api", "(", "func", "=", "None", ",", "timeout", "=", "API_TIMEOUT", ")", ":", "# incoming", "if", "func", "is", "None", ":", "return", "partial", "(", "api", ",", "timeout", "=", "timeout", ")", "else", ":", "wrapper", "=", "_get_api_decorator", ...
provide a request/response api receives any requests here and return value is the response all functions must have the following signature - request_id - entity (partition/routing key) followed by kwargs
[ "provide", "a", "request", "/", "response", "api", "receives", "any", "requests", "here", "and", "return", "value", "is", "the", "response", "all", "functions", "must", "have", "the", "following", "signature", "-", "request_id", "-", "entity", "(", "partition"...
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/writer.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/writer.py#L163-L170
def Forget(self, obj):
    '''Forget we've seen this object.
    '''
    ident = _get_idstr(obj)
    # Forgetting an object we never memoized is deliberately a no-op.
    try:
        self.memo.remove(ident)
    except ValueError:
        pass
[ "def", "Forget", "(", "self", ",", "obj", ")", ":", "obj", "=", "_get_idstr", "(", "obj", ")", "try", ":", "self", ".", "memo", ".", "remove", "(", "obj", ")", "except", "ValueError", ":", "pass" ]
Forget we've seen this object.
[ "Forget", "we", "ve", "seen", "this", "object", "." ]
python
train
RealGeeks/batman
batman/path_utils.py
https://github.com/RealGeeks/batman/blob/ac61d193cbc6cc736f61ae8cf5e933a576b50698/batman/path_utils.py#L4-L26
def normalize_path(path, basedir=None):
    """
    Expand a user path (``~``) and optionally join it onto *basedir*.

    Relative paths are combined with *basedir* when one is given; absolute
    paths are returned unchanged (apart from ``~`` expansion).
    """
    expanded = os.path.expanduser(path)
    if basedir and not os.path.isabs(path):
        return os.path.join(basedir, expanded)
    return expanded
[ "def", "normalize_path", "(", "path", ",", "basedir", "=", "None", ")", ":", "out", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", "and", "basedir", ":", "out", "=", "o...
Just a utility function that will both expand user paths if they are there: >>> normalize_path("~/ssh") '/home/kevin/ssh' but won't if they are not >>> normalize_path("ssh") 'ssh' and you can pass a basepath to combine >>> normalize_path("ssh","foo") '/foo/ssh' and wont' mess up absolute paths: >>> normalize_path("/home/kevin/ssh") '/home/kevin/ssh'
[ "Just", "a", "utility", "function", "that", "will", "both", "expand", "user", "paths", "if", "they", "are", "there", ":", ">>>", "normalize_path", "(", "~", "/", "ssh", ")", "/", "home", "/", "kevin", "/", "ssh" ]
python
train
paramiko/paramiko
paramiko/agent.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/agent.py#L167-L180
def get_connection(self):
    """
    Return a pair of socket object and string address.

    May block!
    """
    # Fix: the original wrapped everything in ``try: ... except: raise`` --
    # a bare re-raise is a no-op and only obscured the control flow.
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    conn.bind(self._agent._get_filename())
    conn.listen(1)
    (r, addr) = conn.accept()
    return r, addr
[ "def", "get_connection", "(", "self", ")", ":", "conn", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "conn", ".", "bind", "(", "self", ".", "_agent", ".", "_get_filename", "(", ")", ...
Return a pair of socket object and string address. May block!
[ "Return", "a", "pair", "of", "socket", "object", "and", "string", "address", "." ]
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L86-L119
def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
    """
    Adds a directed edge going from head_id to tail_id.
    Arbitrary data can be attached to the edge via edge_data.
    It may create the nodes if adding edges between nonexisting ones.

    :param head_id: head node
    :param tail_id: tail node
    :param edge_data: (optional) data attached to the edge
    :param create_nodes: (optional) creates the head_id or tail_id node in case they did not exist
    """
    edge_id = self.next_edge

    # Auto-create both endpoints when requested (add_node is idempotent here).
    if create_nodes:
        self.add_node(head_id)
        self.add_node(tail_id)

    # nodes[x][0] holds incoming edge ids, nodes[x][1] outgoing edge ids.
    try:
        self.nodes[tail_id][0].append(edge_id)
        self.nodes[head_id][1].append(edge_id)
    except KeyError:
        raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))

    # Record the edge and advance the id counter.
    self.edges[edge_id] = (head_id, tail_id, edge_data)
    self.next_edge += 1
[ "def", "add_edge", "(", "self", ",", "head_id", ",", "tail_id", ",", "edge_data", "=", "1", ",", "create_nodes", "=", "True", ")", ":", "# shorcut", "edge", "=", "self", ".", "next_edge", "# add nodes if on automatic node creation", "if", "create_nodes", ":", ...
Adds a directed edge going from head_id to tail_id. Arbitrary data can be attached to the edge via edge_data. It may create the nodes if adding edges between nonexisting ones. :param head_id: head node :param tail_id: tail node :param edge_data: (optional) data attached to the edge :param create_nodes: (optional) creates the head_id or tail_id node in case they did not exist
[ "Adds", "a", "directed", "edge", "going", "from", "head_id", "to", "tail_id", ".", "Arbitrary", "data", "can", "be", "attached", "to", "the", "edge", "via", "edge_data", ".", "It", "may", "create", "the", "nodes", "if", "adding", "edges", "between", "nonex...
python
train
NuGrid/NuGridPy
nugridpy/ppn.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ppn.py#L209-L273
def plot_xtime(self, y, x='time', label='default', labelx=None, labely=None, title=None, shape='.', logx=False, logy=True, base=10):
    ''' make a simple plot of two columns against each other.

    An example would be instance.plot_xtime('PB206', label='PB206 vs t_y').
    Recommend using the plot function DataPlot.plot(); it has more
    functionality.

    Parameters
    ----------
    y : string
        Column on Y-axis.
    x : string, optional
        Column on X-axis.  The default is "time".
    label : string, optional
        Legend label.  The default is "default" (use the column name).
    labelx : string, optional
        The label on the X axis.  The default is None.
    labely : string, optional
        The label on the Y axis.  The default is None.
    title : string, optional
        The title of the graph.  The default is None.
    shape : string, optional
        Shape/colour of the plotted points.  The default is '.'.
    logx : boolean, optional
        Whether the X axis is logarithmic.  The default is False.
    logy : boolean, optional
        Whether the Y axis is logarithmic.  The default is True.
    base : integer, optional
        The base of the logarithm.  The default is 10.

    Notes
    -----
    For all possible choices visit,
    <http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot>
    '''
    # Bug fix: ``label is 'default'`` compared string identity, which relies
    # on CPython interning and is not guaranteed; use equality instead.
    if label == 'default':
        lab_str = y
    else:
        lab_str = label

    # Some data files name the abscissa column 'age' instead of 'time';
    # fall back when the requested column is absent.
    try:
        self.get(x)
    except KeyError:
        x = 'age'

    DataPlot.plot(self, x, y, legend=lab_str, labelx=labelx, labely=labely,
                  title=title, shape=shape, logx=logx, logy=logy, base=base)
[ "def", "plot_xtime", "(", "self", ",", "y", ",", "x", "=", "'time'", ",", "label", "=", "'default'", ",", "labelx", "=", "None", ",", "labely", "=", "None", ",", "title", "=", "None", ",", "shape", "=", "'.'", ",", "logx", "=", "False", ",", "log...
make a simple plot of two columns against each other. An example would be instance.plot_xtime('PB206', label='PB206 vs t_y' Recomend using the plot function DataPlot.plot() it has more functionality. Parameters ---------- Y : string Column on Y-axis. X : string, optional Column on X-axis. The default is "time". label : string, optional Legend label. The default is "default". labelX : string, optional The label on the X axis. The default is None. labelY : string, optional The label on the Y axis. The default is None. title : string, optional The Title of the Graph. The default is None. shape : string, optional What shape and colour the user would like their plot in. The default is '.'. logX : boolean, optional A boolean of weather the user wants the x axis logarithmically. The default is False. logY : boolean, optional A boolean of weather the user wants the Y axis logarithmically. The default is True. base : integer, optional The base of the logarithm. The default is 10. Notes ----- For all possable choices visit, <http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot>
[ "make", "a", "simple", "plot", "of", "two", "columns", "against", "each", "other", "." ]
python
train
opennode/waldur-core
waldur_core/quotas/views.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/quotas/views.py#L96-L151
def history(self, request, uuid=None):
    """
    Historical data endpoints could be available for any objects (currently
    implemented for quotas and events count). The data is available at
    *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*.

    There are two ways to define datetime points for historical data.

    1. Send *?point=<timestamp>* parameter that can list.
       Response will contain historical data for each given point in the
       same order.
    2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>*
       parameters.  Result will contain <points_count> points from <start>
       to <end>.

    Response format:

    .. code-block:: javascript

        [
            {"point": <timestamp>, "object": {<object_representation>}},
            {"point": <timestamp>, "object": {<object_representation>}},
            ...
        ]

    NB! There will not be any "object" for corresponding point in response
    if there is no data about object for a given timestamp.
    """
    # Collect only the query parameters that were actually supplied; empty
    # values are dropped so the serializer's own defaults/validation apply.
    mapped = {
        'start': request.query_params.get('start'),
        'end': request.query_params.get('end'),
        'points_count': request.query_params.get('points_count'),
        'point_list': request.query_params.getlist('point'),
    }
    history_serializer = HistorySerializer(data={k: v for k, v in mapped.items() if v})
    history_serializer.is_valid(raise_exception=True)

    quota = self.get_object()
    serializer = self.get_serializer(quota)
    serialized_versions = []
    for point_date in history_serializer.get_filter_data():
        serialized = {'point': datetime_to_timestamp(point_date)}
        # Latest stored revision at or before the requested point in time.
        version = Version.objects.get_for_object(quota).filter(revision__date_created__lte=point_date)
        if version.exists():
            # make copy of serialized data and update fields that are stored
            # in the historical version; fields not under version control keep
            # their current values.
            version_object = version.first()._object_version.object
            serialized['object'] = serializer.data.copy()
            serialized['object'].update({
                f: getattr(version_object, f) for f in quota.get_version_fields()
            })
        # Points with no historical record still appear, just without "object".
        serialized_versions.append(serialized)
    return response.Response(serialized_versions, status=status.HTTP_200_OK)
[ "def", "history", "(", "self", ",", "request", ",", "uuid", "=", "None", ")", ":", "mapped", "=", "{", "'start'", ":", "request", ".", "query_params", ".", "get", "(", "'start'", ")", ",", "'end'", ":", "request", ".", "query_params", ".", "get", "("...
Historical data endpoints could be available for any objects (currently implemented for quotas and events count). The data is available at *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*. There are two ways to define datetime points for historical data. 1. Send *?point=<timestamp>* parameter that can list. Response will contain historical data for each given point in the same order. 2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>* parameters. Result will contain <points_count> points from <start> to <end>. Response format: .. code-block:: javascript [ { "point": <timestamp>, "object": {<object_representation>} }, { "point": <timestamp> "object": {<object_representation>} }, ... ] NB! There will not be any "object" for corresponding point in response if there is no data about object for a given timestamp.
[ "Historical", "data", "endpoints", "could", "be", "available", "for", "any", "objects", "(", "currently", "implemented", "for", "quotas", "and", "events", "count", ")", ".", "The", "data", "is", "available", "at", "*", "<object_endpoint", ">", "/", "history", ...
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1516-L1527
def from_json(cls, json):
    """Creates an instance of the InputReader for the given input shard state.

    Args:
      json: The InputReader state as a dict-like object.

    Returns:
      An instance of the InputReader configured using the values of json.
    """
    blob_key = json[cls.BLOB_KEY_PARAM]
    start_index = json[cls.START_INDEX_PARAM]
    end_index = json[cls.END_INDEX_PARAM]
    return cls(blob_key, start_index, end_index)
[ "def", "from_json", "(", "cls", ",", "json", ")", ":", "return", "cls", "(", "json", "[", "cls", ".", "BLOB_KEY_PARAM", "]", ",", "json", "[", "cls", ".", "START_INDEX_PARAM", "]", ",", "json", "[", "cls", ".", "END_INDEX_PARAM", "]", ")" ]
Creates an instance of the InputReader for the given input shard state. Args: json: The InputReader state as a dict-like object. Returns: An instance of the InputReader configured using the values of json.
[ "Creates", "an", "instance", "of", "the", "InputReader", "for", "the", "given", "input", "shard", "state", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L1127-L1193
def rebuild(self):
    """
    Rebuilds the grid lines based on the current settings and \
    scene width.  This method is triggered automatically, and \
    shouldn't need to be manually called.
    """
    rect = self.sceneRect()
    x = rect.left()
    y = rect.top()
    w = rect.width()
    h = rect.height()

    # calculate background gridlines: the two center lines cross at the
    # middle of the scene rect.
    cx = x + (w / 2)
    cy = y + (h / 2)

    self._centerLines = [QLine(cx, rect.top(), cx, rect.bottom()),
                         QLine(rect.left(), cy, rect.right(), cy)]

    # create the horizontal grid lines, radiating outward from the center
    # in symmetric +/- pairs until the scene edge is reached.
    delta = self.cellHeight()
    minor_lines = []
    major_lines = []
    count = 1
    while delta < (h / 2):
        pos_line = QLine(x, cy + delta, x + w, cy + delta)
        neg_line = QLine(x, cy - delta, x + w, cy - delta)

        # every 10th line will be a major line
        # NOTE(review): count is reset to 1 and then incremented at loop end,
        # so majors actually repeat every 9 lines after the first -- confirm
        # whether that spacing is intended.
        if count == 10:
            major_lines.append(pos_line)
            major_lines.append(neg_line)
            count = 1
        else:
            minor_lines.append(pos_line)
            minor_lines.append(neg_line)

        # update the current y location
        delta += self.cellHeight()
        count += 1

    # create the vertical grid lines, same scheme as above.
    delta = self.cellWidth()
    count = 1
    while delta < (w / 2):
        pos_line = QLine(cx + delta, y, cx + delta, y + h)
        neg_line = QLine(cx - delta, y, cx - delta, y + h)

        # every 10th line will be a major line
        if count == 10:
            major_lines.append(pos_line)
            major_lines.append(neg_line)
            count = 1
        else:
            minor_lines.append(pos_line)
            minor_lines.append(neg_line)

        # update the current x location
        delta += self.cellWidth()
        count += 1

    # set the line cache
    self._majorLines = major_lines
    self._minorLines = minor_lines

    # unmark the scene as being dirty
    self.setDirty(False)
[ "def", "rebuild", "(", "self", ")", ":", "rect", "=", "self", ".", "sceneRect", "(", ")", "x", "=", "rect", ".", "left", "(", ")", "y", "=", "rect", ".", "top", "(", ")", "w", "=", "rect", ".", "width", "(", ")", "h", "=", "rect", ".", "hei...
Rebuilds the grid lines based on the current settings and \ scene width. This method is triggered automatically, and \ shouldn't need to be manually called.
[ "Rebuilds", "the", "grid", "lines", "based", "on", "the", "current", "settings", "and", "\\", "scene", "width", ".", "This", "method", "is", "triggered", "automatically", "and", "\\", "shouldn", "t", "need", "to", "be", "manually", "called", "." ]
python
train
junzis/pyModeS
pyModeS/decoder/adsb.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/adsb.py#L311-L345
def nic_v2(msg, NICa, NICbc):
    """Calculate NIC, navigation integrity category, for ADS-B version 2

    Args:
        msg (string): 28 bytes hexadecimal message string
        NICa (int or string): NIC supplement - A
        NICbc (int or string): NIC supplement - B or C

    Returns:
        int or string: Horizontal Radius of Containment
    """
    # Hoisted: typecode() was previously computed up to three times.
    tc = typecode(msg)

    if tc < 5 or tc > 22:
        # Message reformatted as adjacent literals; the old backslash
        # continuation embedded the source indentation in the error text.
        raise RuntimeError(
            "%s: Not a surface position message (5<TC<8), "
            "airborne position message (8<TC<19), "
            "or airborne position with GNSS height (20<TC<22)" % msg
        )

    NIC = uncertainty.TC_NICv2_lookup[tc]

    # Airborne position with GNSS height (TC 20-22) carries no supplements.
    if 20 <= tc <= 22:
        NICs = 0
    else:
        NICs = NICa * 2 + NICbc

    try:
        # Some typecodes map to several NIC values keyed by the supplements.
        if isinstance(NIC, dict):
            NIC = NIC[NICs]
        Rc = uncertainty.NICv2[NIC][NICs]['Rc']
    except KeyError:
        Rc = uncertainty.NA

    return Rc
[ "def", "nic_v2", "(", "msg", ",", "NICa", ",", "NICbc", ")", ":", "if", "typecode", "(", "msg", ")", "<", "5", "or", "typecode", "(", "msg", ")", ">", "22", ":", "raise", "RuntimeError", "(", "\"%s: Not a surface position message (5<TC<8), \\\n airb...
Calculate NIC, navigation integrity category, for ADS-B version 2 Args: msg (string): 28 bytes hexadecimal message string NICa (int or string): NIC supplement - A NICbc (int or srting): NIC supplement - B or C Returns: int or string: Horizontal Radius of Containment
[ "Calculate", "NIC", "navigation", "integrity", "category", "for", "ADS", "-", "B", "version", "2" ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/mattermost.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/mattermost.py#L297-L311
def posts(self, channel, page=None):
    """Fetch the history of a channel."""
    entrypoint = self.RCHANNELS + '/' + channel + '/' + self.RPOSTS

    # Page size is always sent; the page number only when explicitly given.
    params = {self.PPER_PAGE: self.max_items}
    if page is not None:
        params[self.PPAGE] = page

    return self._fetch(entrypoint, params)
[ "def", "posts", "(", "self", ",", "channel", ",", "page", "=", "None", ")", ":", "entrypoint", "=", "self", ".", "RCHANNELS", "+", "'/'", "+", "channel", "+", "'/'", "+", "self", ".", "RPOSTS", "params", "=", "{", "self", ".", "PPER_PAGE", ":", "se...
Fetch the history of a channel.
[ "Fetch", "the", "history", "of", "a", "channel", "." ]
python
test
NYUCCL/psiTurk
psiturk/psiturk_shell.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L525-L532
def print_config(self, _): ''' Print configuration. ''' for section in self.config.sections(): print '[%s]' % section items = dict(self.config.items(section)) for k in items: print "%(a)s=%(b)s" % {'a': k, 'b': items[k]} print ''
[ "def", "print_config", "(", "self", ",", "_", ")", ":", "for", "section", "in", "self", ".", "config", ".", "sections", "(", ")", ":", "print", "'[%s]'", "%", "section", "items", "=", "dict", "(", "self", ".", "config", ".", "items", "(", "section", ...
Print configuration.
[ "Print", "configuration", "." ]
python
train
saltstack/salt
salt/sdb/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/vault.py#L55-L85
def set_(key, value, profile=None): ''' Set a key/value pair in the vault service ''' if '?' in key: __utils__['versions.warn_until']( 'Neon', ( 'Using ? to seperate between the path and key for vault has been deprecated ' 'and will be removed in {version}. Please just use a /.' ), ) path, key = key.split('?') else: path, key = key.rsplit('/', 1) try: url = 'v1/{0}'.format(path) data = {key: value} response = __utils__['vault.make_request']( 'POST', url, profile, json=data) if response.status_code != 204: response.raise_for_status() return True except Exception as e: log.error('Failed to write secret! %s: %s', type(e).__name__, e) raise salt.exceptions.CommandExecutionError(e)
[ "def", "set_", "(", "key", ",", "value", ",", "profile", "=", "None", ")", ":", "if", "'?'", "in", "key", ":", "__utils__", "[", "'versions.warn_until'", "]", "(", "'Neon'", ",", "(", "'Using ? to seperate between the path and key for vault has been deprecated '", ...
Set a key/value pair in the vault service
[ "Set", "a", "key", "/", "value", "pair", "in", "the", "vault", "service" ]
python
train
elliterate/capybara.py
capybara/selector/selector.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/selector/selector.py#L172-L184
def build_selector(self): """ Selector: Returns a new :class:`Selector` instance with the current configuration. """ kwargs = { 'label': self.label, 'descriptions': self.descriptions, 'filters': self.filters} if self.format == "xpath": kwargs['xpath'] = self.func if self.format == "css": kwargs['css'] = self.func return Selector(self.name, **kwargs)
[ "def", "build_selector", "(", "self", ")", ":", "kwargs", "=", "{", "'label'", ":", "self", ".", "label", ",", "'descriptions'", ":", "self", ".", "descriptions", ",", "'filters'", ":", "self", ".", "filters", "}", "if", "self", ".", "format", "==", "\...
Selector: Returns a new :class:`Selector` instance with the current configuration.
[ "Selector", ":", "Returns", "a", "new", ":", "class", ":", "Selector", "instance", "with", "the", "current", "configuration", "." ]
python
test
deepmipt/DeepPavlov
deeppavlov/core/models/tf_backend.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/tf_backend.py#L22-L28
def _graph_wrap(func, graph): """Constructs function encapsulated in the graph.""" @wraps(func) def _wrapped(*args, **kwargs): with graph.as_default(): return func(*args, **kwargs) return _wrapped
[ "def", "_graph_wrap", "(", "func", ",", "graph", ")", ":", "@", "wraps", "(", "func", ")", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "graph", ".", "as_default", "(", ")", ":", "return", "func", "(", "*", "arg...
Constructs function encapsulated in the graph.
[ "Constructs", "function", "encapsulated", "in", "the", "graph", "." ]
python
test
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1417-L1430
def simxGetObjectVelocity(clientID, objectHandle, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' linearVel = (ct.c_float*3)() angularVel = (ct.c_float*3)() ret = c_GetObjectVelocity(clientID, objectHandle, linearVel, angularVel, operationMode) arr1 = [] for i in range(3): arr1.append(linearVel[i]) arr2 = [] for i in range(3): arr2.append(angularVel[i]) return ret, arr1, arr2
[ "def", "simxGetObjectVelocity", "(", "clientID", ",", "objectHandle", ",", "operationMode", ")", ":", "linearVel", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", ")", "angularVel", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", ")", "ret", ...
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
synw/dataswim
dataswim/charts/__init__.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/__init__.py#L458-L467
def ropt(self, name): """ Remove one option """ try: del self.chart_opts[name] except KeyError: self.warning("Option " + name + " is not set") except: self.err("Can not remove option " + name)
[ "def", "ropt", "(", "self", ",", "name", ")", ":", "try", ":", "del", "self", ".", "chart_opts", "[", "name", "]", "except", "KeyError", ":", "self", ".", "warning", "(", "\"Option \"", "+", "name", "+", "\" is not set\"", ")", "except", ":", "self", ...
Remove one option
[ "Remove", "one", "option" ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L186-L192
def resize(self, size:Union[int,TensorImageSize])->'Image': "Resize the image to `size`, size can be a single int." assert self._flow is None if isinstance(size, int): size=(self.shape[0], size, size) if tuple(size)==tuple(self.shape): return self self.flow = _affine_grid(size) return self
[ "def", "resize", "(", "self", ",", "size", ":", "Union", "[", "int", ",", "TensorImageSize", "]", ")", "->", "'Image'", ":", "assert", "self", ".", "_flow", "is", "None", "if", "isinstance", "(", "size", ",", "int", ")", ":", "size", "=", "(", "sel...
Resize the image to `size`, size can be a single int.
[ "Resize", "the", "image", "to", "size", "size", "can", "be", "a", "single", "int", "." ]
python
train
ejeschke/ginga
ginga/opengl/Camera.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/opengl/Camera.py#L155-L175
def pan_delta(self, dx_px, dy_px): """ This causes the scene to appear to translate right and up (i.e., what really happens is the camera is translated left and down). This is also called "panning" in some software packages. Passing in negative delta values causes the opposite motion. """ direction = self.target - self.position distance_from_target = direction.length() direction = direction.normalized() speed_per_radius = self.get_translation_speed(distance_from_target) px_per_unit = self.vport_radius_px / speed_per_radius right = direction ^ self.up translation = (right * (-dx_px / px_per_unit) + self.up * (-dy_px / px_per_unit)) self.position = self.position + translation self.target = self.target + translation
[ "def", "pan_delta", "(", "self", ",", "dx_px", ",", "dy_px", ")", ":", "direction", "=", "self", ".", "target", "-", "self", ".", "position", "distance_from_target", "=", "direction", ".", "length", "(", ")", "direction", "=", "direction", ".", "normalized...
This causes the scene to appear to translate right and up (i.e., what really happens is the camera is translated left and down). This is also called "panning" in some software packages. Passing in negative delta values causes the opposite motion.
[ "This", "causes", "the", "scene", "to", "appear", "to", "translate", "right", "and", "up", "(", "i", ".", "e", ".", "what", "really", "happens", "is", "the", "camera", "is", "translated", "left", "and", "down", ")", ".", "This", "is", "also", "called",...
python
train
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L1333-L1346
def delete(self, path_list, **kwargs): """ 删除文件或文件夹 :param path_list: 待删除的文件或文件夹列表,每一项为服务器路径 :type path_list: list """ data = { 'filelist': json.dumps([path for path in path_list]) } url = 'http://{0}/api/filemanager?opera=delete'.format(BAIDUPAN_SERVER) return self._request('filemanager', 'delete', url=url, data=data, **kwargs)
[ "def", "delete", "(", "self", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'filelist'", ":", "json", ".", "dumps", "(", "[", "path", "for", "path", "in", "path_list", "]", ")", "}", "url", "=", "'http://{0}/api/filemanager?op...
删除文件或文件夹 :param path_list: 待删除的文件或文件夹列表,每一项为服务器路径 :type path_list: list
[ "删除文件或文件夹" ]
python
train
iotaledger/iota.lib.py
iota/codecs.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/codecs.py#L197-L220
def check_trytes_codec(encoding): """ Determines which codec to use for the specified encoding. References: - https://docs.python.org/3/library/codecs.html#codecs.register """ if encoding == AsciiTrytesCodec.name: return AsciiTrytesCodec.get_codec_info() elif encoding == AsciiTrytesCodec.compat_name: warn( '"{old_codec}" codec will be removed in PyOTA v2.1. ' 'Use "{new_codec}" instead.'.format( new_codec=AsciiTrytesCodec.name, old_codec=AsciiTrytesCodec.compat_name, ), DeprecationWarning, ) return AsciiTrytesCodec.get_codec_info() return None
[ "def", "check_trytes_codec", "(", "encoding", ")", ":", "if", "encoding", "==", "AsciiTrytesCodec", ".", "name", ":", "return", "AsciiTrytesCodec", ".", "get_codec_info", "(", ")", "elif", "encoding", "==", "AsciiTrytesCodec", ".", "compat_name", ":", "warn", "(...
Determines which codec to use for the specified encoding. References: - https://docs.python.org/3/library/codecs.html#codecs.register
[ "Determines", "which", "codec", "to", "use", "for", "the", "specified", "encoding", "." ]
python
test
aestrivex/bctpy
bct/algorithms/motifs.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/motifs.py#L398-L477
def motif3struct_wei(W): ''' Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Motif intensity and coherence are weighted generalizations of motif frequency. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix (all weights between 0 and 1) Returns ------- I : 13xN np.ndarray motif intensity matrix Q : 13xN np.ndarray motif coherence matrix F : 13xN np.ndarray motif frequency matrix Notes ----- Average intensity and coherence are given by I./F and Q./F. ''' from scipy import io import os fname = os.path.join(os.path.dirname(__file__), motiflib) mot = io.loadmat(fname) m3 = mot['m3'] m3n = mot['m3n'] id3 = mot['id3'].squeeze() n3 = mot['n3'].squeeze() n = len(W) # number of vertices in W I = np.zeros((13, n)) # intensity Q = np.zeros((13, n)) # coherence F = np.zeros((13, n)) # frequency A = binarize(W, copy=True) # create binary adjmat As = np.logical_or(A, A.T) # symmetrized adjmat for u in range(n - 2): # v1: neighbors of u (>u) V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1]) for v1 in np.where(V1)[0]: # v2: neighbors of v1 (>u) V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1]) V2[V1] = 0 # not already in V1 # and all neighbors of u (>v1) V2 = np.logical_or( np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2) for v2 in np.where(V2)[0]: a = np.array((A[v1, u], A[v2, u], A[u, v1], A[v2, v1], A[u, v2], A[v1, 2])) s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a)) ix = np.squeeze(s == m3n) w = np.array((W[v1, u], W[v2, u], W[u, v1], W[v2, v1], W[u, v2], W[v1, v2])) M = w * m3[ix, :] id = id3[ix] - 1 l = n3[ix] x = np.sum(M, axis=1) / l # arithmetic mean M[M == 0] = 1 # enable geometric mean i = np.prod(M, axis=1)**(1 / l) # intensity q = i / x # coherence # add to cumulative counts I[id, u] += i I[id, v1] += i I[id, v2] += i Q[id, u] += q Q[id, v1] += q Q[id, v2] += q F[id, u] += 1 F[id, v1] += 1 F[id, v1] += 1 return I, Q, F
[ "def", "motif3struct_wei", "(", "W", ")", ":", "from", "scipy", "import", "io", "import", "os", "fname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "motiflib", ")", "mot", "=", "io", ".",...
Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Motif intensity and coherence are weighted generalizations of motif frequency. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix (all weights between 0 and 1) Returns ------- I : 13xN np.ndarray motif intensity matrix Q : 13xN np.ndarray motif coherence matrix F : 13xN np.ndarray motif frequency matrix Notes ----- Average intensity and coherence are given by I./F and Q./F.
[ "Structural", "motifs", "are", "patterns", "of", "local", "connectivity", ".", "Motif", "frequency", "is", "the", "frequency", "of", "occurrence", "of", "motifs", "around", "a", "node", ".", "Motif", "intensity", "and", "coherence", "are", "weighted", "generaliz...
python
train
pvlib/pvlib-python
pvlib/pvsystem.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L2667-L2795
def adrinverter(v_dc, p_dc, inverter, vtol=0.10): r''' Converts DC power and voltage to AC power using Anton Driesse's Grid-Connected PV Inverter efficiency model Parameters ---------- v_dc : numeric A scalar or pandas series of DC voltages, in volts, which are provided as input to the inverter. If Vdc and Pdc are vectors, they must be of the same size. v_dc must be >= 0. (V) p_dc : numeric A scalar or pandas series of DC powers, in watts, which are provided as input to the inverter. If Vdc and Pdc are vectors, they must be of the same size. p_dc must be >= 0. (W) inverter : dict-like A dict-like object defining the inverter to be used, giving the inverter performance parameters according to the model developed by Anton Driesse [1]. A set of inverter performance parameters may be loaded from the supplied data table using retrievesam. See Notes for required keys. vtol : numeric, default 0.1 A unit-less fraction that determines how far the efficiency model is allowed to extrapolate beyond the inverter's normal input voltage operating range. 0.0 <= vtol <= 1.0 Returns ------- ac_power : numeric A numpy array or pandas series of modeled AC power output given the input DC voltage, v_dc, and input DC power, p_dc. When ac_power would be greater than pac_max, it is set to p_max to represent inverter "clipping". When ac_power would be less than -p_nt (energy consumed rather than produced) then ac_power is set to -p_nt to represent nightly power losses. ac_power is not adjusted for maximum power point tracking (MPPT) voltage windows or maximum current limits of the inverter. Notes ----- Required inverter keys are: ======= ============================================================ Column Description ======= ============================================================ p_nom The nominal power value used to normalize all power values, typically the DC power needed to produce maximum AC power output, (W). 
v_nom The nominal DC voltage value used to normalize DC voltage values, typically the level at which the highest efficiency is achieved, (V). pac_max The maximum AC output power value, used to clip the output if needed, (W). ce_list This is a list of 9 coefficients that capture the influence of input voltage and power on inverter losses, and thereby efficiency. p_nt ac-power consumed by inverter at night (night tare) to maintain circuitry required to sense PV array voltage, (W). ======= ============================================================ References ---------- [1] Beyond the Curves: Modeling the Electrical Efficiency of Photovoltaic Inverters, PVSC 2008, Anton Driesse et. al. See also -------- sapm singlediode ''' p_nom = inverter['Pnom'] v_nom = inverter['Vnom'] pac_max = inverter['Pacmax'] p_nt = inverter['Pnt'] ce_list = inverter['ADRCoefficients'] v_max = inverter['Vmax'] v_min = inverter['Vmin'] vdc_max = inverter['Vdcmax'] mppt_hi = inverter['MPPTHi'] mppt_low = inverter['MPPTLow'] v_lim_upper = float(np.nanmax([v_max, vdc_max, mppt_hi]) * (1 + vtol)) v_lim_lower = float(np.nanmax([v_min, mppt_low]) * (1 - vtol)) pdc = p_dc / p_nom vdc = v_dc / v_nom # zero voltage will lead to division by zero, but since power is # set to night time value later, these errors can be safely ignored with np.errstate(invalid='ignore', divide='ignore'): poly = np.array([pdc**0, # replace with np.ones_like? pdc, pdc**2, vdc - 1, pdc * (vdc - 1), pdc**2 * (vdc - 1), 1. / vdc - 1, # divide by 0 pdc * (1. / vdc - 1), # invalid 0./0. --> nan pdc**2 * (1. 
/ vdc - 1)]) # divide by 0 p_loss = np.dot(np.array(ce_list), poly) ac_power = p_nom * (pdc-p_loss) p_nt = -1 * np.absolute(p_nt) # set output to nan where input is outside of limits # errstate silences case where input is nan with np.errstate(invalid='ignore'): invalid = (v_lim_upper < v_dc) | (v_dc < v_lim_lower) ac_power = np.where(invalid, np.nan, ac_power) # set night values ac_power = np.where(vdc == 0, p_nt, ac_power) ac_power = np.maximum(ac_power, p_nt) # set max ac output ac_power = np.minimum(ac_power, pac_max) if isinstance(p_dc, pd.Series): ac_power = pd.Series(ac_power, index=pdc.index) return ac_power
[ "def", "adrinverter", "(", "v_dc", ",", "p_dc", ",", "inverter", ",", "vtol", "=", "0.10", ")", ":", "p_nom", "=", "inverter", "[", "'Pnom'", "]", "v_nom", "=", "inverter", "[", "'Vnom'", "]", "pac_max", "=", "inverter", "[", "'Pacmax'", "]", "p_nt", ...
r''' Converts DC power and voltage to AC power using Anton Driesse's Grid-Connected PV Inverter efficiency model Parameters ---------- v_dc : numeric A scalar or pandas series of DC voltages, in volts, which are provided as input to the inverter. If Vdc and Pdc are vectors, they must be of the same size. v_dc must be >= 0. (V) p_dc : numeric A scalar or pandas series of DC powers, in watts, which are provided as input to the inverter. If Vdc and Pdc are vectors, they must be of the same size. p_dc must be >= 0. (W) inverter : dict-like A dict-like object defining the inverter to be used, giving the inverter performance parameters according to the model developed by Anton Driesse [1]. A set of inverter performance parameters may be loaded from the supplied data table using retrievesam. See Notes for required keys. vtol : numeric, default 0.1 A unit-less fraction that determines how far the efficiency model is allowed to extrapolate beyond the inverter's normal input voltage operating range. 0.0 <= vtol <= 1.0 Returns ------- ac_power : numeric A numpy array or pandas series of modeled AC power output given the input DC voltage, v_dc, and input DC power, p_dc. When ac_power would be greater than pac_max, it is set to p_max to represent inverter "clipping". When ac_power would be less than -p_nt (energy consumed rather than produced) then ac_power is set to -p_nt to represent nightly power losses. ac_power is not adjusted for maximum power point tracking (MPPT) voltage windows or maximum current limits of the inverter. Notes ----- Required inverter keys are: ======= ============================================================ Column Description ======= ============================================================ p_nom The nominal power value used to normalize all power values, typically the DC power needed to produce maximum AC power output, (W). 
v_nom The nominal DC voltage value used to normalize DC voltage values, typically the level at which the highest efficiency is achieved, (V). pac_max The maximum AC output power value, used to clip the output if needed, (W). ce_list This is a list of 9 coefficients that capture the influence of input voltage and power on inverter losses, and thereby efficiency. p_nt ac-power consumed by inverter at night (night tare) to maintain circuitry required to sense PV array voltage, (W). ======= ============================================================ References ---------- [1] Beyond the Curves: Modeling the Electrical Efficiency of Photovoltaic Inverters, PVSC 2008, Anton Driesse et. al. See also -------- sapm singlediode
[ "r", "Converts", "DC", "power", "and", "voltage", "to", "AC", "power", "using", "Anton", "Driesse", "s", "Grid", "-", "Connected", "PV", "Inverter", "efficiency", "model" ]
python
train
iclab/centinel
centinel/primitives/traceroute.py
https://github.com/iclab/centinel/blob/9a25dcf30c6a1db3c046f7ccb8ab8873e455c1a4/centinel/primitives/traceroute.py#L22-L145
def traceroute(domain, method="udp", cmd_arguments=None, external=None, log_prefix=''): """ This function uses centinel.command to issue a traceroute command, wait for it to finish execution and parse the results out to a dictionary. :param domain: the domain to be queried :param method: the packet type used for traceroute, UDP by default :param cmd_arguments: the list of arguments that need to be passed to traceroute. :param external: :param log_prefix: :return: """ # the method specified by the function parameter here will # over-ride the ones given in cmd_arguments because # traceroute will use the last one in the argument list. _cmd_arguments = [] logging.debug("%sRunning traceroute for " "%s using %s probes." % (log_prefix, domain, method)) results = {"method": method} if cmd_arguments is not None: _cmd_arguments = copy.deepcopy(cmd_arguments) if method == "tcp": if platform in ['linux', 'linux2']: _cmd_arguments.append('-T') elif platform == "darwin": _cmd_arguments.append('-P') _cmd_arguments.append('tcp') elif method == "udp": if platform in ['linux', 'linux2']: _cmd_arguments.append('-U') elif platform == "darwin": _cmd_arguments.append('-P') _cmd_arguments.append('udp') elif method == "icmp": if platform in ['linux', 'linux2']: _cmd_arguments.append('-I') elif platform == "darwin": _cmd_arguments.append('-P') _cmd_arguments.append('icmp') cmd = ['traceroute'] + _cmd_arguments + [domain] caller = command.Command(cmd, _traceroute_callback) caller.start() if not caller.started: if caller.exception is not None: if "No such file or directory" in caller.exception: message = "traceroute not found or not installed" else: message = ("traceroute thread threw an " "exception: %s" % caller.exception) elif "enough privileges" in caller.notifications: message = "not enough privileges" elif "not known" in caller.notifications: message = "name or service not known" else: message = caller.notifications results["dest_name"] = domain results["error"] = message if external 
is not None and type(external) is dict: external[domain] = results return results forcefully_terminated = False timeout = 60 start_time = time.time() # check every second to see if the execution has stopped while caller.thread.isAlive(): if (time.time() - start_time) > timeout: caller.stop() forcefully_terminated = True break time.sleep(1) # we are only accurate down to seconds, so we have # to round up time_elapsed = int(time.time() - start_time) output_string = caller.notifications try: parsed_output = trparse.loads(output_string) except Exception as exc: results["dest_name"] = domain results["error"] = str(exc) results["raw"] = output_string if external is not None and type(external) is dict: external[domain] = results return results hops = list() for hop in parsed_output.hops: hop_json = {"index": hop.idx, "asn": hop.asn} probes_json = [] for probe in hop.probes: probes_json.append({"name": probe.name, "ip": probe.ip, "rtt": probe.rtt, "anno": probe.anno}) hop_json["probes"] = probes_json hops.append(hop_json) results["dest_name"] = parsed_output.dest_name results["dest_ip"] = parsed_output.dest_ip results["hops"] = hops results["forcefully_terminated"] = forcefully_terminated results["time_elapsed"] = time_elapsed # the external result is used when threading to store # the results in the list container provided. if external is not None and type(external) is dict: external[domain] = results return results
[ "def", "traceroute", "(", "domain", ",", "method", "=", "\"udp\"", ",", "cmd_arguments", "=", "None", ",", "external", "=", "None", ",", "log_prefix", "=", "''", ")", ":", "# the method specified by the function parameter here will", "# over-ride the ones given in cmd_a...
This function uses centinel.command to issue a traceroute command, wait for it to finish execution and parse the results out to a dictionary. :param domain: the domain to be queried :param method: the packet type used for traceroute, UDP by default :param cmd_arguments: the list of arguments that need to be passed to traceroute. :param external: :param log_prefix: :return:
[ "This", "function", "uses", "centinel", ".", "command", "to", "issue", "a", "traceroute", "command", "wait", "for", "it", "to", "finish", "execution", "and", "parse", "the", "results", "out", "to", "a", "dictionary", "." ]
python
train
SmokinCaterpillar/pypet
pypet/parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/parameter.py#L1792-L1832
def _load(self, load_dict): """Reconstructs objects from the pickle dumps in `load_dict`. The 'explored_data' entry in `load_dict` is used to reconstruct the exploration range in the correct order. Sets the `v_protocol` property to the protocol used to store 'data'. """ if self.v_locked: raise pex.ParameterLockedException('Parameter `%s` is locked!' % self.v_full_name) if 'data' in load_dict: dump = load_dict['data'] self._data = pickle.loads(dump) else: self._logger.warning('Your parameter `%s` is empty, ' 'I did not find any data on disk.' % self.v_full_name) try: self.v_protocol = load_dict[PickleParameter.PROTOCOL] except KeyError: # For backwards compatibility self.v_protocol = PickleParameter._get_protocol(dump) if 'explored_data' in load_dict: explore_table = load_dict['explored_data'] name_col = explore_table['idx'] explore_list = [] for name_id in name_col: arrayname = self._build_name(name_id) loaded = pickle.loads(load_dict[arrayname]) explore_list.append(loaded) self._explored_range = explore_list self._explored = True self._default = self._data self._locked = True
[ "def", "_load", "(", "self", ",", "load_dict", ")", ":", "if", "self", ".", "v_locked", ":", "raise", "pex", ".", "ParameterLockedException", "(", "'Parameter `%s` is locked!'", "%", "self", ".", "v_full_name", ")", "if", "'data'", "in", "load_dict", ":", "d...
Reconstructs objects from the pickle dumps in `load_dict`. The 'explored_data' entry in `load_dict` is used to reconstruct the exploration range in the correct order. Sets the `v_protocol` property to the protocol used to store 'data'.
[ "Reconstructs", "objects", "from", "the", "pickle", "dumps", "in", "load_dict", "." ]
python
test
python-bonobo/bonobo
bonobo/nodes/io/file.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/nodes/io/file.py#L40-L58
def output(self, context, *args, **kwargs): """ Allow all readers to use eventually use output_fields XOR output_type options. """ output_fields = self.output_fields output_type = self.output_type if output_fields and output_type: raise UnrecoverableError("Cannot specify both output_fields and output_type option.") if self.output_type: context.set_output_type(self.output_type) if self.output_fields: context.set_output_fields(self.output_fields) yield
[ "def", "output", "(", "self", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "output_fields", "=", "self", ".", "output_fields", "output_type", "=", "self", ".", "output_type", "if", "output_fields", "and", "output_type", ":", "raise"...
Allow all readers to use eventually use output_fields XOR output_type options.
[ "Allow", "all", "readers", "to", "use", "eventually", "use", "output_fields", "XOR", "output_type", "options", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L9128-L9154
def delete_namespaced_config_map(self, name, namespace, **kwargs): """ delete a ConfigMap This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_config_map(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ConfigMap (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_config_map_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_config_map_with_http_info(name, namespace, **kwargs) return data
[ "def", "delete_namespaced_config_map", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", "...
delete a ConfigMap This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_config_map(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ConfigMap (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. 
:return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "a", "ConfigMap", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_n...
python
train
daler/metaseq
metaseq/plotutils.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/plotutils.py#L598-L660
def clustered_sortind(x, k=10, scorefunc=None): """ Uses MiniBatch k-means clustering to cluster matrix into groups. Each cluster of rows is then sorted by `scorefunc` -- by default, the max peak height when all rows in a cluster are averaged, or cluster.mean(axis=0).max(). Returns the index that will sort the rows of `x` and a list of "breaks". `breaks` is essentially a cumulative row count for each cluster boundary. In other words, after plotting the array you can use axhline on each "break" to plot the cluster boundary. If `k` is a list or tuple, iteratively try each one and select the best with the lowest mean distance from cluster centers. :param x: Matrix whose rows are to be clustered :param k: Number of clusters to create or a list of potential clusters; the optimum will be chosen from the list :param scorefunc: Optional function for sorting rows within clusters. Must accept a single argument of a NumPy array. """ try: from sklearn.cluster import MiniBatchKMeans except ImportError: raise ImportError('please install scikits.learn for ' 'clustering.') # If integer, do it once and we're done if isinstance(k, int): best_k = k else: mean_dists = {} for _k in k: mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k) mbk.fit(x) mean_dists[_k] = mbk.transform(x).mean() best_k = sorted(mean_dists.items(), key=lambda x: x[1])[-1][0] mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k) mbk.fit(x) k = best_k labels = mbk.labels_ scores = np.zeros(labels.shape, dtype=float) if not scorefunc: def scorefunc(x): return x.mean(axis=0).max() for label in range(k): ind = labels == label score = scorefunc(x[ind, :]) scores[ind] = score pos = 0 breaks = [] ind = np.argsort(scores) for k, g in itertools.groupby(labels[ind]): pos += len(list(g)) breaks.append(pos) return ind, breaks
[ "def", "clustered_sortind", "(", "x", ",", "k", "=", "10", ",", "scorefunc", "=", "None", ")", ":", "try", ":", "from", "sklearn", ".", "cluster", "import", "MiniBatchKMeans", "except", "ImportError", ":", "raise", "ImportError", "(", "'please install scikits....
Uses MiniBatch k-means clustering to cluster matrix into groups. Each cluster of rows is then sorted by `scorefunc` -- by default, the max peak height when all rows in a cluster are averaged, or cluster.mean(axis=0).max(). Returns the index that will sort the rows of `x` and a list of "breaks". `breaks` is essentially a cumulative row count for each cluster boundary. In other words, after plotting the array you can use axhline on each "break" to plot the cluster boundary. If `k` is a list or tuple, iteratively try each one and select the best with the lowest mean distance from cluster centers. :param x: Matrix whose rows are to be clustered :param k: Number of clusters to create or a list of potential clusters; the optimum will be chosen from the list :param scorefunc: Optional function for sorting rows within clusters. Must accept a single argument of a NumPy array.
[ "Uses", "MiniBatch", "k", "-", "means", "clustering", "to", "cluster", "matrix", "into", "groups", "." ]
python
train
boriel/zxbasic
asmlex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmlex.py#L299-L303
def t_RP(self, t): r'[])]' if t.value != ']' and OPTIONS.bracket.value: t.type = 'RPP' return t
[ "def", "t_RP", "(", "self", ",", "t", ")", ":", "if", "t", ".", "value", "!=", "']'", "and", "OPTIONS", ".", "bracket", ".", "value", ":", "t", ".", "type", "=", "'RPP'", "return", "t" ]
r'[])]
[ "r", "[]", ")", "]" ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/phystokens.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/phystokens.py#L112-L210
def source_encoding(source): """Determine the encoding for `source` (a string), according to PEP 263. Returns a string, the name of the encoding. """ # Note: this function should never be called on Python 3, since py3 has # built-in tools to do this. assert sys.version_info < (3, 0) # This is mostly code adapted from Py3.2's tokenize module. cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") # Do this so the detect_encode code we copied will work. readline = iter(source.splitlines(True)).next def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if re.match(r"^utf-8($|-)", enc): return "utf-8" if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc): return "iso-8859-1" return orig_enc # From detect_encode(): # It detects the encoding from the presence of a utf-8 bom or an encoding # cookie as specified in pep-0263. If both a bom and a cookie are present, # but disagree, a SyntaxError will be raised. If the encoding cookie is an # invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, # 'utf-8-sig' is returned. # If no encoding is specified, then the default will be returned. The # default varied with version. if sys.version_info <= (2, 4): default = 'iso-8859-1' else: default = 'ascii' bom_found = False encoding = None def read_or_stop(): """Get the next source line, or ''.""" try: return readline() except StopIteration: return '' def find_cookie(line): """Find an encoding cookie in `line`.""" try: line_string = line.decode('ascii') except UnicodeDecodeError: return None matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = codecs.lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: # codecs in 2.3 were raw tuples of functions, assume the best. 
codec_name = getattr(codec, 'name', encoding) if codec_name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(codecs.BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default encoding = find_cookie(first) if encoding: return encoding second = read_or_stop() if not second: return default encoding = find_cookie(second) if encoding: return encoding return default
[ "def", "source_encoding", "(", "source", ")", ":", "# Note: this function should never be called on Python 3, since py3 has", "# built-in tools to do this.", "assert", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", "# This is mostly code adapted from Py3.2's tokenize m...
Determine the encoding for `source` (a string), according to PEP 263. Returns a string, the name of the encoding.
[ "Determine", "the", "encoding", "for", "source", "(", "a", "string", ")", "according", "to", "PEP", "263", "." ]
python
test
jerith/txTwitter
txtwitter/twitter.py
https://github.com/jerith/txTwitter/blob/f07afd21184cd1bee697737bf98fd143378dbdff/txtwitter/twitter.py#L175-L221
def set_list_param(params, name, value, min_len=None, max_len=None): """ Set a list parameter if applicable. :param dict params: A dict containing API call parameters. :param str name: The name of the parameter to set. :param list value: The value of the parameter. If ``None``, the field will not be set. If an instance of ``set``, ``tuple``, or type that can be turned into a ``list``, the relevant field will be set. If ``dict``, will raise ``ValueError``. Any other value will raise a ``ValueError``. :param int min_len: If provided, values shorter than this will raise ``ValueError``. :param int max_len: If provided, values longer than this will raise ``ValueError``. """ if value is None: return if type(value) is dict: raise ValueError( "Parameter '%s' cannot be a dict." % name) try: value = list(value) except: raise ValueError( "Parameter '%s' must be a list (or a type that can be turned into" "a list) or None, got %r." % (name, value)) if min_len is not None and len(value) < min_len: raise ValueError( "Parameter '%s' must not be shorter than %r, got %r." % ( name, min_len, value)) if max_len is not None and len(value) > max_len: raise ValueError( "Parameter '%s' must not be longer than %r, got %r." % ( name, max_len, value)) list_str = '' for item in value: list_str += '%s,' % item set_str_param(params, name, list_str)
[ "def", "set_list_param", "(", "params", ",", "name", ",", "value", ",", "min_len", "=", "None", ",", "max_len", "=", "None", ")", ":", "if", "value", "is", "None", ":", "return", "if", "type", "(", "value", ")", "is", "dict", ":", "raise", "ValueErro...
Set a list parameter if applicable. :param dict params: A dict containing API call parameters. :param str name: The name of the parameter to set. :param list value: The value of the parameter. If ``None``, the field will not be set. If an instance of ``set``, ``tuple``, or type that can be turned into a ``list``, the relevant field will be set. If ``dict``, will raise ``ValueError``. Any other value will raise a ``ValueError``. :param int min_len: If provided, values shorter than this will raise ``ValueError``. :param int max_len: If provided, values longer than this will raise ``ValueError``.
[ "Set", "a", "list", "parameter", "if", "applicable", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/skarlatoudis_2013.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/skarlatoudis_2013.py#L122-L130
def _compute_distance(self, rup, dists, C): """ equation 3 pag 1960: ``c31 * logR + c32 * (R-Rref)`` """ rref = 1.0 c31 = -1.7 return (c31 * np.log10(dists.rhypo) + C['c32'] * (dists.rhypo - rref))
[ "def", "_compute_distance", "(", "self", ",", "rup", ",", "dists", ",", "C", ")", ":", "rref", "=", "1.0", "c31", "=", "-", "1.7", "return", "(", "c31", "*", "np", ".", "log10", "(", "dists", ".", "rhypo", ")", "+", "C", "[", "'c32'", "]", "*",...
equation 3 pag 1960: ``c31 * logR + c32 * (R-Rref)``
[ "equation", "3", "pag", "1960", ":" ]
python
train
aio-libs/yarl
yarl/__init__.py
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L619-L627
def _validate_authority_uri_abs_path(host, path): """Ensure that path in URL with authority starts with a leading slash. Raise ValueError if not. """ if len(host) > 0 and len(path) > 0 and not path.startswith("/"): raise ValueError( "Path in a URL with authority " "should start with a slash ('/') if set" )
[ "def", "_validate_authority_uri_abs_path", "(", "host", ",", "path", ")", ":", "if", "len", "(", "host", ")", ">", "0", "and", "len", "(", "path", ")", ">", "0", "and", "not", "path", ".", "startswith", "(", "\"/\"", ")", ":", "raise", "ValueError", ...
Ensure that path in URL with authority starts with a leading slash. Raise ValueError if not.
[ "Ensure", "that", "path", "in", "URL", "with", "authority", "starts", "with", "a", "leading", "slash", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/coords.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/coords.py#L88-L93
def to_gtp(coord): """Converts from a Minigo coordinate to a GTP coordinate.""" if coord is None: return 'pass' y, x = coord return '{}{}'.format(_GTP_COLUMNS[x], go.N - y)
[ "def", "to_gtp", "(", "coord", ")", ":", "if", "coord", "is", "None", ":", "return", "'pass'", "y", ",", "x", "=", "coord", "return", "'{}{}'", ".", "format", "(", "_GTP_COLUMNS", "[", "x", "]", ",", "go", ".", "N", "-", "y", ")" ]
Converts from a Minigo coordinate to a GTP coordinate.
[ "Converts", "from", "a", "Minigo", "coordinate", "to", "a", "GTP", "coordinate", "." ]
python
train
PaloAltoNetworks/pancloud
pancloud/credentials.py
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/credentials.py#L502-L533
def revoke_access_token(self, **kwargs): """Revoke access token.""" c = self.get_credentials() data = { 'client_id': c.client_id, 'client_secret': c.client_secret, 'token': c.access_token, 'token_type_hint': 'access_token' } r = self._httpclient.request( method='POST', url=self.token_url, json=data, path='/api/oauth2/RevokeToken', **kwargs ) if not r.ok: raise PanCloudError( '%s %s: %s' % (r.status_code, r.reason, r.text) ) try: r_json = r.json() except ValueError as e: raise PanCloudError("Invalid JSON: %s" % e) else: if r.json().get( 'error_description' ) or r.json().get( 'error' ): raise PanCloudError(r.text) return r_json
[ "def", "revoke_access_token", "(", "self", ",", "*", "*", "kwargs", ")", ":", "c", "=", "self", ".", "get_credentials", "(", ")", "data", "=", "{", "'client_id'", ":", "c", ".", "client_id", ",", "'client_secret'", ":", "c", ".", "client_secret", ",", ...
Revoke access token.
[ "Revoke", "access", "token", "." ]
python
train
aio-libs/aiohttp
aiohttp/client_reqrep.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/client_reqrep.py#L322-L343
def update_headers(self, headers: Optional[LooseHeaders]) -> None: """Update request headers.""" self.headers = CIMultiDict() # type: CIMultiDict[str] # add host netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = '[{}]'.format(netloc) if not self.url.is_default_port(): netloc += ':' + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() # type: ignore for key, value in headers: # A special case for Host header if key.lower() == 'host': self.headers[key] = value else: self.headers.add(key, value)
[ "def", "update_headers", "(", "self", ",", "headers", ":", "Optional", "[", "LooseHeaders", "]", ")", "->", "None", ":", "self", ".", "headers", "=", "CIMultiDict", "(", ")", "# type: CIMultiDict[str]", "# add host", "netloc", "=", "cast", "(", "str", ",", ...
Update request headers.
[ "Update", "request", "headers", "." ]
python
train
wiredrive/wtframework
wtframework/wtf/web/page.py
https://github.com/wiredrive/wtframework/blob/ef7f86c4d4cf7fb17745fd627b3cc4a41f4c0216/wtframework/wtf/web/page.py#L167-L296
def create_page(page_object_class_or_interface, webdriver=None, **kwargs): """ Instantiate a page object from a given Interface or Abstract class. Args: page_object_class_or_interface (Class): PageObject class, AbstractBaseClass, or Interface to attempt to consturct. Kwargs: webdriver (WebDriver): Selenium Webdriver to use to instantiate the page. If none is provided, then it was use the default from WTF_WEBDRIVER_MANAGER Returns: PageObject Raises: NoMatchingPageError Instantiating a Page from PageObject from class usage:: my_page_instance = PageFactory.create_page(MyPageClass) Instantiating a Page from an Interface or base class:: import pages.mysite.* # Make sure you import classes first, or else PageFactory will not know about it. my_page_instance = PageFactory.create_page(MyPageInterfaceClass) Instantiating a Page from a list of classes.:: my_page_instance = PageFactory.create_page([PossiblePage1, PossiblePage2]) Note: It'll only be able to detect pages that are imported. To it's best to do an import of all pages implementing a base class or the interface inside the __init__.py of the package directory. """ if not webdriver: webdriver = WTF_WEBDRIVER_MANAGER.get_driver() # will be used later when tracking best matched page. current_matched_page = None # used to track if there is a valid page object within the set of PageObjects searched. was_validate_called = False # Walk through all classes if a list was passed. if type(page_object_class_or_interface) == list: subclasses = [] for page_class in page_object_class_or_interface: # attempt to instantiate class. 
page = PageFactory.__instantiate_page_object(page_class, webdriver, **kwargs) if isinstance(page, PageObject): was_validate_called = True if (current_matched_page == None or page > current_matched_page): current_matched_page = page elif page is True: was_validate_called = True # check for subclasses subclasses += PageFactory.__itersubclasses(page_class) else: # A single class was passed in, try to instantiate the class. page_class = page_object_class_or_interface page = PageFactory.__instantiate_page_object(page_class, webdriver, **kwargs) # Check if we got a valid PageObject back. if isinstance(page, PageObject): was_validate_called = True current_matched_page = page elif page is True: was_validate_called = True # check for subclasses subclasses = PageFactory.__itersubclasses( page_object_class_or_interface) # Iterate over subclasses of the passed in classes to see if we have a # better match. for pageClass in subclasses: try: page = PageFactory.__instantiate_page_object(pageClass, webdriver, **kwargs) # If we get a valid PageObject match, check to see if the ranking is higher # than our current PageObject. if isinstance(page, PageObject): was_validate_called = True if current_matched_page == None or page > current_matched_page: current_matched_page = page elif page is True: was_validate_called = True except InvalidPageError as e: _wtflog.debug("InvalidPageError: %s", e) pass # This happens when the page fails check. except TypeError as e: _wtflog.debug("TypeError: %s", e) # this happens when it tries to instantiate the original # abstract class. pass except Exception as e: _wtflog.debug("Exception during page instantiation: %s", e) # Unexpected exception. raise e # If no matching classes. if not isinstance(current_matched_page, PageObject): # Check that there is at least 1 valid page object that was passed in. 
if was_validate_called is False: raise TypeError("Neither the PageObjects nor it's subclasses have implemented " + "'PageObject._validate(self, webdriver)'.") try: current_url = webdriver.current_url raise NoMatchingPageError(u("There's, no matching classes to this page. URL:{0}") .format(current_url)) except: raise NoMatchingPageError(u("There's, no matching classes to this page. ")) else: return current_matched_page
[ "def", "create_page", "(", "page_object_class_or_interface", ",", "webdriver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "webdriver", ":", "webdriver", "=", "WTF_WEBDRIVER_MANAGER", ".", "get_driver", "(", ")", "# will be used later when tracking ...
Instantiate a page object from a given Interface or Abstract class. Args: page_object_class_or_interface (Class): PageObject class, AbstractBaseClass, or Interface to attempt to consturct. Kwargs: webdriver (WebDriver): Selenium Webdriver to use to instantiate the page. If none is provided, then it was use the default from WTF_WEBDRIVER_MANAGER Returns: PageObject Raises: NoMatchingPageError Instantiating a Page from PageObject from class usage:: my_page_instance = PageFactory.create_page(MyPageClass) Instantiating a Page from an Interface or base class:: import pages.mysite.* # Make sure you import classes first, or else PageFactory will not know about it. my_page_instance = PageFactory.create_page(MyPageInterfaceClass) Instantiating a Page from a list of classes.:: my_page_instance = PageFactory.create_page([PossiblePage1, PossiblePage2]) Note: It'll only be able to detect pages that are imported. To it's best to do an import of all pages implementing a base class or the interface inside the __init__.py of the package directory.
[ "Instantiate", "a", "page", "object", "from", "a", "given", "Interface", "or", "Abstract", "class", "." ]
python
train
xtream1101/cutil
cutil/database.py
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/database.py#L10-L20
def _check_values(in_values): """ Check if values need to be converted before they get mogrify'd """ out_values = [] for value in in_values: # if isinstance(value, (dict, list)): # out_values.append(json.dumps(value)) # else: out_values.append(value) return tuple(out_values)
[ "def", "_check_values", "(", "in_values", ")", ":", "out_values", "=", "[", "]", "for", "value", "in", "in_values", ":", "# if isinstance(value, (dict, list)):", "# out_values.append(json.dumps(value))", "# else:", "out_values", ".", "append", "(", "value", ")", "...
Check if values need to be converted before they get mogrify'd
[ "Check", "if", "values", "need", "to", "be", "converted", "before", "they", "get", "mogrify", "d" ]
python
train
Metatab/metapack
metapack/html.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L174-L242
def make_citation_dict(td): """ Update a citation dictionary by editing the Author field :param td: A BixTex format citation dict or a term :return: """ from datetime import datetime if isinstance(td, dict): d = td name = d['name_link'] else: d = td.as_dict() d['_term'] = td try: d['name_link'] = td.name except AttributeError: d['name_link'] = td['name_link'].value if 'author' in d and isinstance(d['author'], str): authors = [] for e in d['author'].split(';'): author_d = HumanName(e).as_dict(include_empty=False) if 'suffix' in author_d: author_d['lineage'] = author_d['suffix'] del author_d['suffix'] authors.append(author_d) d['author'] = authors if not 'type' in d: if '_term' in d: t = d['_term'] if t.term_is('Root.Reference') or t.term_is('Root.Resource'): d['type'] = 'dataset' elif t.term_is('Root.Citation'): d['type'] = 'article' else: d['type'] = 'article' if d['type'] == 'dataset': if not 'editor' in d: d['editor'] = [HumanName('Missing Editor').as_dict(include_empty=False)] if not 'accessdate' in d: d['accessdate'] = datetime.now().strftime('%Y-%m-%d') if not 'author' in d: d['author'] = [HumanName('Missing Author').as_dict(include_empty=False)] if not 'title' in d: d['title'] = d.get('description', '<Missing Title>') if not 'journal' in d: d['journal'] = '<Missing Journal>' if not 'year' in d: d['year'] = '<Missing Year>' if '_term' in d: del d['_term'] return d
[ "def", "make_citation_dict", "(", "td", ")", ":", "from", "datetime", "import", "datetime", "if", "isinstance", "(", "td", ",", "dict", ")", ":", "d", "=", "td", "name", "=", "d", "[", "'name_link'", "]", "else", ":", "d", "=", "td", ".", "as_dict", ...
Update a citation dictionary by editing the Author field :param td: A BixTex format citation dict or a term :return:
[ "Update", "a", "citation", "dictionary", "by", "editing", "the", "Author", "field", ":", "param", "td", ":", "A", "BixTex", "format", "citation", "dict", "or", "a", "term", ":", "return", ":" ]
python
train
senaite/senaite.core
bika/lims/browser/widgets/analysisprofileanalyseswidget.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/analysisprofileanalyseswidget.py#L190-L195
def folderitems(self): """TODO: Refactor to non-classic mode """ items = super(AnalysisProfileAnalysesView, self).folderitems() self.categories.sort() return items
[ "def", "folderitems", "(", "self", ")", ":", "items", "=", "super", "(", "AnalysisProfileAnalysesView", ",", "self", ")", ".", "folderitems", "(", ")", "self", ".", "categories", ".", "sort", "(", ")", "return", "items" ]
TODO: Refactor to non-classic mode
[ "TODO", ":", "Refactor", "to", "non", "-", "classic", "mode" ]
python
train
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1008-L1112
def interleave_data(self, prop):
    r"""
    Retrieves requested property from associated objects, to produce a full
    Np or Nt length array.

    Parameters
    ----------
    prop : string
        The property name to be retrieved

    Returns
    -------
    A full length (Np or Nt) array of requested property values.

    Notes
    -----
    This makes an effort to maintain the data 'type' when possible; however
    when data are missing this can be tricky.  Data can be missing in two
    different ways: A set of pores is not assisgned to a geometry or the
    network contains multiple geometries and data does not exist on all.
    Float and boolean data is fine, but missing ints are converted to float
    when nans are inserted.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[2, 2, 2])
    >>> Ps = pn['pore.top']
    >>> Ts = pn.find_neighbor_throats(pores=Ps)
    >>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
    >>> Ts = ~pn.tomask(throats=Ts)
    >>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
    >>> g1['pore.value'] = 1
    >>> print(g1['pore.value'])
    [1 1 1 1]
    >>> print(g2['pore.value'])  # 'pore.value' is defined on g1, not g2
    [nan nan nan nan]
    >>> print(pn['pore.value'])
    [nan  1. nan  1. nan  1. nan  1.]
    >>> g2['pore.value'] = 20
    >>> print(pn['pore.value'])
    [20  1 20  1 20  1 20  1]
    >>> pn['pore.label'] = False
    >>> print(g1['pore.label'])  # 'pore.label' is defined on pn, not g1
    [False False False False]
    """
    # 'pore' or 'throat', taken from the prefix of the property name
    element = self._parse_element(prop.split('.')[0], single=True)
    # Full length of the output array (Np or Nt on the network)
    N = self.project.network._count(element)
    # Fetch sources list depending on object type?
    proj = self.project
    if self._isa() in ['network', 'geometry']:
        sources = list(proj.geometries().values())
    elif self._isa() in ['phase', 'physics']:
        sources = list(proj.find_physics(phase=self))
    elif self._isa() in ['algorithm', 'base']:
        sources = [self]
    else:
        raise Exception('Unrecognized object type, cannot find dependents')
    # Attempt to fetch the requested array from each object
    arrs = [item.get(prop, None) for item in sources]
    # Indices into the full array where each source's values belong
    locs = [self._get_indices(element, item.name) for item in sources]
    sizes = [sp.size(a) for a in arrs]
    if sp.all([item is None for item in arrs]):  # prop not found anywhere
        raise KeyError(prop)
    # Check the general type of each array
    atype = []
    for a in arrs:
        if a is not None:
            t = a.dtype.name
            if t.startswith('int') or t.startswith('float'):
                atype.append('numeric')
            elif t.startswith('bool'):
                atype.append('boolean')
            else:
                atype.append('other')
    if not all([item == atype[0] for item in atype]):
        raise Exception('The array types are not compatible')
    else:
        # Filler used where a source has no data for this prop
        dummy_val = {'numeric': sp.nan, 'boolean': False, 'other': None}
    # Create an empty array of the right type and shape
    for item in arrs:
        if item is not None:
            if len(item.shape) == 1:
                temp_arr = sp.zeros((N, ), dtype=item.dtype)
            else:
                temp_arr = sp.zeros((N, item.shape[1]), dtype=item.dtype)
            temp_arr.fill(dummy_val[atype[0]])
    # Convert int arrays to float IF NaNs are expected
    if temp_arr.dtype.name.startswith('int') and \
       (sp.any([i is None for i in arrs]) or sp.sum(sizes) != N):
        temp_arr = temp_arr.astype(float)
        temp_arr.fill(sp.nan)
    # Fill new array with values in the corresponding locations
    for vals, inds in zip(arrs, locs):
        if vals is not None:
            temp_arr[inds] = vals
        else:
            temp_arr[inds] = dummy_val[atype[0]]
    return temp_arr
[ "def", "interleave_data", "(", "self", ",", "prop", ")", ":", "element", "=", "self", ".", "_parse_element", "(", "prop", ".", "split", "(", "'.'", ")", "[", "0", "]", ",", "single", "=", "True", ")", "N", "=", "self", ".", "project", ".", "network...
r""" Retrieves requested property from associated objects, to produce a full Np or Nt length array. Parameters ---------- prop : string The property name to be retrieved Returns ------- A full length (Np or Nt) array of requested property values. Notes ----- This makes an effort to maintain the data 'type' when possible; however when data are missing this can be tricky. Data can be missing in two different ways: A set of pores is not assisgned to a geometry or the network contains multiple geometries and data does not exist on all. Float and boolean data is fine, but missing ints are converted to float when nans are inserted. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[2, 2, 2]) >>> Ps = pn['pore.top'] >>> Ts = pn.find_neighbor_throats(pores=Ps) >>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts) >>> Ts = ~pn.tomask(throats=Ts) >>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts) >>> g1['pore.value'] = 1 >>> print(g1['pore.value']) [1 1 1 1] >>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2 [nan nan nan nan] >>> print(pn['pore.value']) [nan 1. nan 1. nan 1. nan 1.] >>> g2['pore.value'] = 20 >>> print(pn['pore.value']) [20 1 20 1 20 1 20 1] >>> pn['pore.label'] = False >>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1 [False False False False]
[ "r", "Retrieves", "requested", "property", "from", "associated", "objects", "to", "produce", "a", "full", "Np", "or", "Nt", "length", "array", "." ]
python
train
jazzband/sorl-thumbnail
sorl/thumbnail/engines/wand_engine.py
https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/wand_engine.py#L18-L28
def is_valid_image(self, raw_data):
    """Return True if *raw_data* can be decoded by Wand.

    Wand validates images on open and raises when the data is corrupt
    or no suitable decoder is available, so a successful construction
    means the image is fine.
    """
    try:
        Image(blob=raw_data)
    except (exceptions.CorruptImageError, exceptions.MissingDelegateError):
        return False
    return True
[ "def", "is_valid_image", "(", "self", ",", "raw_data", ")", ":", "try", ":", "Image", "(", "blob", "=", "raw_data", ")", "return", "True", "except", "(", "exceptions", ".", "CorruptImageError", ",", "exceptions", ".", "MissingDelegateError", ")", ":", "retur...
Wand library makes sure when opening any image that is fine, when the image is corrupted raises an exception.
[ "Wand", "library", "makes", "sure", "when", "opening", "any", "image", "that", "is", "fine", "when", "the", "image", "is", "corrupted", "raises", "an", "exception", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1555-L1559
def help_center_article_translations(self, article_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#list-translations"
    # Build the endpoint path and issue the API call in one step.
    path_template = "/api/v2/help_center/articles/{article_id}/translations.json"
    return self.call(path_template.format(article_id=article_id), **kwargs)
[ "def", "help_center_article_translations", "(", "self", ",", "article_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/help_center/articles/{article_id}/translations.json\"", "api_path", "=", "api_path", ".", "format", "(", "article_id", "=", "article...
https://developer.zendesk.com/rest_api/docs/help_center/translations#list-translations
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "help_center", "/", "translations#list", "-", "translations" ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_clean_visible.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_visible.py#L252-L278
def make_clean_visible_from_raw(_html, tag_replacement_char=' '):
    '''Takes an HTML-like Unicode (or UTF-8 encoded) string as input
    and returns a UTF-8 encoded string with all tags replaced by
    whitespace.  (Note: the return value is encoded with
    ``.encode('utf-8')``, i.e. bytes, not a Unicode string.)  In
    particular, all Unicode characters inside HTML are replaced with a
    single whitespace character.

    This *does* detect comments, style, script, link tags and
    replaces them with whitespace.  This is subtle because these tags
    can be self-closing or not.

    It does *not* do anything with HTML-escaped characters (entities
    appear to pass through unchanged -- confirm in
    ``non_tag_chars_from_raw``).

    Pre-existing whitespace of any kind *except* newlines (\n) and
    linefeeds (\r\n) is converted to single spaces ' ', which has the
    same byte length (and character length).  Newlines and linefeeds
    are left unchanged.

    This is a simple state machine iterator without regexes.

    NOTE(review): the ``tag_replacement_char`` parameter is never used
    in this body -- confirm whether it should be forwarded to
    ``non_tag_chars_from_raw``.
    '''
    # Normalize to a Unicode string before stripping (Python 2 API).
    if not isinstance(_html, unicode):
        _html = unicode(_html, 'utf-8')

    # Strip tags using the state-machine helper
    non_tag = ''.join(non_tag_chars_from_raw(_html))
    return non_tag.encode('utf-8')
[ "def", "make_clean_visible_from_raw", "(", "_html", ",", "tag_replacement_char", "=", "' '", ")", ":", "if", "not", "isinstance", "(", "_html", ",", "unicode", ")", ":", "_html", "=", "unicode", "(", "_html", ",", "'utf-8'", ")", "#Strip tags with logic above", ...
Takes an HTML-like Unicode (or UTF-8 encoded) string as input and returns a Unicode string with all tags replaced by whitespace. In particular, all Unicode characters inside HTML are replaced with a single whitespace character. This *does* detect comments, style, script, link tags and replaces them with whitespace. This is subtle because these tags can be self-closing or not. It does do anything with HTML-escaped characters. Pre-existing whitespace of any kind *except* newlines (\n) and linefeeds (\r\n) is converted to single spaces ' ', which has the same byte length (and character length). Newlines and linefeeds are left unchanged. This is a simple state machine iterator without regexes
[ "Takes", "an", "HTML", "-", "like", "Unicode", "(", "or", "UTF", "-", "8", "encoded", ")", "string", "as", "input", "and", "returns", "a", "Unicode", "string", "with", "all", "tags", "replaced", "by", "whitespace", ".", "In", "particular", "all", "Unicod...
python
test
CalebBell/fluids
fluids/nrlmsise00/nrlmsise_00.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/nrlmsise00/nrlmsise_00.py#L487-L586
def densu(alt, dlb, tinf, tlb, xm, alpha, tz, zlb, s2, mn1, zn1, tn1, tgn1):
    '''
    /* Calculate Temperature and Density Profiles for MSIS models
     * New lower thermo polynomial */

    tz, zn1, tn1, and tgn1 are simulated pointers
    (single-element lists written in place, mirroring the C original's
    output parameters -- e.g. ``tz[0] = tt`` below).
    '''
    rgas = 831.4
    #rgas = 831.44621 #maybe make this a global constant?
    densu_temp = 1.0
    # Scratch arrays for the temperature spline (fixed 5 nodes, as in C)
    xs = [0.0]*5
    ys = [0.0]*5
    y2out = [0.0]*5
    #/* joining altitudes of Bates and spline */
    za=zn1[0];
    if (alt>za):
        z=alt;
    else:
        z=za;
    #/* geopotential altitude difference from ZLB */
    zg2 = zeta(z, zlb);
    #/* Bates temperature */
    tt = tinf - (tinf - tlb) * exp(-s2*zg2);
    ta = tt;
    tz[0] = tt
    densu_temp = tz[0]

    if (alt<za):
        #/* calculate temperature below ZA
        # * temperature gradient at ZA from Bates profile */
        dta = (tinf - ta) * s2 * pow(((re[0]+zlb)/(re[0]+za)),2.0);
        tgn1[0]=dta;
        tn1[0]=ta;
        if (alt>zn1[mn1-1]):
            z=alt;
        else:
            z=zn1[mn1-1];
        mn=mn1;
        z1=zn1[0];
        z2=zn1[mn-1];
        t1=tn1[0];
        t2=tn1[mn-1];
        #/* geopotental difference from z1 */
        zg = zeta (z, z1);
        zgdif = zeta(z2, z1);
        #/* set up spline nodes */
        for k in range(mn):
            xs[k] = zeta(zn1[k], z1) / zgdif;
            ys[k] = 1.0 / tn1[k];
        #/* end node derivatives */
        yd1 = -tgn1[0] / (t1*t1) * zgdif;
        yd2 = -tgn1[1] / (t2*t2) * zgdif * pow(((re[0]+z2)/(re[0]+z1)),2.0);
        #/* calculate spline coefficients */
        spline (xs, ys, mn, yd1, yd2, y2out);
        x = zg / zgdif;
        y = [0.0]
        splint (xs, ys, y2out, mn, x, y);
        #/* temperature at altitude */
        tz[0] = 1.0 / y[0];
        densu_temp = tz[0];
    if (xm==0):
        # xm == 0 means "temperature only": skip the density calculation
        return densu_temp;

    #/* calculate density above za */
    glb = gsurf[0] / pow((1.0 + zlb/re[0]),2.0);
    gamma = xm * glb / (s2 * rgas * tinf);
    expl = exp(-s2 * gamma * zg2);
    # The exponent is clamped at 50 to avoid overflow, as in the C source
    if (expl>50.0): # pragma: no cover
        expl=50.0;
    if (tt<=0): # pragma: no cover
        expl=50.0;
    #/* density at altitude */
    densa = dlb * pow((tlb/tt),((1.0+alpha+gamma))) * expl;
    densu_temp=densa;
    if (alt>=za):
        return densu_temp;

    #/* calculate density below za */
    glb = gsurf[0] / pow((1.0 + z1/re[0]),2.0);
    gamm = xm * glb * zgdif / rgas;
    #/* integrate spline temperatures */
    yi = [0]
    splini (xs, ys, y2out, mn, x, yi);
    expl = gamm * yi[0];
    if (expl>50.0): # pragma: no cover
        expl=50.0;
    if (tz[0]<=0): # pragma: no cover
        expl=50.0;
    #/* density at altitude */
    densu_temp = densu_temp * pow ((t1 / tz[0]),(1.0 + alpha)) * exp(-expl);
    return densu_temp;
[ "def", "densu", "(", "alt", ",", "dlb", ",", "tinf", ",", "tlb", ",", "xm", ",", "alpha", ",", "tz", ",", "zlb", ",", "s2", ",", "mn1", ",", "zn1", ",", "tn1", ",", "tgn1", ")", ":", "rgas", "=", "831.4", "#rgas = 831.44621 #maybe make this a glob...
/* Calculate Temperature and Density Profiles for MSIS models * New lower thermo polynomial */ tz, zn1, tn1, and tgn1 are simulated pointers
[ "/", "*", "Calculate", "Temperature", "and", "Density", "Profiles", "for", "MSIS", "models", "*", "New", "lower", "thermo", "polynomial", "*", "/", "tz", "zn1", "tn1", "and", "tgn1", "are", "simulated", "pointers" ]
python
train
LLNL/certipy
certipy/certipy.py
https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L203-L212
def to_record(self):
    """Create a CertStore record from this TLSFileBundle."""
    # Gather the TLS file attribute for every known file type, dropping
    # any type this bundle does not define.
    present = [tf
               for tf in (getattr(self, ft.value, None) for ft in TLSFileType)
               if tf]
    self.record['files'] = {tf.file_type.value: tf.file_path
                            for tf in present}
    return self.record
[ "def", "to_record", "(", "self", ")", ":", "tf_list", "=", "[", "getattr", "(", "self", ",", "k", ",", "None", ")", "for", "k", "in", "[", "_", ".", "value", "for", "_", "in", "TLSFileType", "]", "]", "# If a cert isn't defined in this bundle, remove it", ...
Create a CertStore record from this TLSFileBundle
[ "Create", "a", "CertStore", "record", "from", "this", "TLSFileBundle" ]
python
train
obsrvbl/flowlogs-reader
flowlogs_reader/__main__.py
https://github.com/obsrvbl/flowlogs-reader/blob/248d8cb3cc586859b6744d30cebce0f359d9900c/flowlogs_reader/__main__.py#L50-L60
def action_ipset(reader, *args):
    """Show the set of IPs seen in Flow Log records."""
    seen = set()
    for record in reader:
        # Skipped/empty capture windows carry no meaningful addresses.
        if record.log_status not in (SKIPDATA, NODATA):
            seen.update((record.srcaddr, record.dstaddr))
    for ip in seen:
        print(ip)
[ "def", "action_ipset", "(", "reader", ",", "*", "args", ")", ":", "ip_set", "=", "set", "(", ")", "for", "record", "in", "reader", ":", "if", "record", ".", "log_status", "in", "(", "SKIPDATA", ",", "NODATA", ")", ":", "continue", "ip_set", ".", "add...
Show the set of IPs seen in Flow Log records.
[ "Show", "the", "set", "of", "IPs", "seen", "in", "Flow", "Log", "records", "." ]
python
train
mjirik/imcut
imcut/graph.py
https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/graph.py#L586-L636
def gen_grid_2d(shape, voxelsize):
    """
    Generate the base graph (nodes, edges, edge directions) for a 2D grid.

    :param shape: (nrows, ncols) of the grid
    :param voxelsize: physical cell size along each axis; voxelsize[0]
        scales the first node coordinate (column direction), voxelsize[1]
        the second (row direction)
    :return: tuple (nodes, edges, edge_dir) where nodes is an
        (nrows*ncols, 3) float32 array of cell-centre coordinates (third
        coordinate left at 0), edges is an (nedges, 2) int16 array of
        node index pairs, and edge_dir is a bool array that is False for
        "horizontal" edges (consecutive columns) and True for "vertical"
        edges (consecutive rows).
    """
    nr, nc = shape
    nrm1, ncm1 = nr - 1, nc - 1
    # number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncols)
    nedges = 0
    for direction in range(len(shape)):
        sh = list(shape)  # list() already copies; copy.copy was redundant
        sh[direction] -= 1
        nedges += nm.prod(sh)

    edges = nm.zeros((nedges, 2), dtype=nm.int16)
    # plain bool instead of the nm.bool alias removed in NumPy >= 1.24;
    # reuse nedges instead of re-deriving ncm1*nr + nrm1*nc
    edge_dir = nm.zeros((nedges,), dtype=bool)
    nodes = nm.zeros((nm.prod(shape), 3), dtype=nm.float32)

    # horizontal edges: connect node j to node j+1 within each row
    idx = 0
    row = nm.zeros((ncm1, 2), dtype=nm.int16)
    row[:, 0] = nm.arange(ncm1)
    row[:, 1] = nm.arange(ncm1) + 1
    for ii in range(nr):
        edges[slice(idx, idx + ncm1), :] = row + nc * ii
        idx += ncm1
    edge_dir[slice(0, idx)] = 0  # horizontal dir

    # vertical edges: connect node i*nc to node (i+1)*nc within each column
    idx0 = idx
    col = nm.zeros((nrm1, 2), dtype=nm.int16)
    col[:, 0] = nm.arange(nrm1) * nc
    col[:, 1] = nm.arange(nrm1) * nc + nc
    for ii in range(nc):
        edges[slice(idx, idx + nrm1), :] = col + ii
        idx += nrm1
    edge_dir[slice(idx0, idx)] = 1  # vertical dir

    # nodes: cell centres filled one grid row at a time
    idx = 0
    row = nm.zeros((nc, 3), dtype=nm.float32)
    row[:, 0] = voxelsize[0] * (nm.arange(nc) + 0.5)
    row[:, 1] = voxelsize[1] * 0.5
    for ii in range(nr):
        nodes[slice(idx, idx + nc), :] = row
        row[:, 1] += voxelsize[1]
        idx += nc

    return nodes, edges, edge_dir
[ "def", "gen_grid_2d", "(", "shape", ",", "voxelsize", ")", ":", "nr", ",", "nc", "=", "shape", "nrm1", ",", "ncm1", "=", "nr", "-", "1", ",", "nc", "-", "1", "# sh = nm.asarray(shape)", "# calculate number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncol...
Generate list of edges for a base grid.
[ "Generate", "list", "of", "edges", "for", "a", "base", "grid", "." ]
python
train
SheffieldML/GPy
GPy/examples/regression.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/examples/regression.py#L404-L420
def silhouette(max_iters=100, optimize=True, plot=True):
    """Predict the pose of a figure given a silhouette.

    This is a task from the Agarwal and Triggs 2004 ICML paper.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.silhouette()

    # Fit a plain GP regression model to the silhouette features.
    model = GPy.models.GPRegression(data['X'], data['Y'])
    if optimize:
        model.optimize(messages=True, max_iters=max_iters)

    print(model)
    return model
[ "def", "silhouette", "(", "max_iters", "=", "100", ",", "optimize", "=", "True", ",", "plot", "=", "True", ")", ":", "try", ":", "import", "pods", "except", "ImportError", ":", "print", "(", "'pods unavailable, see https://github.com/sods/ods for example datasets'",...
Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper.
[ "Predict", "the", "pose", "of", "a", "figure", "given", "a", "silhouette", ".", "This", "is", "a", "task", "from", "Agarwal", "and", "Triggs", "2004", "ICML", "paper", "." ]
python
train
Ex-Mente/auxi.0
auxi/modelling/business/structure.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/business/structure.py#L185-L197
def remove_component(self, name):
    """
    Remove a sub component from the component.

    :param name: The name of the component to remove.
    """
    # Locate every component carrying this name; if any exist, remove
    # the last one found (mirrors the original scan-to-end behaviour).
    matches = [c for c in self.components if c.name == name]
    if matches:
        self.components.remove(matches[-1])
[ "def", "remove_component", "(", "self", ",", "name", ")", ":", "component_to_remove", "=", "None", "for", "c", "in", "self", ".", "components", ":", "if", "c", ".", "name", "==", "name", ":", "component_to_remove", "=", "c", "if", "component_to_remove", "i...
Remove a sub component from the component. :param name: The name of the component to remove.
[ "Remove", "a", "sub", "component", "from", "the", "component", "." ]
python
valid
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L451-L476
def log(level, msg, *args, **kwargs):
    """Logs 'msg % args' at absl logging level 'level'.

    If no args are given just print msg, ignoring any interpolation
    specifiers.

    Args:
      level: int, the absl logging level at which to log the message
          (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose
          logging level constants are also supported, callers should prefer
          explicit logging.vlog() calls for such purpose.
      msg: str, the message to be logged.
      *args: The args to be substitued into the msg.
      **kwargs: May contain exc_info to add exception traceback to message.
    """
    if level > converter.ABSL_DEBUG:
        # Levels above DEBUG act as verbosity values (vlog): verbosity n
        # maps to standard DEBUG minus (n - 1).
        standard_level = converter.STANDARD_DEBUG - (level - 1)
    else:
        # Clamp anything below FATAL up to FATAL, then convert to the
        # standard logging scale.
        standard_level = converter.absl_to_standard(
            max(level, converter.ABSL_FATAL))
    _absl_logger.log(standard_level, msg, *args, **kwargs)
[ "def", "log", "(", "level", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "level", ">", "converter", ".", "ABSL_DEBUG", ":", "# Even though this function supports level that is greater than 1, users", "# should use logging.vlog instead for such ...
Logs 'msg % args' at absl logging level 'level'. If no args are given just print msg, ignoring any interpolation specifiers. Args: level: int, the absl logging level at which to log the message (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging level constants are also supported, callers should prefer explicit logging.vlog() calls for such purpose. msg: str, the message to be logged. *args: The args to be substitued into the msg. **kwargs: May contain exc_info to add exception traceback to message.
[ "Logs", "msg", "%", "args", "at", "absl", "logging", "level", "level", "." ]
python
train
mitsei/dlkit
dlkit/json_/type/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/type/objects.py#L193-L203
def clear_description(self):
    """Clears the description.

    raise:     NoAccess - ``description`` cannot be modified
    *compliance: mandatory -- This method must be implemented.*

    NOTE(review): despite the name and docstring, the body checks the
    *domain* metadata and resets ``self._my_map['domain']`` -- this looks
    like a copy/paste defect from a clear_domain implementation.  Confirm
    whether it should use the description metadata and reset
    ``self._my_map['description']`` instead.
    """
    if (self.get_domain_metadata().is_read_only() or
            self.get_domain_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['domain'] = self._domain_metadata['default_string_values'][0]
[ "def", "clear_description", "(", "self", ")", ":", "if", "(", "self", ".", "get_domain_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_domain_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".",...
Clears the description. raise: NoAccess - ``description`` cannot be modified *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "description", "." ]
python
train
mfussenegger/cr8
cr8/java_magic.py
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/java_magic.py#L57-L65
def find_java_home(cratedb_version: tuple) -> str:
    """Return a JAVA_HOME path suited to the given CrateDB version."""
    if MIN_VERSION_FOR_JVM11 <= cratedb_version < (4, 0):
        # These versions support JVM 8 through 11+, so honour whatever
        # the environment already provides.
        return os.environ.get('JAVA_HOME', '')
    if cratedb_version < MIN_VERSION_FOR_JVM11:
        # Older CrateDB requires exactly JVM 8.
        matches = lambda ver: ver[0] == 8
    else:
        # Anything from 4.0 on needs JVM 11 or newer.
        matches = lambda ver: ver[0] >= 11
    return _find_matching_java_home(matches)
[ "def", "find_java_home", "(", "cratedb_version", ":", "tuple", ")", "->", "str", ":", "if", "MIN_VERSION_FOR_JVM11", "<=", "cratedb_version", "<", "(", "4", ",", "0", ")", ":", "# Supports 8 to 11+, use whatever is set", "return", "os", ".", "environ", ".", "get...
Return a path to a JAVA_HOME suites for the given CrateDB version
[ "Return", "a", "path", "to", "a", "JAVA_HOME", "suites", "for", "the", "given", "CrateDB", "version" ]
python
train
klahnakoski/pyLibrary
pyLibrary/env/big_data.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/pyLibrary/env/big_data.py#L390-L408
def scompressed2ibytes(stream):
    """
    :param stream: SOMETHING WITH read() METHOD TO GET MORE BYTES
    :return: GENERATOR OF UNCOMPRESSED BYTES
    """
    def chunks():
        # Pull fixed-size chunks until the stream is exhausted, and make
        # sure the stream is closed however iteration ends.
        try:
            while True:
                block = stream.read(4096)
                if not block:
                    return
                yield block
        except Exception as e:
            Log.error("Problem iterating through stream", cause=e)
        finally:
            with suppress_exception:
                stream.close()

    return icompressed2ibytes(chunks())
[ "def", "scompressed2ibytes", "(", "stream", ")", ":", "def", "more", "(", ")", ":", "try", ":", "while", "True", ":", "bytes_", "=", "stream", ".", "read", "(", "4096", ")", "if", "not", "bytes_", ":", "return", "yield", "bytes_", "except", "Exception"...
:param stream: SOMETHING WITH read() METHOD TO GET MORE BYTES :return: GENERATOR OF UNCOMPRESSED BYTES
[ ":", "param", "stream", ":", "SOMETHING", "WITH", "read", "()", "METHOD", "TO", "GET", "MORE", "BYTES", ":", "return", ":", "GENERATOR", "OF", "UNCOMPRESSED", "BYTES" ]
python
train
merll/docker-map
dockermap/map/client.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/client.py#L276-L292
def restart(self, container, instances=None, map_name=None, **kwargs):
    """
    Restarts instances of a container configuration by dispatching a
    'restart' action through the runner.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to restart. When omitted, all instances
     from the configuration (or the single default instance) are restarted.
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name; the default map is used if not given.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs; when several actions result, these are
     only applied to the main container restart.
    :return: Return values of restarted containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    return self.run_actions('restart', container,
                            map_name=map_name, instances=instances, **kwargs)
[ "def", "restart", "(", "self", ",", "container", ",", "instances", "=", "None", ",", "map_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "run_actions", "(", "'restart'", ",", "container", ",", "instances", "=", "instances",...
Restarts instances for a container configuration. :param container: Container name. :type container: unicode | str :param instances: Instance names to stop. If not specified, will restart all instances as specified in the configuration (or just one default instance). :type instances: collections.Iterable[unicode | str | NoneType] :param map_name: Container map name. Optional - if not provided the default map is used. :type map_name: unicode | str :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to the main container restart. :return: Return values of restarted containers. :rtype: list[dockermap.map.runner.ActionOutput]
[ "Restarts", "instances", "for", "a", "container", "configuration", "." ]
python
train
slightlynybbled/tk_tools
tk_tools/groups.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L840-L853
def add(self, string: (str, list)):
    """
    Append a new slot containing ``string`` to the multi-frame.

    :param string: a string to insert
    :return: None
    """
    new_slot = _SlotFrame(
        self,
        remove_callback=self._redraw,
        entries=self._slot_columns,
    )
    new_slot.add(string)
    self._slots.append(new_slot)
    self._redraw()
[ "def", "add", "(", "self", ",", "string", ":", "(", "str", ",", "list", ")", ")", ":", "slot", "=", "_SlotFrame", "(", "self", ",", "remove_callback", "=", "self", ".", "_redraw", ",", "entries", "=", "self", ".", "_slot_columns", ")", "slot", ".", ...
Add a new slot to the multi-frame containing the string. :param string: a string to insert :return: None
[ "Add", "a", "new", "slot", "to", "the", "multi", "-", "frame", "containing", "the", "string", ".", ":", "param", "string", ":", "a", "string", "to", "insert", ":", "return", ":", "None" ]
python
train
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L1503-L1585
def merge(name, assemblies):
    """
    Creates and returns a new Assembly object in which samples from
    two or more Assembly objects with matching names are 'merged'.
    Merging does not affect the actual files written on disk, but rather
    creates new Samples that are linked to multiple data files, and with
    stats summed.
    """
    ## checks
    assemblies = list(assemblies)

    ## create new Assembly as a branch (deepcopy)
    merged = assemblies[0].branch(name)

    ## get all sample names from all Assemblies
    allsamples = set(merged.samples.keys())
    for iterass in assemblies[1:]:
        allsamples.update(set(iterass.samples.keys()))

    ## Make sure we have the max of all values for max frag length
    ## from all merging assemblies.
    merged._hackersonly["max_fragment_length"] =\
        max([x._hackersonly["max_fragment_length"] for x in assemblies])

    ## warning message? (counts samples that already progressed to clustering)
    warning = 0

    ## iterate over assembly objects, skip first already copied
    for iterass in assemblies[1:]:
        ## iterate over allsamples, add if not in merged
        for sample in iterass.samples:
            ## iterate over stats, skip 'state'
            if sample not in merged.samples:
                merged.samples[sample] = copy.deepcopy(iterass.samples[sample])
                ## if barcodes data present then keep it
                if iterass.barcodes.get(sample):
                    merged.barcodes[sample] = iterass.barcodes[sample]
            else:
                ## merge stats and files of the sample
                ## NOTE(review): .keys()[1:] relies on a sliceable keys()
                ## (Python 2 list / pandas Index) -- presumably stats is a
                ## pandas object; confirm on Python 3.
                for stat in merged.stats.keys()[1:]:
                    merged.samples[sample].stats[stat] += \
                        iterass.samples[sample].stats[stat]
                ## merge file references into a list
                for filetype in ['fastqs', 'edits']:
                    merged.samples[sample].files[filetype] += \
                        iterass.samples[sample].files[filetype]
                if iterass.samples[sample].files["clusters"]:
                    warning += 1

    ## print warning if clusters or later was present in merged assembly
    if warning:
        print("""\
    Warning: the merged Assemblies contained Samples that are identically named,
    and so ipyrad has attempted to merge these Samples. This is perfectly fine to
    do up until step 3, but not after, because at step 3 all reads for a Sample
    should be included during clustering/mapping. Take note, you can merge Assemblies
    at any step *if they do not contain the same Samples*, however, here that is not
    the case. If you wish to proceed with this merged Assembly you will have to
    start from step 3, therefore the 'state' of the Samples in this new merged
    Assembly ({}) have been set to 2.
    """.format(name))
        ## Reset every sample back to state 2 so clustering is redone on the
        ## combined reads.
        for sample in merged.samples:
            merged.samples[sample].stats.state = 2
            ## clear stats
            for stat in ["refseq_mapped_reads", "refseq_unmapped_reads",
                         "clusters_total", "clusters_hidepth", "hetero_est",
                         "error_est", "reads_consens"]:
                merged.samples[sample].stats[stat] = 0
            ## clear files
            for ftype in ["mapped_reads", "unmapped_reads", "clusters",
                          "consens", "database"]:
                merged.samples[sample].files[ftype] = []

    ## Set the values for some params that don't make sense inside
    ## merged assemblies
    merged_names = ", ".join([x.name for x in assemblies])
    merged.paramsdict["raw_fastq_path"] = "Merged: " + merged_names
    merged.paramsdict["barcodes_path"] = "Merged: " + merged_names
    merged.paramsdict["sorted_fastq_path"] = "Merged: " + merged_names

    ## return the new Assembly object
    merged.save()
    return merged
[ "def", "merge", "(", "name", ",", "assemblies", ")", ":", "## checks", "assemblies", "=", "list", "(", "assemblies", ")", "## create new Assembly as a branch (deepcopy)", "merged", "=", "assemblies", "[", "0", "]", ".", "branch", "(", "name", ")", "## get all sa...
Creates and returns a new Assembly object in which samples from two or more Assembly objects with matching names are 'merged'. Merging does not affect the actual files written on disk, but rather creates new Samples that are linked to multiple data files, and with stats summed.
[ "Creates", "and", "returns", "a", "new", "Assembly", "object", "in", "which", "samples", "from", "two", "or", "more", "Assembly", "objects", "with", "matching", "names", "are", "merged", ".", "Merging", "does", "not", "affect", "the", "actual", "files", "wri...
python
valid
Neurita/boyle
boyle/dicom/comparison.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L542-L558
def move_to_folder(self, folder_path, groupby_field_name=None): """Copy the file groups to folder_path. Each group will be copied into a subfolder with named given by groupby_field. Parameters ---------- folder_path: str Path to where copy the DICOM files. groupby_field_name: str DICOM field name. Will get the value of this field to name the group folder. If empty or None will use the basename of the group key file. """ try: copy_groups_to_folder(self.dicom_groups, folder_path, groupby_field_name) except IOError as ioe: raise IOError('Error moving dicom groups to {}.'.format(folder_path)) from ioe
[ "def", "move_to_folder", "(", "self", ",", "folder_path", ",", "groupby_field_name", "=", "None", ")", ":", "try", ":", "copy_groups_to_folder", "(", "self", ".", "dicom_groups", ",", "folder_path", ",", "groupby_field_name", ")", "except", "IOError", "as", "ioe...
Copy the file groups to folder_path. Each group will be copied into a subfolder with named given by groupby_field. Parameters ---------- folder_path: str Path to where copy the DICOM files. groupby_field_name: str DICOM field name. Will get the value of this field to name the group folder. If empty or None will use the basename of the group key file.
[ "Copy", "the", "file", "groups", "to", "folder_path", ".", "Each", "group", "will", "be", "copied", "into", "a", "subfolder", "with", "named", "given", "by", "groupby_field", "." ]
python
valid
kejbaly2/members
members/orgchart3.py
https://github.com/kejbaly2/members/blob/28e70a25cceade514c550e3ce9963f73167e8572/members/orgchart3.py#L83-L116
def extract(uid=None, base_url=None, use_default_email_domain=False, default_email_domain=None, user=None, password=None): ''' FIXME: DOCS... ''' assert base_url and isinstance(base_url, (unicode, str)) export_url = os.path.join(base_url, 'export_csv') # ded is shortform for "default email domain" # this will convert uid's to uid@default_email_domain.com use_ded = use_default_email_domain ded = default_email_domain get_args = GET_ARGS.copy() get_args['uid'] = uid # pass get args as-is; requests handles them in dict form automatically csv_path = download(export_url, user=user, password=password, get_args=get_args) members_df = pd.read_csv(csv_path) users = list(members_df['Kerberos'].unique()) # the team lead should be included as a member of their own team users.append(uid) if use_ded: if not ded: raise RuntimeError("No default email domain set!") users = ['@'.join((u, ded)) for u in users] logr.info('{} members FOUND'.format(len(users))) return sorted(users)
[ "def", "extract", "(", "uid", "=", "None", ",", "base_url", "=", "None", ",", "use_default_email_domain", "=", "False", ",", "default_email_domain", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "assert", "base_url", "and",...
FIXME: DOCS...
[ "FIXME", ":", "DOCS", "..." ]
python
train
rbarrois/xworkflows
src/xworkflows/base.py
https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L734-L745
def get_custom_implementations(self): """Retrieve a list of cutom implementations. Yields: (str, str, ImplementationProperty) tuples: The name of the attribute an implementation lives at, the name of the related transition, and the related implementation. """ for trname in self.custom_implems: attr = self.transitions_at[trname] implem = self.implementations[trname] yield (trname, attr, implem)
[ "def", "get_custom_implementations", "(", "self", ")", ":", "for", "trname", "in", "self", ".", "custom_implems", ":", "attr", "=", "self", ".", "transitions_at", "[", "trname", "]", "implem", "=", "self", ".", "implementations", "[", "trname", "]", "yield",...
Retrieve a list of cutom implementations. Yields: (str, str, ImplementationProperty) tuples: The name of the attribute an implementation lives at, the name of the related transition, and the related implementation.
[ "Retrieve", "a", "list", "of", "cutom", "implementations", "." ]
python
train
google/apitools
apitools/base/py/credentials_lib.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L477-L489
def _refresh(self, _): """Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature. """ # pylint: disable=import-error from google.appengine.api import app_identity try: token, _ = app_identity.get_access_token(self._scopes) except app_identity.Error as e: raise exceptions.CredentialsError(str(e)) self.access_token = token
[ "def", "_refresh", "(", "self", ",", "_", ")", ":", "# pylint: disable=import-error", "from", "google", ".", "appengine", ".", "api", "import", "app_identity", "try", ":", "token", ",", "_", "=", "app_identity", ".", "get_access_token", "(", "self", ".", "_s...
Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature.
[ "Refresh", "self", ".", "access_token", "." ]
python
train
python-rope/rope
rope/base/pyscopes.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pyscopes.py#L22-L26
def get_name(self, name): """Return name `PyName` defined in this scope""" if name not in self.get_names(): raise exceptions.NameNotFoundError('name %s not found' % name) return self.get_names()[name]
[ "def", "get_name", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "get_names", "(", ")", ":", "raise", "exceptions", ".", "NameNotFoundError", "(", "'name %s not found'", "%", "name", ")", "return", "self", ".", "get_names", ...
Return name `PyName` defined in this scope
[ "Return", "name", "PyName", "defined", "in", "this", "scope" ]
python
train
zetaops/zengine
zengine/messaging/model.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/model.py#L145-L151
def delete_exchange(self): """ Deletes MQ exchange for this channel Needs to be defined only once. """ mq_channel = self._connect_mq() mq_channel.exchange_delete(exchange=self.code_name)
[ "def", "delete_exchange", "(", "self", ")", ":", "mq_channel", "=", "self", ".", "_connect_mq", "(", ")", "mq_channel", ".", "exchange_delete", "(", "exchange", "=", "self", ".", "code_name", ")" ]
Deletes MQ exchange for this channel Needs to be defined only once.
[ "Deletes", "MQ", "exchange", "for", "this", "channel", "Needs", "to", "be", "defined", "only", "once", "." ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/directory.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L4248-L4262
def team_2_json(self): """ transform ariane_clip3 team object to Ariane server JSON obj :return: Ariane JSON obj """ LOGGER.debug("Team.team_2_json") json_obj = { 'teamID': self.id, 'teamName': self.name, 'teamDescription': self.description, 'teamColorCode': self.color_code, 'teamOSInstancesID': self.osi_ids, 'teamApplicationsID': self.app_ids } return json.dumps(json_obj)
[ "def", "team_2_json", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"Team.team_2_json\"", ")", "json_obj", "=", "{", "'teamID'", ":", "self", ".", "id", ",", "'teamName'", ":", "self", ".", "name", ",", "'teamDescription'", ":", "self", ".", "des...
transform ariane_clip3 team object to Ariane server JSON obj :return: Ariane JSON obj
[ "transform", "ariane_clip3", "team", "object", "to", "Ariane", "server", "JSON", "obj", ":", "return", ":", "Ariane", "JSON", "obj" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L13801-L13820
def set_storage_controller_bootable(self, name, bootable): """Sets the bootable flag of the storage controller with the given name. in name of type str in bootable of type bool raises :class:`VBoxErrorObjectNotFound` A storage controller with given name doesn't exist. raises :class:`VBoxErrorObjectInUse` Another storage controller is marked as bootable already. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") if not isinstance(bootable, bool): raise TypeError("bootable can only be an instance of type bool") self._call("setStorageControllerBootable", in_p=[name, bootable])
[ "def", "set_storage_controller_bootable", "(", "self", ",", "name", ",", "bootable", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "if", "not", ...
Sets the bootable flag of the storage controller with the given name. in name of type str in bootable of type bool raises :class:`VBoxErrorObjectNotFound` A storage controller with given name doesn't exist. raises :class:`VBoxErrorObjectInUse` Another storage controller is marked as bootable already.
[ "Sets", "the", "bootable", "flag", "of", "the", "storage", "controller", "with", "the", "given", "name", "." ]
python
train
slightlynybbled/tk_tools
tk_tools/groups.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L109-L132
def add_row(self, data: list): """ Add a row of data to the current widget :param data: a row of data :return: None """ # validation if self.headers: if len(self.headers) != len(data): raise ValueError if len(data) != self.num_of_columns: raise ValueError offset = 0 if not self.headers else 1 row = list() for i, element in enumerate(data): label = ttk.Label(self, text=str(element), relief=tk.GROOVE, padding=self.padding) label.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(label) self._rows.append(row)
[ "def", "add_row", "(", "self", ",", "data", ":", "list", ")", ":", "# validation", "if", "self", ".", "headers", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "if", "len", "(", "data"...
Add a row of data to the current widget :param data: a row of data :return: None
[ "Add", "a", "row", "of", "data", "to", "the", "current", "widget" ]
python
train
argaen/aiocache
aiocache/base.py
https://github.com/argaen/aiocache/blob/fdd282f37283ca04e22209f4d2ae4900f29e1688/aiocache/base.py#L278-L308
async def multi_set(self, pairs, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None): """ Stores multiple values in the given keys. :param pairs: list of two element iterables. First is key and second is value :param ttl: int the expiration time in seconds. Due to memcached restrictions if you want compatibility use int. In case you need miliseconds, redis and memory support float ttls :param dumps_fn: callable alternative to use as dumps function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: True :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout """ start = time.monotonic() dumps = dumps_fn or self._serializer.dumps tmp_pairs = [] for key, value in pairs: tmp_pairs.append((self.build_key(key, namespace=namespace), dumps(value))) await self._multi_set(tmp_pairs, ttl=self._get_ttl(ttl), _conn=_conn) logger.debug( "MULTI_SET %s %d (%.4f)s", [key for key, value in tmp_pairs], len(pairs), time.monotonic() - start, ) return True
[ "async", "def", "multi_set", "(", "self", ",", "pairs", ",", "ttl", "=", "SENTINEL", ",", "dumps_fn", "=", "None", ",", "namespace", "=", "None", ",", "_conn", "=", "None", ")", ":", "start", "=", "time", ".", "monotonic", "(", ")", "dumps", "=", "...
Stores multiple values in the given keys. :param pairs: list of two element iterables. First is key and second is value :param ttl: int the expiration time in seconds. Due to memcached restrictions if you want compatibility use int. In case you need miliseconds, redis and memory support float ttls :param dumps_fn: callable alternative to use as dumps function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: True :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
[ "Stores", "multiple", "values", "in", "the", "given", "keys", "." ]
python
train
NoneGG/aredis
aredis/commands/sorted_set.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/sorted_set.py#L81-L110
async def zaddoption(self, name, option=None, *args, **kwargs): """ Differs from zadd in that you can set either 'XX' or 'NX' option as described here: https://redis.io/commands/zadd. Only for Redis 3.0.2 or later. The following example would add four values to the 'my-key' key: redis.zaddoption('my-key', 'XX', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4) redis.zaddoption('my-key', 'NX CH', name1=2.2) """ if not option: raise RedisError("ZADDOPTION must take options") options = set(opt.upper() for opt in option.split()) if options - VALID_ZADD_OPTIONS: raise RedisError("ZADD only takes XX, NX, CH, or INCR") if 'NX' in options and 'XX' in options: raise RedisError("ZADD only takes one of XX or NX") pieces = list(options) members = [] if args: if len(args) % 2 != 0: raise RedisError("ZADD requires an equal number of " "values and scores") members.extend(args) for pair in iteritems(kwargs): members.append(pair[1]) members.append(pair[0]) if 'INCR' in options and len(members) != 2: raise RedisError("ZADD with INCR only takes one score-name pair") return await self.execute_command('ZADD', name, *pieces, *members)
[ "async", "def", "zaddoption", "(", "self", ",", "name", ",", "option", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "option", ":", "raise", "RedisError", "(", "\"ZADDOPTION must take options\"", ")", "options", "=", "set...
Differs from zadd in that you can set either 'XX' or 'NX' option as described here: https://redis.io/commands/zadd. Only for Redis 3.0.2 or later. The following example would add four values to the 'my-key' key: redis.zaddoption('my-key', 'XX', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4) redis.zaddoption('my-key', 'NX CH', name1=2.2)
[ "Differs", "from", "zadd", "in", "that", "you", "can", "set", "either", "XX", "or", "NX", "option", "as", "described", "here", ":", "https", ":", "//", "redis", ".", "io", "/", "commands", "/", "zadd", ".", "Only", "for", "Redis", "3", ".", "0", "....
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L2792-L2814
def disable_code_breakpoint(self, dwProcessId, address): """ Disables the code breakpoint at the given address. @see: L{define_code_breakpoint}, L{has_code_breakpoint}, L{get_code_breakpoint}, L{enable_code_breakpoint} L{enable_one_shot_code_breakpoint}, L{erase_code_breakpoint}, @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint. """ p = self.system.get_process(dwProcessId) bp = self.get_code_breakpoint(dwProcessId, address) if bp.is_running(): self.__del_running_bp_from_all_threads(bp) bp.disable(p, None)
[ "def", "disable_code_breakpoint", "(", "self", ",", "dwProcessId", ",", "address", ")", ":", "p", "=", "self", ".", "system", ".", "get_process", "(", "dwProcessId", ")", "bp", "=", "self", ".", "get_code_breakpoint", "(", "dwProcessId", ",", "address", ")",...
Disables the code breakpoint at the given address. @see: L{define_code_breakpoint}, L{has_code_breakpoint}, L{get_code_breakpoint}, L{enable_code_breakpoint} L{enable_one_shot_code_breakpoint}, L{erase_code_breakpoint}, @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint.
[ "Disables", "the", "code", "breakpoint", "at", "the", "given", "address", "." ]
python
train
apache/incubator-mxnet
example/gluon/embedding_learning/train.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/embedding_learning/train.py#L169-L250
def train(epochs, ctx): """Training function.""" if isinstance(ctx, mx.Context): ctx = [ctx] net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx) opt_options = {'learning_rate': opt.lr, 'wd': opt.wd} if opt.optimizer == 'sgd': opt_options['momentum'] = 0.9 if opt.optimizer == 'adam': opt_options['epsilon'] = 1e-7 trainer = gluon.Trainer(net.collect_params(), opt.optimizer, opt_options, kvstore=opt.kvstore) if opt.lr_beta > 0.0: # Jointly train class-specific beta. # See "sampling matters in deep embedding learning" paper for details. beta.initialize(mx.init.Constant(opt.beta), ctx=ctx) trainer_beta = gluon.Trainer([beta], 'sgd', {'learning_rate': opt.lr_beta, 'momentum': 0.9}, kvstore=opt.kvstore) loss = MarginLoss(margin=opt.margin, nu=opt.nu) best_val = 0.0 for epoch in range(epochs): tic = time.time() prev_loss, cumulative_loss = 0.0, 0.0 # Learning rate schedule. trainer.set_learning_rate(get_lr(opt.lr, epoch, steps, opt.factor)) logging.info('Epoch %d learning rate=%f', epoch, trainer.learning_rate) if opt.lr_beta > 0.0: trainer_beta.set_learning_rate(get_lr(opt.lr_beta, epoch, steps, opt.factor)) logging.info('Epoch %d beta learning rate=%f', epoch, trainer_beta.learning_rate) # Inner training loop. for i in range(200): batch = train_data.next() data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0) label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0) Ls = [] with ag.record(): for x, y in zip(data, label): a_indices, anchors, positives, negatives, _ = net(x) if opt.lr_beta > 0.0: L = loss(anchors, positives, negatives, beta, y[a_indices]) else: L = loss(anchors, positives, negatives, opt.beta, None) # Store the loss and do backward after we have done forward # on all GPUs for better speed on multiple GPUs. Ls.append(L) cumulative_loss += nd.mean(L).asscalar() for L in Ls: L.backward() # Update. 
trainer.step(batch.data[0].shape[0]) if opt.lr_beta > 0.0: trainer_beta.step(batch.data[0].shape[0]) if (i+1) % opt.log_interval == 0: logging.info('[Epoch %d, Iter %d] training loss=%f' % ( epoch, i+1, cumulative_loss - prev_loss)) prev_loss = cumulative_loss logging.info('[Epoch %d] training loss=%f'%(epoch, cumulative_loss)) logging.info('[Epoch %d] time cost: %f'%(epoch, time.time()-tic)) names, val_accs = test(ctx) for name, val_acc in zip(names, val_accs): logging.info('[Epoch %d] validation: %s=%f'%(epoch, name, val_acc)) if val_accs[0] > best_val: best_val = val_accs[0] logging.info('Saving %s.' % opt.save_model_prefix) net.save_parameters('%s.params' % opt.save_model_prefix) return best_val
[ "def", "train", "(", "epochs", ",", "ctx", ")", ":", "if", "isinstance", "(", "ctx", ",", "mx", ".", "Context", ")", ":", "ctx", "=", "[", "ctx", "]", "net", ".", "initialize", "(", "mx", ".", "init", ".", "Xavier", "(", "magnitude", "=", "2", ...
Training function.
[ "Training", "function", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_auditory.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_auditory.py#L91-L125
def gammatone_erb_constants(n): """ Constants for using the real bandwidth in the gammatone filter, given its order. Returns a pair :math:`(x, y) = (1/a_n, c_n)`. Based on equations from: ``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The Auditory Filter Bank. 1988.`` First returned value is a bandwidth compensation for direct use in the gammatone formula: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> round(x, 3) 1.019 >>> bandwidth = x * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth, 2) 130.52 Second returned value helps us find the ``3 dB`` bandwidth as: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> bandwidth3dB = x * y * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth3dB, 2) 113.55 """ tnt = 2 * n - 2 return (factorial(n - 1) ** 2 / (pi * factorial(tnt) * 2 ** -tnt), 2 * (2 ** (1. / n) - 1) ** .5 )
[ "def", "gammatone_erb_constants", "(", "n", ")", ":", "tnt", "=", "2", "*", "n", "-", "2", "return", "(", "factorial", "(", "n", "-", "1", ")", "**", "2", "/", "(", "pi", "*", "factorial", "(", "tnt", ")", "*", "2", "**", "-", "tnt", ")", ","...
Constants for using the real bandwidth in the gammatone filter, given its order. Returns a pair :math:`(x, y) = (1/a_n, c_n)`. Based on equations from: ``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The Auditory Filter Bank. 1988.`` First returned value is a bandwidth compensation for direct use in the gammatone formula: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> round(x, 3) 1.019 >>> bandwidth = x * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth, 2) 130.52 Second returned value helps us find the ``3 dB`` bandwidth as: >>> x, y = gammatone_erb_constants(4) >>> central_frequency = 1000 >>> bandwidth3dB = x * y * erb["moore_glasberg_83"](central_frequency) >>> round(bandwidth3dB, 2) 113.55
[ "Constants", "for", "using", "the", "real", "bandwidth", "in", "the", "gammatone", "filter", "given", "its", "order", ".", "Returns", "a", "pair", ":", "math", ":", "(", "x", "y", ")", "=", "(", "1", "/", "a_n", "c_n", ")", "." ]
python
train
MillionIntegrals/vel
vel/models/imagenet/resnet34.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/imagenet/resnet34.py#L75-L78
def unfreeze(self): """ Unfreeze model layers """ for idx, child in enumerate(self.model.children()): mu.unfreeze_layer(child)
[ "def", "unfreeze", "(", "self", ")", ":", "for", "idx", ",", "child", "in", "enumerate", "(", "self", ".", "model", ".", "children", "(", ")", ")", ":", "mu", ".", "unfreeze_layer", "(", "child", ")" ]
Unfreeze model layers
[ "Unfreeze", "model", "layers" ]
python
train
obulpathi/cdn-fastly-python
bin/fastly_upload_vcl.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/bin/fastly_upload_vcl.py#L34-L111
def main(): """ Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given. """ parser = OptionParser(description= "Upload a vcl file (set as main) to a given fastly service. All arguments are required.") parser.add_option("-k", "--key", dest="apikey", help="fastly api key") parser.add_option("-u", "--user", dest="user", help="fastly user name") parser.add_option("-p", "--password", dest="password", help="fastly password") parser.add_option("-f", "--file", dest="filename", help="vcl file to upload") parser.add_option("-s", "--service", dest="service_name", help="service to update") parser.add_option("-d", "--delete_vcl", action="store_true", dest="delete_vcl", default=False, help="delete existing vcl files from service\ before uploading") parser.add_option("-i", "--include", action="store_true", dest="include_vcl", default=False, help="do not set uploaded vcl as main,\ to be included only") (options, args) = parser.parse_args() for val in options.__dict__.values(): if val is None: print "Missing required options:" parser.print_help() sys.exit(1) vcl_name = options.filename.split('/').pop() service_name = options.service_name vcl_file = open(options.filename, 'r') vcl_content = vcl_file.read() # Need to fully authenticate to access all features. 
client = fastly.connect(options.apikey) client.login(options.user, options.password) service = client.get_service_by_name(service_name) versions = client.list_versions(service.id) latest = versions.pop() if latest.locked is True or latest.active is True: print "\n[ Cloning version %d ]\n"\ % (latest.number) latest = client.clone_version(service.id, latest.number) if options.delete_vcl: vcls = client.list_vcls(service.id, latest.number) for vcl in vcls: print "\n[ Deleting vcl file %s from version %d ]\n" %\ (service_name, latest.number) client.delete_vcl(service.id, latest.number, vcl.name) if vcl_name in latest.vcls: print "\n[ Updating vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.update_vcl(service.id, latest.number, vcl_name, content=vcl_content) else: print "\n[ Uploading new vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.upload_vcl(service.id, latest.number, vcl_name, vcl_content) if options.include_vcl is False: print "\n[ Setting vcl %s as main ]\n" % (vcl_name) client.set_main_vcl(service.id, latest.number, vcl_name) client.activate_version(service.id, latest.number) print "\n[ Activing configuration version %d ]\n" % (latest.number)
[ "def", "main", "(", ")", ":", "parser", "=", "OptionParser", "(", "description", "=", "\"Upload a vcl file (set as main) to a given fastly service. All arguments are required.\"", ")", "parser", ".", "add_option", "(", "\"-k\"", ",", "\"--key\"", ",", "dest", "=", "\"ap...
Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given.
[ "Upload", "a", "vcl", "file", "to", "a", "fastly", "service", "cloning", "the", "current", "version", "if", "necessary", ".", "The", "uploaded", "vcl", "is", "set", "as", "main", "unless", "--", "include", "is", "given", ".", "All", "existing", "vcl", "f...
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/transforms.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/transforms.py#L57-L80
def tlog(x, th=1, r=_display_max, d=_l_mmax): """ Truncated log10 transform. Parameters ---------- x : num | num iterable values to be transformed. th : num values below th are transormed to 0. Must be positive. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. tlog(10**d) = r Returns ------- Array of transformed values. """ if th <= 0: raise ValueError('Threshold value must be positive. %s given.' % th) return where(x <= th, log10(th) * 1. * r / d, log10(x) * 1. * r / d)
[ "def", "tlog", "(", "x", ",", "th", "=", "1", ",", "r", "=", "_display_max", ",", "d", "=", "_l_mmax", ")", ":", "if", "th", "<=", "0", ":", "raise", "ValueError", "(", "'Threshold value must be positive. %s given.'", "%", "th", ")", "return", "where", ...
Truncated log10 transform. Parameters ---------- x : num | num iterable values to be transformed. th : num values below th are transormed to 0. Must be positive. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. tlog(10**d) = r Returns ------- Array of transformed values.
[ "Truncated", "log10", "transform", "." ]
python
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/search.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/search.py#L664-L680
def count(self): """ Return the number of hits matching the query and filters. Note that only the actual number is returned. """ if hasattr(self, '_response'): return self._response.hits.total es = connections.get_connection(self._using) d = self.to_dict(count=True) # TODO: failed shards detection return es.count( index=self._index, body=d, **self._params )['count']
[ "def", "count", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_response'", ")", ":", "return", "self", ".", "_response", ".", "hits", ".", "total", "es", "=", "connections", ".", "get_connection", "(", "self", ".", "_using", ")", "d", "...
Return the number of hits matching the query and filters. Note that only the actual number is returned.
[ "Return", "the", "number", "of", "hits", "matching", "the", "query", "and", "filters", ".", "Note", "that", "only", "the", "actual", "number", "is", "returned", "." ]
python
train
janpipek/physt
physt/histogram_base.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L244-L278
def set_dtype(self, value, check: bool = True): """Change data type of the bin contents. Allowed conversions: - from integral to float types - between the same category of type (float/integer) - from float types to integer if weights are trivial Parameters ---------- value: np.dtype or something convertible to it. check: bool If True (default), all values are checked against the limits """ # TODO? Deal with unsigned types value, type_info = self._eval_dtype(value) if value == self._dtype: return if self.dtype is None or np.can_cast(self.dtype, value): pass # Ok elif check: if np.issubdtype(value, np.integer): if self.dtype.kind == "f": for array in (self._frequencies, self._errors2): if np.any(array % 1.0): raise RuntimeError("Data contain non-integer values.") for array in (self._frequencies, self._errors2): if np.any((array > type_info.max) | (array < type_info.min)): raise RuntimeError("Data contain values outside the specified range.") self._dtype = value self._frequencies = self._frequencies.astype(value) self._errors2 = self._errors2.astype(value) self._missed = self._missed.astype(value)
[ "def", "set_dtype", "(", "self", ",", "value", ",", "check", ":", "bool", "=", "True", ")", ":", "# TODO? Deal with unsigned types", "value", ",", "type_info", "=", "self", ".", "_eval_dtype", "(", "value", ")", "if", "value", "==", "self", ".", "_dtype", ...
Change data type of the bin contents. Allowed conversions: - from integral to float types - between the same category of type (float/integer) - from float types to integer if weights are trivial Parameters ---------- value: np.dtype or something convertible to it. check: bool If True (default), all values are checked against the limits
[ "Change", "data", "type", "of", "the", "bin", "contents", "." ]
python
train
gitenberg-dev/gitberg
gitenberg/util/tenprintcover.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/util/tenprintcover.py#L97-L107
def triangle(self, x1, y1, x2, y2, x3, y3, color): """ See the Processing function triangle(): https://processing.org/reference/triangle_.html """ self.context.set_source_rgb(*color) self.context.move_to(self.tx(x1), self.ty(y1)) self.context.line_to(self.tx(x2), self.ty(y2)) self.context.line_to(self.tx(x3), self.ty(y3)) self.context.line_to(self.tx(x1), self.ty(y1)) self.context.fill()
[ "def", "triangle", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ",", "color", ")", ":", "self", ".", "context", ".", "set_source_rgb", "(", "*", "color", ")", "self", ".", "context", ".", "move_to", "(", "self", ...
See the Processing function triangle(): https://processing.org/reference/triangle_.html
[ "See", "the", "Processing", "function", "triangle", "()", ":", "https", ":", "//", "processing", ".", "org", "/", "reference", "/", "triangle_", ".", "html" ]
python
train
PmagPy/PmagPy
programs/pmag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/pmag_gui.py#L441-L471
def on_btn_thellier_gui(self, event): """ Open Thellier GUI """ if not self.check_for_meas_file(): return if not self.check_for_uncombined_files(): return outstring = "thellier_gui.py -WD %s"%self.WD print("-I- running python script:\n %s"%(outstring)) if self.data_model_num == 2.5: thellier_gui.main(self.WD, standalone_app=False, parent=self, DM=self.data_model_num) else: # disable and hide Pmag GUI mainframe self.Disable() self.Hide() # show busyinfo wait = wx.BusyInfo('Compiling required data, please wait...') wx.SafeYield() # create custom Thellier GUI closing event and bind it ThellierGuiExitEvent, EVT_THELLIER_GUI_EXIT = newevent.NewCommandEvent() self.Bind(EVT_THELLIER_GUI_EXIT, self.on_analysis_gui_exit) # make and show the Thellier GUI frame thellier_gui_frame = thellier_gui.Arai_GUI(self.WD, self, standalone=False, DM=self.data_model_num, evt_quit=ThellierGuiExitEvent) if not thellier_gui_frame: print("Thellier GUI failed to start aborting"); del wait; return thellier_gui_frame.Centre() thellier_gui_frame.Show() del wait
[ "def", "on_btn_thellier_gui", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "check_for_meas_file", "(", ")", ":", "return", "if", "not", "self", ".", "check_for_uncombined_files", "(", ")", ":", "return", "outstring", "=", "\"thellier_gui.py -...
Open Thellier GUI
[ "Open", "Thellier", "GUI" ]
python
train
bpsmith/tia
tia/rlab/table.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L391-L405
def apply_styles(self, cmdmap): """ Apply the set of commands defined in cmdmap. for example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white}) :param cmdmap: dict of commands mapped to the command arguments :return: self """ is_list_like = lambda arg: isinstance(arg, (list, tuple)) is_first_param_list = lambda c: c in ('COLBACKGROUNDS', 'ROWBACKGROUNDS') for cmd, args in cmdmap.iteritems(): if not is_list_like(args): args = [args] elif is_first_param_list(cmd) and is_list_like(args) and not is_list_like(args[0]): args = [args] self.apply_style(cmd, *args) return self
[ "def", "apply_styles", "(", "self", ",", "cmdmap", ")", ":", "is_list_like", "=", "lambda", "arg", ":", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", "is_first_param_list", "=", "lambda", "c", ":", "c", "in", "(", "'COLBACKGROUNDS'"...
Apply the set of commands defined in cmdmap. for example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white}) :param cmdmap: dict of commands mapped to the command arguments :return: self
[ "Apply", "the", "set", "of", "commands", "defined", "in", "cmdmap", ".", "for", "example", "apply_styles", "(", "{", "FONTSIZE", ":", "12", "BACKGROUND", ":", "white", "}", ")", ":", "param", "cmdmap", ":", "dict", "of", "commands", "mapped", "to", "the"...
python
train
modin-project/modin
modin/experimental/engines/pandas_on_ray/sql.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L93-L106
def check_query(query): """ Check query sanity Args: query: query string Returns: None """ q = query.lower() if "select " not in q: raise InvalidQuery("SELECT word not found in the query: {0}".format(query)) if " from " not in q: raise InvalidQuery("FROM word not found in the query: {0}".format(query))
[ "def", "check_query", "(", "query", ")", ":", "q", "=", "query", ".", "lower", "(", ")", "if", "\"select \"", "not", "in", "q", ":", "raise", "InvalidQuery", "(", "\"SELECT word not found in the query: {0}\"", ".", "format", "(", "query", ")", ")", "if", "...
Check query sanity Args: query: query string Returns: None
[ "Check", "query", "sanity" ]
python
train
CZ-NIC/python-rt
rt.py
https://github.com/CZ-NIC/python-rt/blob/e7a9f555e136708aec3317f857045145a2271e16/rt.py#L1079-L1123
def get_user(self, user_id): """ Get user details. :param user_id: Identification of user by username (str) or user ID (int) :returns: User details as strings in dictionary with these keys for RT users: * Lang * RealName * Privileged * Disabled * Gecos * EmailAddress * Password * id * Name Or these keys for external users (e.g. Requestors replying to email from RT: * RealName * Disabled * EmailAddress * Password * id * Name None is returned if user does not exist. :raises UnexpectedMessageFormat: In case that returned status code is not 200 """ msg = self.__request('user/{}'.format(str(user_id), )) status_code = self.__get_status_code(msg) if (status_code == 200): pairs = {} lines = msg.split('\n') if (len(lines) > 2) and self.RE_PATTERNS['does_not_exist_pattern'].match(lines[2]): return None for line in lines[2:]: if ': ' in line: header, content = line.split(': ', 1) pairs[header.strip()] = content.strip() return pairs else: raise UnexpectedMessageFormat('Received status code is {:d} instead of 200.'.format(status_code))
[ "def", "get_user", "(", "self", ",", "user_id", ")", ":", "msg", "=", "self", ".", "__request", "(", "'user/{}'", ".", "format", "(", "str", "(", "user_id", ")", ",", ")", ")", "status_code", "=", "self", ".", "__get_status_code", "(", "msg", ")", "i...
Get user details. :param user_id: Identification of user by username (str) or user ID (int) :returns: User details as strings in dictionary with these keys for RT users: * Lang * RealName * Privileged * Disabled * Gecos * EmailAddress * Password * id * Name Or these keys for external users (e.g. Requestors replying to email from RT: * RealName * Disabled * EmailAddress * Password * id * Name None is returned if user does not exist. :raises UnexpectedMessageFormat: In case that returned status code is not 200
[ "Get", "user", "details", "." ]
python
train
Britefury/batchup
batchup/data_source.py
https://github.com/Britefury/batchup/blob/3fc2304e629f813c05f9e7a85a18acef3581a536/batchup/data_source.py#L1404-L1424
def samples_by_indices_nomapping(self, indices): """ Gather a batch of samples by indices *without* applying any index mapping. Parameters ---------- indices: 1D-array of ints or slice An index array or a slice that selects the samples to retrieve Returns ------- nested list of arrays A mini-batch """ if not self._random_access: raise TypeError('samples_by_indices_nomapping method not ' 'supported as one or more of the underlying ' 'data sources does not support random access') batch = self.source.samples_by_indices_nomapping(indices) return self.fn(*batch)
[ "def", "samples_by_indices_nomapping", "(", "self", ",", "indices", ")", ":", "if", "not", "self", ".", "_random_access", ":", "raise", "TypeError", "(", "'samples_by_indices_nomapping method not '", "'supported as one or more of the underlying '", "'data sources does not suppo...
Gather a batch of samples by indices *without* applying any index mapping. Parameters ---------- indices: 1D-array of ints or slice An index array or a slice that selects the samples to retrieve Returns ------- nested list of arrays A mini-batch
[ "Gather", "a", "batch", "of", "samples", "by", "indices", "*", "without", "*", "applying", "any", "index", "mapping", "." ]
python
train
toomore/grs
grs/twseopen.py
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/twseopen.py#L52-L71
def __loaddate(): ''' 載入檔案 檔案依據 http://www.twse.com.tw/ch/trading/trading_days.php ''' csv_path = os.path.join(os.path.dirname(__file__), 'opendate.csv') with open(csv_path) as csv_file: csv_data = csv.reader(csv_file) result = {} result['close'] = [] result['open'] = [] for i in csv_data: if i[1] == '0': # 0 = 休市 result['close'].append(datetime.strptime(i[0], '%Y/%m/%d').date()) elif i[1] == '1': # 1 = 開市 result['open'].append(datetime.strptime(i[0], '%Y/%m/%d').date()) else: pass return result
[ "def", "__loaddate", "(", ")", ":", "csv_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'opendate.csv'", ")", "with", "open", "(", "csv_path", ")", "as", "csv_file", ":", "csv_data", "=...
載入檔案 檔案依據 http://www.twse.com.tw/ch/trading/trading_days.php
[ "載入檔案", "檔案依據", "http", ":", "//", "www", ".", "twse", ".", "com", ".", "tw", "/", "ch", "/", "trading", "/", "trading_days", ".", "php" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/deepreload.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/deepreload.py#L46-L117
def get_parent(globals, level): """ parent, name = get_parent(globals, level) Return the package that an import is being performed in. If globals comes from the module foo.bar.bat (not itself a package), this returns the sys.modules entry for foo.bar. If globals is from a package's __init__.py, the package's entry in sys.modules is returned. If globals doesn't come from a package or a module in a package, or a corresponding entry is not found in sys.modules, None is returned. """ orig_level = level if not level or not isinstance(globals, dict): return None, '' pkgname = globals.get('__package__', None) if pkgname is not None: # __package__ is set, so use it if not hasattr(pkgname, 'rindex'): raise ValueError('__package__ set to non-string') if len(pkgname) == 0: if level > 0: raise ValueError('Attempted relative import in non-package') return None, '' name = pkgname else: # __package__ not set, so figure it out and set it if '__name__' not in globals: return None, '' modname = globals['__name__'] if '__path__' in globals: # __path__ is set, so modname is already the package name globals['__package__'] = name = modname else: # Normal module, so work out the package name if any lastdot = modname.rfind('.') if lastdot < 0 and level > 0: raise ValueError("Attempted relative import in non-package") if lastdot < 0: globals['__package__'] = None return None, '' globals['__package__'] = name = modname[:lastdot] dot = len(name) for x in xrange(level, 1, -1): try: dot = name.rindex('.', 0, dot) except ValueError: raise ValueError("attempted relative import beyond top-level " "package") name = name[:dot] try: parent = sys.modules[name] except: if orig_level < 1: warn("Parent module '%.200s' not found while handling absolute " "import" % name) parent = None else: raise SystemError("Parent module '%.200s' not loaded, cannot " "perform relative import" % name) # We expect, but can't guarantee, if parent != None, that: # - parent.__name__ == name # - parent.__dict__ is globals # 
If this is violated... Who cares? return parent, name
[ "def", "get_parent", "(", "globals", ",", "level", ")", ":", "orig_level", "=", "level", "if", "not", "level", "or", "not", "isinstance", "(", "globals", ",", "dict", ")", ":", "return", "None", ",", "''", "pkgname", "=", "globals", ".", "get", "(", ...
parent, name = get_parent(globals, level) Return the package that an import is being performed in. If globals comes from the module foo.bar.bat (not itself a package), this returns the sys.modules entry for foo.bar. If globals is from a package's __init__.py, the package's entry in sys.modules is returned. If globals doesn't come from a package or a module in a package, or a corresponding entry is not found in sys.modules, None is returned.
[ "parent", "name", "=", "get_parent", "(", "globals", "level", ")" ]
python
test
hweickert/where
where/__init__.py
https://github.com/hweickert/where/blob/ff07c5df81a2e854e8e1b88c932b493452b9cb83/where/__init__.py#L32-L37
def iwhere( filename ): """ Like where() but returns an iterator. """ possible_paths = _gen_possible_matches( filename ) existing_file_paths = filter( os.path.isfile, possible_paths ) return existing_file_paths
[ "def", "iwhere", "(", "filename", ")", ":", "possible_paths", "=", "_gen_possible_matches", "(", "filename", ")", "existing_file_paths", "=", "filter", "(", "os", ".", "path", ".", "isfile", ",", "possible_paths", ")", "return", "existing_file_paths" ]
Like where() but returns an iterator.
[ "Like", "where", "()", "but", "returns", "an", "iterator", "." ]
python
train
mitsei/dlkit
dlkit/json_/hierarchy/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/hierarchy/sessions.py#L1146-L1186
def update_hierarchy(self, hierarchy_form): """Updates an existing hierarchy. arg: hierarchy_form (osid.hierarchy.HierarchyForm): the form containing the elements to be updated raise: IllegalState - ``hierarchy_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``hierarchy_id`` or ``hierarchy_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``hierarchy_form`` did not originate from ``get_hierarchy_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.update_bin_template if self._catalog_session is not None: return self._catalog_session.update_catalog(catalog_form=hierarchy_form) collection = JSONClientValidated('hierarchy', collection='Hierarchy', runtime=self._runtime) if not isinstance(hierarchy_form, ABCHierarchyForm): raise errors.InvalidArgument('argument type is not an HierarchyForm') if not hierarchy_form.is_for_update(): raise errors.InvalidArgument('the HierarchyForm is for update only, not create') try: if self._forms[hierarchy_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('hierarchy_form already used in an update transaction') except KeyError: raise errors.Unsupported('hierarchy_form did not originate from this session') if not hierarchy_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(hierarchy_form._my_map) # save is deprecated - change to replace_one self._forms[hierarchy_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned return objects.Hierarchy(osid_object_map=hierarchy_form._my_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "update_hierarchy", "(", "self", ",", "hierarchy_form", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.update_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", "...
Updates an existing hierarchy. arg: hierarchy_form (osid.hierarchy.HierarchyForm): the form containing the elements to be updated raise: IllegalState - ``hierarchy_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``hierarchy_id`` or ``hierarchy_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``hierarchy_form`` did not originate from ``get_hierarchy_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
[ "Updates", "an", "existing", "hierarchy", "." ]
python
train
maaku/python-bitcoin
bitcoin/merkle.py
https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/merkle.py#L14-L40
def _merkle_hash256(*args): """The default transform provided to merkle(), which calculates the hash of its parameters, serializes and joins them together, then performs a has256 of the resulting string. There are two special cases: no arguments, which results in 0, and a single argument, whose hash value is returned unmodified.""" # Return zero-hash is no arguments are provided (the hash of an empty # Merkle tree is defined to be zero). if not args: return 0 # This helper function extracts and returns the hash value of a parameter. # Note that if the parameter is itself a Merkle tree or iterable, this # results in a recursive call to merkle(). def _to_hash(h): if isinstance(h, numbers.Integral): return h if hasattr(h, 'hash'): return h.hash return merkle(h) # As a special case, a tree of length 1 is simply ungrouped - that single # hash value is returned to the user as-is. if len(args) == 1: return _to_hash(args[0]) # Otherwise we are given two parameters, the hash values of which we # serialize, concatenate, and return the hash of. return hash256(b''.join(map(lambda h:hash256.serialize(h), map(_to_hash, args)))).intdigest()
[ "def", "_merkle_hash256", "(", "*", "args", ")", ":", "# Return zero-hash is no arguments are provided (the hash of an empty", "# Merkle tree is defined to be zero).", "if", "not", "args", ":", "return", "0", "# This helper function extracts and returns the hash value of a parameter.",...
The default transform provided to merkle(), which calculates the hash of its parameters, serializes and joins them together, then performs a has256 of the resulting string. There are two special cases: no arguments, which results in 0, and a single argument, whose hash value is returned unmodified.
[ "The", "default", "transform", "provided", "to", "merkle", "()", "which", "calculates", "the", "hash", "of", "its", "parameters", "serializes", "and", "joins", "them", "together", "then", "performs", "a", "has256", "of", "the", "resulting", "string", ".", "The...
python
train
liip/taxi
taxi/plugins.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/plugins.py#L68-L72
def get_backends_by_class(self, backend_class): """ Return a list of backends that are instances of the given `backend_class`. """ return [backend for backend in self._backends_registry.values() if isinstance(backend, backend_class)]
[ "def", "get_backends_by_class", "(", "self", ",", "backend_class", ")", ":", "return", "[", "backend", "for", "backend", "in", "self", ".", "_backends_registry", ".", "values", "(", ")", "if", "isinstance", "(", "backend", ",", "backend_class", ")", "]" ]
Return a list of backends that are instances of the given `backend_class`.
[ "Return", "a", "list", "of", "backends", "that", "are", "instances", "of", "the", "given", "backend_class", "." ]
python
train
codelv/enaml-native
src/enamlnative/android/android_snackbar.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_snackbar.py#L181-L191
def set_duration(self, duration): """ Android for whatever stupid reason doesn't let you set the time it only allows 1-long or 0-short. So we have to repeatedly call show until the duration expires, hence this method does nothing see `set_show`. """ if duration == 0: self.widget.setDuration(-2) #: Infinite else: self.widget.setDuration(0)
[ "def", "set_duration", "(", "self", ",", "duration", ")", ":", "if", "duration", "==", "0", ":", "self", ".", "widget", ".", "setDuration", "(", "-", "2", ")", "#: Infinite", "else", ":", "self", ".", "widget", ".", "setDuration", "(", "0", ")" ]
Android for whatever stupid reason doesn't let you set the time it only allows 1-long or 0-short. So we have to repeatedly call show until the duration expires, hence this method does nothing see `set_show`.
[ "Android", "for", "whatever", "stupid", "reason", "doesn", "t", "let", "you", "set", "the", "time", "it", "only", "allows", "1", "-", "long", "or", "0", "-", "short", ".", "So", "we", "have", "to", "repeatedly", "call", "show", "until", "the", "duratio...
python
train
bapakode/OmMongo
ommongo/update_expression.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/update_expression.py#L91-L106
def inc(self, *args, **kwargs): ''' Atomically increment ``qfield`` by ``value`` ''' pairs = [] if len(args) == 1: pairs.append((args[0], 1)) elif len(args) == 2: pairs.append(args) elif len(kwargs) != 0: pairs.extend([(k, v) for k, v in kwargs.items()]) else: raise UpdateException('Invalid arguments for set. Requires either two positional arguments or at least one keyword argument') ret = self for qfield, value in pairs: ret = self._atomic_op('$inc', qfield, value) return ret
[ "def", "inc", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pairs", "=", "[", "]", "if", "len", "(", "args", ")", "==", "1", ":", "pairs", ".", "append", "(", "(", "args", "[", "0", "]", ",", "1", ")", ")", "elif", "l...
Atomically increment ``qfield`` by ``value``
[ "Atomically", "increment", "qfield", "by", "value" ]
python
train