text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def get_all_direct_statements(self):
    """Get all directlyIncreases/Decreases BEL statements.

    This method stores the results of the query in self.all_direct_stmts
    as a list of strings. The SPARQL query used to find direct BEL
    statements searches for all statements whose predicate is either
    DirectlyIncreases or DirectlyDecreases.
    """
    # Fixed docstring typo: "DirectyIncreases" -> "DirectlyIncreases".
    logger.info("Getting all direct statements...\n")
    q_stmts = prefixes + """
        SELECT ?stmt
        WHERE {
            ?stmt a belvoc:Statement .
            {
              { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
              UNION
              { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
            }
        }
        """
    res_stmts = self.g.query(q_stmts)
    # Strip each URI down to a readable statement string.
    self.all_direct_stmts = [strip_statement(stmt[0]) for stmt in res_stmts]
[ "def", "get_all_direct_statements", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Getting all direct statements...\\n\"", ")", "q_stmts", "=", "prefixes", "+", "\"\"\"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ...
39.26087
19.521739
def card_names_and_ids(self):
    """Return ``[(name, id), ...]`` pairs for every card on the current board."""
    board = Board(self.client, self.board_id)
    # unidecode folds any non-ASCII card names down to ASCII.
    pairs = [(unidecode(card.name), card.id) for card in board.getCards()]
    return pairs
[ "def", "card_names_and_ids", "(", "self", ")", ":", "b", "=", "Board", "(", "self", ".", "client", ",", "self", ".", "board_id", ")", "cards", "=", "b", ".", "getCards", "(", ")", "card_names_and_ids", "=", "[", "(", "unidecode", "(", "c", ".", "name...
46.5
11
async def _handle_watermark_notification(self, watermark_notification):
    """Receive WatermarkNotification and update the conversation.

    Args:
        watermark_notification: hangouts_pb2.WatermarkNotification instance
    """
    conversation_id = watermark_notification.conversation_id.id
    event = parsers.parse_watermark_notification(watermark_notification)
    # Fire the client-level event first, then the conversation-level one.
    await self.on_watermark_notification.fire(event)
    try:
        conversation = await self._get_or_fetch_conversation(conversation_id)
    except exceptions.NetworkError:
        logger.warning(
            'Failed to fetch conversation for watermark notification: %s',
            conversation_id
        )
    else:
        await conversation.on_watermark_notification.fire(event)
[ "async", "def", "_handle_watermark_notification", "(", "self", ",", "watermark_notification", ")", ":", "conv_id", "=", "watermark_notification", ".", "conversation_id", ".", "id", "res", "=", "parsers", ".", "parse_watermark_notification", "(", "watermark_notification", ...
42.111111
22.111111
def clear_end_timestamp(self):
    """Reset ``endTimestamp`` to its metadata default (stub).

    Raises NoAccess if the field is read-only or required.
    """
    metadata = self.get_end_timestamp_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise NoAccess()
    default = metadata.get_default_integer_values()
    self.my_osid_object_form._my_map['endTimestamp'] = default
[ "def", "clear_end_timestamp", "(", "self", ")", ":", "if", "(", "self", ".", "get_end_timestamp_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_end_timestamp_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", ...
48.285714
17.714286
def validate_contents(file_contents):
    """Ensure that all ipynb files in FILE_CONTENTS are valid JSON files.

    Parameters
    ----------
    file_contents : dict
        Mapping of file name -> file contents (string).

    Returns
    -------
    bool
        False if any ``.ipynb`` entry is empty or not parseable JSON,
        True otherwise (non-notebook entries are ignored).
    """
    for name, contents in file_contents.items():
        # Only notebook files are validated; skip everything else.
        if os.path.splitext(name)[1] != '.ipynb':
            continue
        if not contents:
            return False
        try:
            # Parsed value intentionally discarded; only validity matters.
            json.loads(contents)
        except ValueError:
            return False
    return True
[ "def", "validate_contents", "(", "file_contents", ")", ":", "for", "name", ",", "contents", "in", "file_contents", ".", "items", "(", ")", ":", "if", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", "!=", "'.ipynb'", ":", "continue"...
31.153846
12.384615
def addons(cls, recurse=True):
    """Collect every addon available for this mixin class.

    When *recurse* is True, the base classes are also searched for
    addons; for any duplicated key the most-derived class wins.

    :param      recurse | <bool>

    :return     {<str> name: <variant> addon, ..}
    """
    cls.initAddons()
    registry_attr = '_{0}__addons'.format(cls.__name__)
    collected = {}
    # Gather inherited addons first so this class can override them.
    if recurse:
        for base in cls.__bases__:
            if issubclass(base, AddonManager):
                collected.update(base.addons(recurse))
    # Always use the highest level for any given key.
    collected.update(getattr(cls, registry_attr, {}))
    return collected
[ "def", "addons", "(", "cls", ",", "recurse", "=", "True", ")", ":", "cls", ".", "initAddons", "(", ")", "prop", "=", "'_{0}__addons'", ".", "format", "(", "cls", ".", "__name__", ")", "out", "=", "{", "}", "# lookup base classes", "if", "recurse", ":",...
33.434783
18.043478
def step(self, substeps=2):
    '''Step the world forward by one frame.

    Parameters
    ----------
    substeps : int, optional
        Split the step into this many sub-steps. This helps to prevent
        the time delta for an update from being too large.
    '''
    self.frame_no += 1
    sub_dt = self.dt / substeps
    for _ in range(substeps):
        # Clear last sub-step's contacts, detect new ones, then integrate.
        self.ode_contactgroup.empty()
        self.ode_space.collide(None, self.on_collision)
        self.ode_world.step(sub_dt)
[ "def", "step", "(", "self", ",", "substeps", "=", "2", ")", ":", "self", ".", "frame_no", "+=", "1", "dt", "=", "self", ".", "dt", "/", "substeps", "for", "_", "in", "range", "(", "substeps", ")", ":", "self", ".", "ode_contactgroup", ".", "empty",...
34.266667
16.8
def future_get_sensor(self, name, update=None):
    """Get the sensor object, building it lazily from cached info.

    Check if we have information for this sensor, if not connect to
    server and update (if allowed) to get information.

    Parameters
    ----------
    name : string
        Name of the sensor.
    update : bool or None, optional
        True allow inspect client to inspect katcp server if the
        sensor is not known.

    Returns
    -------
    Sensor created by :meth:`sensor_factory` or None if sensor not found.

    Notes
    -----
    Ensure that self.state.data_synced == True if yielding to
    future_get_sensor from a state-change callback, or a deadlock will
    occur.
    """
    sensor_obj = None
    known = yield self.future_check_sensor(name, update)
    if known:
        info = self._sensors_index[name]
        sensor_obj = info.get('obj')
        if sensor_obj is None:
            # First access: materialise the sensor from its stored description.
            stype = katcp.Sensor.parse_type(
                info.get('sensor_type'))
            sparams = katcp.Sensor.parse_params(
                stype, info.get('params'))
            sensor_obj = self.sensor_factory(
                name=name,
                sensor_type=stype,
                description=info.get('description'),
                units=info.get('units'),
                params=sparams)
            self._sensors_index[name]['obj'] = sensor_obj
            self._sensor_object_cache[name] = sensor_obj
    raise tornado.gen.Return(sensor_obj)
[ "def", "future_get_sensor", "(", "self", ",", "name", ",", "update", "=", "None", ")", ":", "obj", "=", "None", "exist", "=", "yield", "self", ".", "future_check_sensor", "(", "name", ",", "update", ")", "if", "exist", ":", "sensor_info", "=", "self", ...
35.444444
18.955556
def setAvatar(self, image):
    """
    Update the profile picture for the current user.

    Args:
        image (file): a file-like object to read the image from
    """
    avatar_url = "{0}/users/{1}/profile/avatar".format(SkypeConnection.API_USER, self.userId)
    self.conn("PUT", avatar_url,
              auth=SkypeConnection.Auth.SkypeToken,
              data=image.read())
[ "def", "setAvatar", "(", "self", ",", "image", ")", ":", "self", ".", "conn", "(", "\"PUT\"", ",", "\"{0}/users/{1}/profile/avatar\"", ".", "format", "(", "SkypeConnection", ".", "API_USER", ",", "self", ".", "userId", ")", ",", "auth", "=", "SkypeConnection...
40.111111
24.333333
def get_net_imbalance(count_per_broker):
    """Calculate and return net imbalance based on given count of
    partitions or leaders per broker.

    Net-imbalance in case of partitions implies total number of
    extra partitions from optimal count over all brokers.
    This is also implies, the minimum number of partition movements
    required for overall balancing.

    For leaders, net imbalance implies total number of extra brokers
    as leaders from optimal count.
    """
    opt_count, extra_allowed = compute_optimum(
        len(count_per_broker), sum(count_per_broker))
    net_imbalance = 0
    for count in count_per_broker:
        # Each broker contributes however many elements exceed the optimum.
        extra_cnt, extra_allowed = get_extra_element_count(
            count, opt_count, extra_allowed)
        net_imbalance += extra_cnt
    return net_imbalance
[ "def", "get_net_imbalance", "(", "count_per_broker", ")", ":", "net_imbalance", "=", "0", "opt_count", ",", "extra_allowed", "=", "compute_optimum", "(", "len", "(", "count_per_broker", ")", ",", "sum", "(", "count_per_broker", ")", ")", "for", "count", "in", ...
39.55
15.25
def models_list(self, api_url=None, offset=0, limit=-1, properties=None):
    """Get list of model resources from a SCO-API.

    Parameters
    ----------
    api_url : string, optional
        Base Url of the SCO-API. Uses default API if argument not present.
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        List of additional object properties to be included for items in
        the result

    Returns
    -------
    List(scoserv.ResourceHandle)
        List of resource handles (one per model in the listing)
    """
    # Resolve the model-listing Url for the requested SCO-API instance.
    listing_url = self.get_api_references(api_url)[sco.REF_MODELS_LIST]
    return sco.get_resource_listing(listing_url, offset, limit, properties)
[ "def", "models_list", "(", "self", ",", "api_url", "=", "None", ",", "offset", "=", "0", ",", "limit", "=", "-", "1", ",", "properties", "=", "None", ")", ":", "# Get subject listing Url for given SCO-API and return the retrieved", "# resource listing", "return", ...
35.25
19.785714
def to_clipboard(self, excel=True, sep=None, **kwargs):
    r"""
    Copy object to the system clipboard.

    Write a text representation of object to the system clipboard.
    This can be pasted into Excel, for example.

    Parameters
    ----------
    excel : bool, default True
        - True, use the provided separator, writing in a csv format for
          allowing easy pasting into excel.
        - False, write a string representation of the object to the
          clipboard.
    sep : str, default ``'\t'``
        Field delimiter.
    **kwargs
        These parameters will be passed to DataFrame.to_csv.

    See Also
    --------
    DataFrame.to_csv : Write a DataFrame to a comma-separated values
        (csv) file.
    read_clipboard : Read text from clipboard and pass to read_table.

    Notes
    -----
    Requirements for your platform.

      - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
      - Windows : none
      - OS X : none

    Examples
    --------
    Copy the contents of a DataFrame to the clipboard.

    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
    >>> df.to_clipboard(sep=',')
    ... # Wrote the following to the system clipboard:
    ... # ,A,B,C
    ... # 0,1,2,3
    ... # 1,4,5,6

    We can omit the index by passing the keyword `index` and setting
    it to false.

    >>> df.to_clipboard(sep=',', index=False)
    ... # Wrote the following to the system clipboard:
    ... # A,B,C
    ... # 1,2,3
    ... # 4,5,6
    """
    # Imported lazily so the clipboard backend is only loaded on use.
    from pandas.io import clipboards
    clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
[ "def", "to_clipboard", "(", "self", ",", "excel", "=", "True", ",", "sep", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "io", "import", "clipboards", "clipboards", ".", "to_clipboard", "(", "self", ",", "excel", "=", "excel", ...
31.25
22.660714
def set_buf_size(fd):
    """Set up os pipe buffer size, if applicable"""
    # F_SETPIPE_SZ is Linux-only; skip silently elsewhere.
    supported = OS_PIPE_SZ and hasattr(fcntl, 'F_SETPIPE_SZ')
    if supported:
        fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, OS_PIPE_SZ)
[ "def", "set_buf_size", "(", "fd", ")", ":", "if", "OS_PIPE_SZ", "and", "hasattr", "(", "fcntl", ",", "'F_SETPIPE_SZ'", ")", ":", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETPIPE_SZ", ",", "OS_PIPE_SZ", ")" ]
45
11.75
def delete(self, *args, **kwargs):
    """Remove the backing image file when an image record is deleted."""
    try:
        os.remove(self.file.file.name)
    except (OSError, IOError):
        # Image file does not exist on disk; nothing to remove.
        pass
    super(Image, self).delete(*args, **kwargs)
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "os", ".", "remove", "(", "self", ".", "file", ".", "file", ".", "name", ")", "# image does not exist", "except", "(", "OSError", ",", "IOError", ")", ":...
34.75
10.75
def RSA(im: array, radius: int, volume_fraction: int = 1,
        mode: str = 'extended'):
    r"""
    Generates a sphere or disk packing using Random Sequential Addition.

    Spheres are inserted one at a time at random free locations, so they
    never overlap, but the packing is not guaranteed to be tight.

    Parameters
    ----------
    im : ND-array
        The image into which the spheres should be inserted. Accepting an
        image rather than a shape allows inserting spheres into an already
        existing image. Start with e.g.
        ``im = np.zeros([200, 200], dtype=bool)``.
    radius : int
        The radius of the disk or sphere to insert.
    volume_fraction : scalar
        The fraction of the image that should be filled with spheres.
        Spheres are added as 1's until this limit is reached.
    mode : string
        Controls how the edges of the image are handled. Options are:

        'extended' - Spheres are allowed to extend beyond the edge of the
        image

        'contained' - Spheres are all completely within the image

        'periodic' - The portion of a sphere that extends beyond the image
        is inserted into the opposite edge of the image (Not Implemented
        Yet!)

    Returns
    -------
    image : ND-array
        A copy of ``im`` with spheres of specified radius *added* to the
        background. Each sphere is filled with 1's and its center marked
        with a 2, so ``im == 1`` extracts only the spheres.

    References
    ----------
    [1] Random Heterogeneous Materials, S. Torquato (2001)
    """
    # Note: The 2D vs 3D splitting of this just me being lazy...I can't be
    # bothered to figure it out programmatically right now
    # TODO: Ideally the spheres should be added periodically
    print(78*'―')
    print('RSA: Adding spheres of size ' + str(radius))
    is_2d = len(im.shape) == 2
    mrad = 2*radius
    if is_2d:
        im_strel = ps_disk(radius)
        mask_strel = ps_disk(mrad)
    else:
        im_strel = ps_ball(radius)
        mask_strel = ps_ball(mrad)
    if sp.any(im > 0):
        # Dilate existing objects by im_strel to remove pixels near them
        # from consideration for sphere placement
        mask = ps.tools.fftmorphology(im > 0, im_strel > 0, mode='dilate')
        mask = mask.astype(int)
    else:
        mask = sp.zeros_like(im)
    if mode == 'contained':
        mask = _remove_edge(mask, radius)
    elif mode == 'extended':
        pass
    elif mode == 'periodic':
        raise Exception('Periodic edges are not implemented yet')
    else:
        raise Exception('Unrecognized mode: ' + mode)
    vf = im.sum()/im.size
    free_spots = sp.argwhere(mask == 0)
    i = 0
    while vf <= volume_fraction and len(free_spots) > 0:
        # Pick a random remaining free location and stamp a sphere there.
        choice = sp.random.randint(0, len(free_spots), size=1)
        if is_2d:
            [x, y] = free_spots[choice].flatten()
            im = _fit_strel_to_im_2d(im, im_strel, radius, x, y)
            mask = _fit_strel_to_im_2d(mask, mask_strel, mrad, x, y)
            im[x, y] = 2
        else:
            [x, y, z] = free_spots[choice].flatten()
            im = _fit_strel_to_im_3d(im, im_strel, radius, x, y, z)
            mask = _fit_strel_to_im_3d(mask, mask_strel, mrad, x, y, z)
            im[x, y, z] = 2
        free_spots = sp.argwhere(mask == 0)
        vf = im.sum()/im.size
        i += 1
    if vf > volume_fraction:
        print('Volume Fraction', volume_fraction, 'reached')
    if len(free_spots) == 0:
        print('No more free spots', 'Volume Fraction', vf)
    return im
[ "def", "RSA", "(", "im", ":", "array", ",", "radius", ":", "int", ",", "volume_fraction", ":", "int", "=", "1", ",", "mode", ":", "str", "=", "'extended'", ")", ":", "# Note: The 2D vs 3D splitting of this just me being lazy...I can't be", "# bothered to figure it o...
37.893204
23.631068
def ToMilliseconds(self):
    """Converts a Duration to milliseconds."""
    # Truncate the nanosecond part toward zero before combining.
    sub_second_millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
    return self.seconds * _MILLIS_PER_SECOND + sub_second_millis
[ "def", "ToMilliseconds", "(", "self", ")", ":", "millis", "=", "_RoundTowardZero", "(", "self", ".", "nanos", ",", "_NANOS_PER_MILLISECOND", ")", "return", "self", ".", "seconds", "*", "_MILLIS_PER_SECOND", "+", "millis" ]
47.25
13.25
def list_images(list_all=False, full_ids=False):
    """
    Lists images on the Docker remote host, similar to ``docker images``.

    :param list_all: Lists all images (e.g. dependencies). Default is ``False``,
      only shows named images.
    :type list_all: bool
    :param full_ids: Shows the full ids. When ``False`` (default) only shows the
      first 12 characters.
    :type full_ids: bool
    """
    image_list = docker_fabric().images(all=list_all)
    _format_output_table(image_list, IMAGE_COLUMNS, full_ids)
[ "def", "list_images", "(", "list_all", "=", "False", ",", "full_ids", "=", "False", ")", ":", "images", "=", "docker_fabric", "(", ")", ".", "images", "(", "all", "=", "list_all", ")", "_format_output_table", "(", "images", ",", "IMAGE_COLUMNS", ",", "full...
45
24.090909
def md2tvd(self, kind='linear'):
    """
    Provides a transformation and interpolation function that converts
    MD to TVD.

    Args:
        kind (str): The kind of interpolation to do, e.g. 'linear',
            'cubic', 'nearest'.

    Returns:
        function.
    """
    # Without positional data MD and TVD are interchangeable.
    if self.position is None:
        return lambda md: md
    return interp1d(self.md, self.tvd,
                    kind=kind,
                    assume_sorted=True,
                    fill_value="extrapolate",
                    bounds_error=False)
[ "def", "md2tvd", "(", "self", ",", "kind", "=", "'linear'", ")", ":", "if", "self", ".", "position", "is", "None", ":", "return", "lambda", "x", ":", "x", "return", "interp1d", "(", "self", ".", "md", ",", "self", ".", "tvd", ",", "kind", "=", "k...
30.368421
15.315789
async def update_state(self, msg, _context):
    """Update the status of a service."""
    service_name = msg.get('name')
    new_status = msg.get('new_status')
    await self.service_manager.update_state(service_name, new_status)
[ "async", "def", "update_state", "(", "self", ",", "msg", ",", "_context", ")", ":", "name", "=", "msg", ".", "get", "(", "'name'", ")", "status", "=", "msg", ".", "get", "(", "'new_status'", ")", "await", "self", ".", "service_manager", ".", "update_st...
31.142857
16.714286
def _save_pickle(self, filename):
    """Save sensors to pickle file."""
    with open(filename, 'wb') as file_handle:
        pickle.dump(self._sensors, file_handle, pickle.HIGHEST_PROTOCOL)
        # Flush + fsync so the data reaches disk before we return.
        file_handle.flush()
        os.fsync(file_handle.fileno())
[ "def", "_save_pickle", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "file_handle", ":", "pickle", ".", "dump", "(", "self", ".", "_sensors", ",", "file_handle", ",", "pickle", ".", "HIGHEST_PROTOCOL", "...
45.5
10.5
def get_sw_dir(sw_dir=None):
    """
    Returns the software directory defined in the ``config.software_dir``
    config. When *sw_dir* is not *None*, it is expanded and returned
    instead.
    """
    if sw_dir is None:
        sw_dir = Config.instance().get("core", "software_dir")
    # Expand both environment variables and a leading "~".
    return os.path.expandvars(os.path.expanduser(sw_dir))
[ "def", "get_sw_dir", "(", "sw_dir", "=", "None", ")", ":", "if", "sw_dir", "is", "None", ":", "sw_dir", "=", "Config", ".", "instance", "(", ")", ".", "get", "(", "\"core\"", ",", "\"software_dir\"", ")", "sw_dir", "=", "os", ".", "path", ".", "expan...
32
22.181818
def pylint(ctx, skip_tests=False, skip_root=False, reports=False):
    """Perform source code checks via pylint."""
    cfg = config.load()
    add_dir2pypath(cfg.project_root)
    if not os.path.exists(cfg.testjoin('__init__.py')):
        add_dir2pypath(cfg.testjoin())

    # Collect the set of packages / modules / test files to check.
    namelist = set()
    for package in cfg.project.get('packages', []):
        if '.' not in package:
            namelist.add(cfg.srcjoin(package))
    for module in cfg.project.get('py_modules', []):
        namelist.add(module + '.py')
    if not skip_tests:
        test_py = antglob.FileSet(cfg.testdir, '**/*.py')
        test_py = [cfg.testjoin(i) for i in test_py]
        if test_py:
            namelist |= set(test_py)
    if not skip_root:
        root_py = antglob.FileSet('.', '*.py')
        if root_py:
            namelist |= set(root_py)
    # Strip the working directory prefix so paths are repo-relative.
    namelist = set([
        i[len(os.getcwd())+1:] if i.startswith(os.getcwd() + os.sep) else i
        for i in namelist
    ])

    # Assemble the pylint command line, using the first rc file found.
    cmd = 'pylint'
    cmd += ' "{}"'.format('" "'.join(sorted(namelist)))
    cmd += ' --reports={0}'.format('y' if reports else 'n')
    for cfgfile in ('.pylintrc', 'pylint.rc', 'pylint.cfg', 'project.d/pylint.cfg'):
        if os.path.exists(cfgfile):
            cmd += ' --rcfile={0}'.format(cfgfile)
            break

    try:
        shell.run(cmd, report_error=False, runner=ctx.run)
        notify.info("OK - No problems found by pylint.")
    except exceptions.Failure as exc:
        # Check bit flags within pylint return code
        if exc.result.return_code & 32:
            # Usage error (internal error in this code)
            notify.error("Usage error, bad arguments in {}?!".format(repr(cmd)))
            raise
        else:
            bits = {
                1: "fatal",
                2: "error",
                4: "warning",
                8: "refactor",
                16: "convention",
            }
            notify.warning("Some messages of type {} issued by pylint.".format(
                ", ".join([text for bit, text in bits.items()
                           if exc.result.return_code & bit])
            ))
            if exc.result.return_code & 3:
                notify.error("Exiting due to fatal / error message.")
                raise
[ "def", "pylint", "(", "ctx", ",", "skip_tests", "=", "False", ",", "skip_root", "=", "False", ",", "reports", "=", "False", ")", ":", "cfg", "=", "config", ".", "load", "(", ")", "add_dir2pypath", "(", "cfg", ".", "project_root", ")", "if", "not", "o...
38.339286
19.125
def projection(self, exprs):
    """
    Like mutate, but do not include existing table columns
    """
    window = self._get_window()
    resolved = self.table._resolve(exprs)
    # Wrap every resolved expression in the group's window context.
    windowed_exprs = [L.windowize_function(expr, w=window)
                      for expr in resolved]
    return self.table.projection(windowed_exprs)
[ "def", "projection", "(", "self", ",", "exprs", ")", ":", "w", "=", "self", ".", "_get_window", "(", ")", "windowed_exprs", "=", "[", "]", "exprs", "=", "self", ".", "table", ".", "_resolve", "(", "exprs", ")", "for", "expr", "in", "exprs", ":", "e...
34.363636
8.727273
def quit(self):
    """
    Quit PlanarRad, refusing while a run is still in progress.

    If PlanarRad is running a warning box is shown and the application
    stays open; otherwise the user is asked to confirm before quitting.
    """
    # NOTE(review): nothing is wired up for the window-close (X) button;
    # this handler only covers the explicit quit action.
    # Fixed: `== True` comparison replaced with truthiness, and the local
    # variable no longer shadows the builtin `quit`.
    if self.is_running:
        QtGui.QMessageBox.warning(
            self.ui.quit, 'Warning !',
            "PlanarRad is running. Stop it before quit !",
            QtGui.QMessageBox.Ok)
    else:
        answer = QtGui.QMessageBox.question(
            self.ui.quit, 'Quit PlanarRad',
            "Are you sure to quit ?",
            QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if answer == QtGui.QMessageBox.Yes:
            QtGui.qApp.quit()
[ "def", "quit", "(", "self", ")", ":", "\"\"\"\n Nothing programmed for displaying a message box when the user clicks on the window cross in order to quit.\n \"\"\"", "if", "self", ".", "is_running", "==", "True", ":", "warning_planarrad_running", "=", "QtGui", ".", ...
45.35
29.55
def _gen_4spec(op, path, value, create_path=False, xattr=False,
               _expand_macros=False):
    """
    Like `_gen_3spec`, but also accepts a mandatory value as its third
    argument

    :param bool _expand_macros: Whether macros in the value should be
        expanded. The macros themselves are defined at the server side
    """
    # Translate each boolean option into its subdoc flag bit.
    option_bits = (
        (create_path, _P.SDSPEC_F_MKDIR_P),
        (xattr, _P.SDSPEC_F_XATTR),
        (_expand_macros, _P.SDSPEC_F_EXPANDMACROS),
    )
    flags = 0
    for enabled, bit in option_bits:
        if enabled:
            flags |= bit
    return Spec(op, path, flags, value)
[ "def", "_gen_4spec", "(", "op", ",", "path", ",", "value", ",", "create_path", "=", "False", ",", "xattr", "=", "False", ",", "_expand_macros", "=", "False", ")", ":", "flags", "=", "0", "if", "create_path", ":", "flags", "|=", "_P", ".", "SDSPEC_F_MKD...
36.6
16.066667
def nf_step_to_process(step, out_handle):
    """Convert CWL step into a nextflow process.
    """
    pprint.pprint(step)
    # Map each resource requirement onto its nextflow directive format.
    directive_formats = {
        "docker": "container '%s'",
        "cpu": "cpus %s",
        "memory": "memory '%s'",
    }
    directives = []
    for req in step["task_definition"]["requirements"]:
        fmt = directive_formats.get(req["requirement_type"])
        if fmt:
            directives.append(fmt % req["value"])
    # NOTE: the variable names below feed the template via **locals(),
    # so they must not be renamed.
    task_id = step["task_id"]
    directives = "\n ".join(directives)
    inputs = "\n ".join(nf_io_to_process(step["inputs"],
                                         step["task_definition"]["inputs"],
                                         step["scatter"]))
    outputs = "\n ".join(nf_io_to_process(step["outputs"],
                                          step["task_definition"]["outputs"]))
    commandline = (step["task_definition"]["baseCommand"] + " " +
                   " ".join([nf_input_to_cl(i)
                             for i in step["task_definition"]["inputs"]]))
    out_handle.write(_nf_process_tmpl.format(**locals()))
[ "def", "nf_step_to_process", "(", "step", ",", "out_handle", ")", ":", "pprint", ".", "pprint", "(", "step", ")", "directives", "=", "[", "]", "for", "req", "in", "step", "[", "\"task_definition\"", "]", "[", "\"requirements\"", "]", ":", "if", "req", "[...
47.409091
20.272727
def api_key_post(params, request_path, _async=False):
    """
    Build a signed POST request and dispatch it (adapted from the Huobi
    demo).

    :param params: body parameters for the POST request
    :param request_path: API endpoint path
    :return: result of :func:`http_post_request`
    """
    method = 'POST'
    timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
    params_to_sign = {
        'AccessKeyId': ACCESS_KEY,
        'SignatureMethod': 'HmacSHA256',
        'SignatureVersion': '2',
        'Timestamp': timestamp
    }
    host_url = TRADE_URL
    # Signature is computed over the lower-cased host name.
    host_name = urllib.parse.urlparse(host_url).hostname.lower()
    secret_sign = createSign(params_to_sign, method, host_name,
                             request_path, SECRET_KEY)
    params_to_sign['Signature'] = secret_sign
    if PRIVATE_KEY:
        params_to_sign['PrivateSignature'] = createPrivateSign(secret_sign,
                                                               PRIVATE_KEY)
    url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign)
    return http_post_request(url, params, _async=_async)
[ "def", "api_key_post", "(", "params", ",", "request_path", ",", "_async", "=", "False", ")", ":", "method", "=", "'POST'", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "params_to_si...
36.269231
18.346154
def set_prefix(self, prefix):
    """
    Set the prefix for the node (see Leaf class).

    DEPRECATED; use the prefix property directly.
    """
    deprecation_msg = "set_prefix() is deprecated; use the prefix property"
    warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
    self.prefix = prefix
[ "def", "set_prefix", "(", "self", ",", "prefix", ")", ":", "warnings", ".", "warn", "(", "\"set_prefix() is deprecated; use the prefix property\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "prefix", "=", "prefix" ]
35.111111
15.555556
def fit(
    self,
    durations,
    event_observed=None,
    timeline=None,
    entry=None,
    label="KM_estimate",
    left_censorship=False,
    alpha=None,
    ci_labels=None,
    weights=None,
):  # pylint: disable=too-many-arguments,too-many-locals
    """
    Fit the model to a right-censored dataset

    Parameters
    ----------
    durations: an array, list, pd.DataFrame or pd.Series
        length n -- duration subject was observed for
    event_observed: an array, list, pd.DataFrame, or pd.Series, optional
        True if the the death was observed, False if the event was lost
        (right-censored). Defaults all True if event_observed==None
    timeline: an array, list, pd.DataFrame, or pd.Series, optional
        return the best estimate at the values in timelines (postively
        increasing)
    entry: an array, list, pd.DataFrame, or pd.Series, optional
        relative time when a subject entered the study. This is useful for
        left-truncated (not left-censored) observations. If None, all
        members of the population entered study when they were "born".
    label: string, optional
        a string to name the column of the estimate.
    alpha: float, optional
        the alpha value in the confidence intervals. Overrides the
        initializing alpha for this call to fit only.
    left_censorship: bool, optional (default=False)
        Deprecated, use ``fit_left_censoring``
    ci_labels: tuple, optional
        add custom column names to the generated confidence intervals as a
        length-2 list: [<lower-bound name>, <upper-bound name>].
        Default: <label>_lower_<1-alpha/2>
    weights: an array, list, pd.DataFrame, or pd.Series, optional
        if providing a weighted dataset. For example, instead of providing
        every subject as a single element of `durations` and
        `event_observed`, one could weigh subject differently.

    Returns
    -------
    self: KaplanMeierFitter
        self with new properties like ``survival_function_``, ``plot()``,
        ``median``
    """
    if left_censorship:
        # Old keyword still accepted, but steer callers to the new API.
        warnings.warn(
            "kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
            DeprecationWarning,
        )
    self._censoring_type = CensoringType.RIGHT
    return self._fit(durations, event_observed, timeline, entry, label,
                     alpha, ci_labels, weights)
[ "def", "fit", "(", "self", ",", "durations", ",", "event_observed", "=", "None", ",", "timeline", "=", "None", ",", "entry", "=", "None", ",", "label", "=", "\"KM_estimate\"", ",", "left_censorship", "=", "False", ",", "alpha", "=", "None", ",", "ci_labe...
48.113208
30.037736
def _merge_csv_column(table, csvs):
    """
    Attach CSV data to each column of a table as its "values" entry.

    :param dict table: Table metadata
    :param list csvs: Parsed CSV data, one list per column
    :return dict: Table metadata with csv "values" entry
    :return bool ensemble: Ensemble data or not ensemble data
    """
    # Start putting CSV data into corresponding column "values" key
    try:
        ensemble = is_ensemble(table["columns"])
        if ensemble:
            # realization columns
            if len(table["columns"]) == 1:
                for _name, _column in table["columns"].items():
                    _column["values"] = csvs
            # depth column + realization columns
            elif len(table["columns"]) == 2:
                _multi_column = False
                for _name, _column in table["columns"].items():
                    if isinstance(_column["number"], (int, float)):
                        col_num = cast_int(_column["number"])
                        _column['values'] = csvs[col_num - 1]
                    elif isinstance(_column["number"], list):
                        if _multi_column:
                            raise Exception("Error: merge_csv_column: This jsonld metadata looks wrong!\n"
                                            "\tAn ensemble table depth should not reference multiple columns of CSV data.\n"
                                            "\tPlease manually fix the ensemble columns in 'metadata.jsonld' inside of your LiPD file.")
                        else:
                            _multi_column = True
                            _column["values"] = csvs[2:]
        else:
            # Regular table: each column maps 1:1 onto a CSV column.
            for _name, _column in table['columns'].items():
                col_num = cast_int(_column["number"])
                _column['values'] = csvs[col_num - 1]
    except IndexError:
        logger_csvs.warning("merge_csv_column: IndexError: index out of range of csv_data list")
    except KeyError:
        logger_csvs.error("merge_csv_column: KeyError: missing columns key")
    except Exception as e:
        logger_csvs.error("merge_csv_column: Unknown Error: {}".format(e))
        print("Quitting...")
        exit(1)
    # We want to keep one missing value ONLY at the table level. Remove MVs if they're still in column-level
    return table, ensemble
[ "def", "_merge_csv_column", "(", "table", ",", "csvs", ")", ":", "# Start putting CSV data into corresponding column \"values\" key", "try", ":", "ensemble", "=", "is_ensemble", "(", "table", "[", "\"columns\"", "]", ")", "if", "ensemble", ":", "# realization columns", ...
46.836735
21.734694
def setWorkingCollisionBoundsInfo(self, unQuadsCount):
    """Sets the Collision Bounds in the working copy."""
    fn = self.function_table.setWorkingCollisionBoundsInfo
    quads_buffer = HmdQuad_t()
    fn(byref(quads_buffer), unQuadsCount)
    return quads_buffer
[ "def", "setWorkingCollisionBoundsInfo", "(", "self", ",", "unQuadsCount", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setWorkingCollisionBoundsInfo", "pQuadsBuffer", "=", "HmdQuad_t", "(", ")", "fn", "(", "byref", "(", "pQuadsBuffer", ")", ",", "un...
40.285714
14.285714
def _state_after_transition(self, current_state: int, target_state: int) -> int: """ Return the state reachable after a transition. Since the state for a gene can only change by 1, if the absolute value of the difference current_state - target_state is greater than 1, we lower it to 1 or -1. Examples -------- >>> model._state_after_transition(0, 2) 1 # Because 2 is too far from 0, the gene can only reach 1. >>> model._state_after_transition(1, 5) 2 # 5 is still is too far from 1, so the gene can only reach 2. >>> model._state_after_transition(2, 1) 1 # No problem here, 1 is at distance 1 from 2 >>> model._state_after_transition(1, 1) 1 # The state does not change here """ return current_state + (current_state < target_state) - (current_state > target_state)
[ "def", "_state_after_transition", "(", "self", ",", "current_state", ":", "int", ",", "target_state", ":", "int", ")", "->", "int", ":", "return", "current_state", "+", "(", "current_state", "<", "target_state", ")", "-", "(", "current_state", ">", "target_sta...
44.55
23.45
def from_structures(cls, structures, specie, temperature, time_step, step_skip, initial_disp=None, initial_structure=None, **kwargs): """ Convenient constructor that takes in a list of Structure objects to perform diffusion analysis. Args: structures ([Structure]): list of Structure objects (must be ordered in sequence of run). E.g., you may have performed sequential VASP runs to obtain sufficient statistics. specie (Element/Specie): Specie to calculate diffusivity for as a String. E.g., "Li". temperature (float): Temperature of the diffusion run in Kelvin. time_step (int): Time step between measurements. step_skip (int): Sampling frequency of the displacements ( time_step is multiplied by this number to get the real time between measurements) initial_disp (np.ndarray): Sometimes, you need to iteratively compute estimates of the diffusivity. This supplies an initial displacement that will be added on to the initial displacements. Note that this makes sense only when smoothed=False. initial_structure (Structure): Like initial_disp, this is used for iterative computations of estimates of the diffusivity. You typically need to supply both variables. This stipulates the initial structure from which the current set of displacements are computed. \\*\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_. Examples include smoothed, min_obs, avg_nsteps. """ p, l = [], [] for i, s in enumerate(structures): if i == 0: structure = s p.append(np.array(s.frac_coords)[:, None]) l.append(s.lattice.matrix) if initial_structure is not None: p.insert(0, np.array(initial_structure.frac_coords)[:, None]) l.insert(0, initial_structure.lattice.matrix) else: p.insert(0, p[0]) l.insert(0, l[0]) p = np.concatenate(p, axis=1) dp = p[:, 1:] - p[:, :-1] dp = dp - np.round(dp) f_disp = np.cumsum(dp, axis=1) c_disp = [] for i in f_disp: c_disp.append( [ np.dot(d, m) for d, m in zip(i, l[1:]) ] ) disp = np.array(c_disp) # If is NVT-AIMD, clear lattice data. 
if np.array_equal(l[0], l[-1]): l = np.array([l[0]]) else: l = np.array(l) if initial_disp is not None: disp += initial_disp[:, None, :] return cls(structure, disp, specie, temperature, time_step, step_skip=step_skip, lattices=l, **kwargs)
[ "def", "from_structures", "(", "cls", ",", "structures", ",", "specie", ",", "temperature", ",", "time_step", ",", "step_skip", ",", "initial_disp", "=", "None", ",", "initial_structure", "=", "None", ",", "*", "*", "kwargs", ")", ":", "p", ",", "l", "="...
45.301587
20.190476
def _val_fs_regex(self, record, hist=None): """ Perform field-specific validation regex :param dict record: dictionary of values to validate :param dict hist: existing input of history values """ record, hist = self.data_regex_method(fields_list=self.fields, mongo_db_obj=self.mongo, hist=hist, record=record, lookup_type='fieldSpecificRegex') return record, hist
[ "def", "_val_fs_regex", "(", "self", ",", "record", ",", "hist", "=", "None", ")", ":", "record", ",", "hist", "=", "self", ".", "data_regex_method", "(", "fields_list", "=", "self", ".", "fields", ",", "mongo_db_obj", "=", "self", ".", "mongo", ",", "...
42.285714
19.714286
def type(subtag, type): """ Get a :class:`language_tags.Subtag.Subtag` by subtag and type. Can be None if not exists. :param str subtag: subtag. :param str type: type of the subtag. :return: :class:`language_tags.Subtag.Subtag` if exists, otherwise None. """ subtag = subtag.lower() if subtag in index: types = index[subtag] if type in types: return Subtag(subtag, type) return None
[ "def", "type", "(", "subtag", ",", "type", ")", ":", "subtag", "=", "subtag", ".", "lower", "(", ")", "if", "subtag", "in", "index", ":", "types", "=", "index", "[", "subtag", "]", "if", "type", "in", "types", ":", "return", "Subtag", "(", "subtag"...
34.428571
16.285714
def get( table, session, version_id=None, t1=None, t2=None, fields=None, conds=None, include_deleted=True, page=1, page_size=100, ): """ :param table: the model class which inherits from :class:`~savage.models.user_table.SavageModelMixin` and specifies the model of the user table from which we are querying :param session: a sqlalchemy session with connections to the database :param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will return all records after the specified version_id. :param t1: lower bound time for this query; if None or unspecified, defaults to the unix epoch. If this is specified and t2 is not, this query will simply return the time slice of data at t1. This must either be a valid sql time string or a datetime.datetime object. :param t2: upper bound time for this query; if both t1 and t2 are none or unspecified, this will return the latest data (i.e. time slice of data now). This must either be a valid sql time string or a datetime.datetime object. :param fields: a list of strings which corresponds to columns in the table; If None or unspecified, returns all fields in the table. :param conds: a list of dictionary of key value pairs where keys are columns in the table and values are values the column should take on. If specified, this query will only return rows where the columns meet all the conditions. The columns specified in this dictionary must be exactly the unique columns that versioning pivots around. :param include_deleted: if ``True``, the response will include deleted changes. Else it will only include changes where ``deleted = 0`` i.e. the data was in the user table. :param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2, the result set will contain results 100 - 199 :param page_size: upper bound on number of results to display. Note the actual returned result set may be smaller than this due to the roll up. 
""" limit, offset = _get_limit_and_offset(page, page_size) version_col_names = table.version_columns if fields is None: fields = [name for name in utils.get_column_names(table) if name != 'version_id'] if version_id is not None: return _format_response(utils.result_to_dict(session.execute( sa.select([table.ArchiveTable]) .where(table.ArchiveTable.version_id > version_id) .order_by(*_get_order_clause(table.ArchiveTable)) .limit(page_size) .offset(offset) )), fields, version_col_names) if t1 is None and t2 is None: rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset) return _format_response(rows, fields, version_col_names) if t2 is None: # return a historical time slice rows = _get_historical_time_slice( table, session, t1, conds, include_deleted, limit, offset ) return _format_response(rows, fields, version_col_names) if t1 is None: t1 = datetime.utcfromtimestamp(0) rows = _get_historical_changes( table, session, conds, t1, t2, include_deleted, limit, offset ) return _format_response(rows, fields, version_col_names)
[ "def", "get", "(", "table", ",", "session", ",", "version_id", "=", "None", ",", "t1", "=", "None", ",", "t2", "=", "None", ",", "fields", "=", "None", ",", "conds", "=", "None", ",", "include_deleted", "=", "True", ",", "page", "=", "1", ",", "p...
47.785714
27.957143
def create_jdbc_resource(name, server=None, **kwargs): ''' Create a JDBC resource ''' defaults = { 'description': '', 'enabled': True, 'id': name, 'poolName': '', 'target': 'server' } # Data = defaults + merge kwargs + poolname data = defaults data.update(kwargs) if not data['poolName']: raise CommandExecutionError('No pool name!') return _create_element(name, 'resources/jdbc-resource', data, server)
[ "def", "create_jdbc_resource", "(", "name", ",", "server", "=", "None", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "{", "'description'", ":", "''", ",", "'enabled'", ":", "True", ",", "'id'", ":", "name", ",", "'poolName'", ":", "''", ",", "...
23.75
22.85
def read_jp2_image(filename): """ Read data from JPEG2000 file :param filename: name of JPEG2000 file to be read :type filename: str :return: data stored in JPEG2000 file """ # Other option: # return glymur.Jp2k(filename)[:] image = read_image(filename) with open(filename, 'rb') as file: bit_depth = get_jp2_bit_depth(file) return fix_jp2_image(image, bit_depth)
[ "def", "read_jp2_image", "(", "filename", ")", ":", "# Other option:", "# return glymur.Jp2k(filename)[:]", "image", "=", "read_image", "(", "filename", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file", ":", "bit_depth", "=", "get_jp2_bit_depth...
26.666667
13.4
def shift_coordinate_grid(self, x_shift, y_shift, pixel_unit=False): """ shifts the coordinate system :param x_shif: shift in x (or RA) :param y_shift: shift in y (or DEC) :param pixel_unit: bool, if True, units of pixels in input, otherwise RA/DEC :return: updated data class with change in coordinate system """ self._coords.shift_coordinate_grid(x_shift, y_shift, pixel_unit=pixel_unit) self._x_grid, self._y_grid = self._coords.coordinate_grid(self.nx)
[ "def", "shift_coordinate_grid", "(", "self", ",", "x_shift", ",", "y_shift", ",", "pixel_unit", "=", "False", ")", ":", "self", ".", "_coords", ".", "shift_coordinate_grid", "(", "x_shift", ",", "y_shift", ",", "pixel_unit", "=", "pixel_unit", ")", "self", "...
51.9
18.5
def _inhibitColumnsLocal(self, overlaps, density): """ Performs local inhibition. Local inhibition is performed on a column by column basis. Each column observes the overlaps of its neighbors and is selected if its overlap score is within the top 'numActive' in its local neighborhood. At most half of the columns in a local neighborhood are allowed to be active. Columns with an overlap score below the 'stimulusThreshold' are always inhibited. :param overlaps: an array containing the overlap score for each column. The overlap score for a column is defined as the number of synapses in a "connected state" (connected synapses) that are connected to input bits which are turned on. :param density: The fraction of columns to survive inhibition. This value is only an intended target. Since the surviving columns are picked in a local fashion, the exact fraction of surviving columns is likely to vary. @return list with indices of the winning columns """ activeArray = numpy.zeros(self._numColumns, dtype="bool") for column, overlap in enumerate(overlaps): if overlap >= self._stimulusThreshold: neighborhood = self._getColumnNeighborhood(column) neighborhoodOverlaps = overlaps[neighborhood] numBigger = numpy.count_nonzero(neighborhoodOverlaps > overlap) # When there is a tie, favor neighbors that are already selected as # active. ties = numpy.where(neighborhoodOverlaps == overlap) tiedNeighbors = neighborhood[ties] numTiesLost = numpy.count_nonzero(activeArray[tiedNeighbors]) numActive = int(0.5 + density * len(neighborhood)) if numBigger + numTiesLost < numActive: activeArray[column] = True return activeArray.nonzero()[0]
[ "def", "_inhibitColumnsLocal", "(", "self", ",", "overlaps", ",", "density", ")", ":", "activeArray", "=", "numpy", ".", "zeros", "(", "self", ".", "_numColumns", ",", "dtype", "=", "\"bool\"", ")", "for", "column", ",", "overlap", "in", "enumerate", "(", ...
46.9
24.15
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False): """ Load parameters from file to fill all blocks sequentially. :type blocks: list of deepy.layers.Block """ if not os.path.exists(path): raise Exception("model {} does not exist".format(path)) # Decide which parameters to load normal_params = sum([nn.parameters for nn in blocks], []) all_params = sum([nn.all_parameters for nn in blocks], []) # Load parameters if path.endswith(".gz"): opener = gzip.open if path.lower().endswith('.gz') else open handle = opener(path, 'rb') saved_params = pickle.load(handle) handle.close() # Write parameters if len(all_params) != len(saved_params): logging.warning( "parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params), len(saved_params))) for target, source in zip(all_params, saved_params): if not exclude_free_params or target not in normal_params: target.set_value(source) elif path.endswith(".npz"): arrs = np.load(path) # Write parameters if len(all_params) != len(arrs.keys()): logging.warning( "parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params), len(arrs.keys()))) for target, idx in zip(all_params, range(len(arrs.keys()))): if not exclude_free_params or target not in normal_params: source = arrs['arr_%d' % idx] target.set_value(source) else: raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path)
[ "def", "fill_parameters", "(", "self", ",", "path", ",", "blocks", ",", "exclude_free_params", "=", "False", ",", "check_parameters", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "Exception", "(",...
54.972973
22.918919
def max(x, axis=None, keepdims=False, with_index=False, only_index=False): """Reduce the input N-D array `x` along the given `axis` using the max operation. The `axis` argument may be a single integer to reduce over one axis, a tuple of integers to reduce over multiple axes, or ``None`` to reduce over all axes. If `keepdims` is ``True``, the output will keep all reduced dimensions with size 1. If `with_index` is True, result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) maxval = F.max(x, axis=1) assert np.allclose(maxval.d, np.max(x.d, axis=1)) maxval, indices = F.max(x, axis=1, with_index=True) assert np.allclose(maxval.d, np.max(x.d, axis=1)) assert np.all(indices.d == np.argmax(x.d, axis=1)) indices = F.max(x, axis=1, only_index=True) assert np.all(indices.d == np.argmax(x.d, axis=1)) Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which max is calculated. The default value `None` will reduce all dimensions. keepdims(bool): Keep reduced axes as dimension with 1 element. with_index(bool): Return tuple of max values and index. only_index(bool): Return only the index of max values. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import max as max_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] n_outputs = 2 if with_index and not only_index else 1 return max_base(x, axis, keepdims, with_index, only_index, n_outputs)
[ "def", "max", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ",", "with_index", "=", "False", ",", "only_index", "=", "False", ")", ":", "from", ".", "function_bases", "import", "max", "as", "max_base", "if", "axis", "is", "None", ...
39.979167
22.208333
def setup_toolbar(self): """Setup toolbar""" load_button = create_toolbutton(self, text=_('Import data'), icon=ima.icon('fileimport'), triggered=lambda: self.import_data()) self.save_button = create_toolbutton(self, text=_("Save data"), icon=ima.icon('filesave'), triggered=lambda: self.save_data(self.filename)) self.save_button.setEnabled(False) save_as_button = create_toolbutton(self, text=_("Save data as..."), icon=ima.icon('filesaveas'), triggered=self.save_data) reset_namespace_button = create_toolbutton( self, text=_("Remove all variables"), icon=ima.icon('editdelete'), triggered=self.reset_namespace) return [load_button, self.save_button, save_as_button, reset_namespace_button]
[ "def", "setup_toolbar", "(", "self", ")", ":", "load_button", "=", "create_toolbutton", "(", "self", ",", "text", "=", "_", "(", "'Import data'", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'fileimport'", ")", ",", "triggered", "=", "lambda", ":", ...
55.894737
22.315789
async def _check_subscriptions(self): """ Checks that all subscriptions are subscribed """ subscribed, url = await self._get_subscriptions() expect = set(settings.FACEBOOK_SUBSCRIPTIONS) if (expect - subscribed) or url != self.webhook_url: await self._set_subscriptions(expect | subscribed) logger.info('Updated webhook subscriptions') else: logger.info('No need to update webhook subscriptions')
[ "async", "def", "_check_subscriptions", "(", "self", ")", ":", "subscribed", ",", "url", "=", "await", "self", ".", "_get_subscriptions", "(", ")", "expect", "=", "set", "(", "settings", ".", "FACEBOOK_SUBSCRIPTIONS", ")", "if", "(", "expect", "-", "subscrib...
36.769231
18.153846
def __set_frame_shift_status(self): """Check for frame shift and set the self.is_frame_shift flag.""" if 'fs' in self.hgvs_original: self.is_frame_shift = True self.is_non_silent = True elif re.search('[A-Z]\d+[A-Z]+\*', self.hgvs_original): # it looks like some mutations dont follow the convention # of using 'fs' to indicate frame shift self.is_frame_shift = True self.is_non_silent = True else: self.is_frame_shift = False
[ "def", "__set_frame_shift_status", "(", "self", ")", ":", "if", "'fs'", "in", "self", ".", "hgvs_original", ":", "self", ".", "is_frame_shift", "=", "True", "self", ".", "is_non_silent", "=", "True", "elif", "re", ".", "search", "(", "'[A-Z]\\d+[A-Z]+\\*'", ...
44.25
9
def _record2card(self, record): """ when we add new records they don't have a card, this sort of fakes it up similar to what cfitsio does, just for display purposes. e.g. DBL = 23.299843 LNG = 3423432 KEYSNC = 'hello ' KEYSC = 'hello ' / a comment for string KEYDC = 3.14159265358979 / a comment for pi KEYLC = 323423432 / a comment for long basically, - 8 chars, left aligned, for the keyword name - a space - 20 chars for value, left aligned for strings, right aligned for numbers - if there is a comment, one space followed by / then another space then the comment out to 80 chars """ name = record['name'] value = record['value'] v_isstring = isstring(value) if name == 'COMMENT': # card = 'COMMENT %s' % value card = 'COMMENT %s' % value elif name == 'CONTINUE': card = 'CONTINUE %s' % value elif name == 'HISTORY': card = 'HISTORY %s' % value else: if len(name) > 8: card = 'HIERARCH %s= ' % name else: card = '%-8s= ' % name[0:8] # these may be string representations of data, or actual strings if v_isstring: value = str(value) if len(value) > 0: if value[0] != "'": # this is a string representing a string header field # make it look like it will look in the header value = "'" + value + "'" vstr = '%-20s' % value else: vstr = "%20s" % value else: vstr = "''" else: vstr = '%20s' % value card += vstr if 'comment' in record: card += ' / %s' % record['comment'] if v_isstring and len(card) > 80: card = card[0:79] + "'" else: card = card[0:80] return card
[ "def", "_record2card", "(", "self", ",", "record", ")", ":", "name", "=", "record", "[", "'name'", "]", "value", "=", "record", "[", "'value'", "]", "v_isstring", "=", "isstring", "(", "value", ")", "if", "name", "==", "'COMMENT'", ":", "# card = 'COMMEN...
32.955224
16.865672
def get_mean(self, distribution_function): """Get the mean value for a distribution. If the distribution function is [normal, uniform,choice,triangular] the analytic value is being calculted. Else, the distribution is instantiated and then the mean is being calculated. :param distribution_function: :return: the mean as a scalar """ name = self.distribution_name params = self.random_function_params if name == 'normal': return params[0] if name == 'uniform': return (params[0] + params[1]) / 2. if name == 'choice': return params[0].mean() if name == 'triangular': return (params[0] + params[1] + params[2]) / 3. return distribution_function().mean()
[ "def", "get_mean", "(", "self", ",", "distribution_function", ")", ":", "name", "=", "self", ".", "distribution_name", "params", "=", "self", ".", "random_function_params", "if", "name", "==", "'normal'", ":", "return", "params", "[", "0", "]", "if", "name",...
41.421053
13.947368
def string_class(cls): """Define __unicode__ and __str__ methods on the given class in Python 2. The given class must define a __str__ method returning a unicode string, otherwise a TypeError is raised. Under Python 3, the class is returned as is. """ if not PY3: if '__str__' not in cls.__dict__: raise TypeError('the given class has no __str__ method') cls.__unicode__, cls.__string__ = ( cls.__str__, lambda self: self.__unicode__().encode('utf-8')) return cls
[ "def", "string_class", "(", "cls", ")", ":", "if", "not", "PY3", ":", "if", "'__str__'", "not", "in", "cls", ".", "__dict__", ":", "raise", "TypeError", "(", "'the given class has no __str__ method'", ")", "cls", ".", "__unicode__", ",", "cls", ".", "__strin...
40
17.076923
def _getOverlay(self, readDataInstance, sectionHdrsInstance): """ Returns the overlay data from the PE file. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} instance containing the PE file data. @type sectionHdrsInstance: L{SectionHeaders} @param sectionHdrsInstance: A L{SectionHeaders} instance containing the information about the sections present in the PE file. @rtype: str @return: A string with the overlay data from the PE file. @raise InstanceErrorException: If the C{readDataInstance} or the C{sectionHdrsInstance} were not specified. """ if readDataInstance is not None and sectionHdrsInstance is not None: # adjust the offset in readDataInstance to the RawOffset + RawSize of the last section try: offset = sectionHdrsInstance[-1].pointerToRawData.value + sectionHdrsInstance[-1].sizeOfRawData.value readDataInstance.setOffset(offset) except excep.WrongOffsetValueException: if self._verbose: print "It seems that the file has no overlay data." else: raise excep.InstanceErrorException("ReadData instance or SectionHeaders instance not specified.") return readDataInstance.data[readDataInstance.offset:]
[ "def", "_getOverlay", "(", "self", ",", "readDataInstance", ",", "sectionHdrsInstance", ")", ":", "if", "readDataInstance", "is", "not", "None", "and", "sectionHdrsInstance", "is", "not", "None", ":", "# adjust the offset in readDataInstance to the RawOffset + RawSize of th...
51.740741
31.296296
def train(X_train, X_test, y_train, y_test, **kwargs): ''' >>> corpus = CorpusReader('annot.opcorpora.xml') >>> X_train, x_test, y_train, y_test = get_train_data(corpus, test_size=0.33, random_state=42) >>> crf = train(X_train, X_test, y_train, y_test) ''' crf = Trainer() crf.set_params({ 'c1': 1.0, 'c2': 0.001, 'max_iterations': 200, }) for xseq, yseq in zip(X_train, y_train): crf.append(xseq, yseq) crf.train(model_name) return crf
[ "def", "train", "(", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", ",", "*", "*", "kwargs", ")", ":", "crf", "=", "Trainer", "(", ")", "crf", ".", "set_params", "(", "{", "'c1'", ":", "1.0", ",", "'c2'", ":", "0.001", ",", "'max_iteratio...
29.235294
22.647059
def async_decorator(func): """Asynchronous function decorator. Interprets the function as being asynchronous, so returns a function that will handle calling the Function asynchronously. Args: func (function): function to be called asynchronously Returns: The wrapped function. Raises: AttributeError: if ``func`` is not callable """ @functools.wraps(func) def async_wrapper(*args, **kwargs): """Wraps up the call to ``func``, so that it is called from a separate thread. The callback, if given, will be called with two parameters, ``exception`` and ``result`` as ``callback(exception, result)``. If the thread ran to completion without error, ``exception`` will be ``None``, otherwise ``exception`` will be the generated exception that stopped the thread. Result is the result of the exected function. Args: callback (function): the callback to ultimately be called args: list of arguments to pass to ``func`` kwargs: key-word arguments dictionary to pass to ``func`` Returns: A thread if the call is asynchronous, otherwise the the return value of the wrapped function. Raises: TypeError: if ``callback`` is not callable or is missing """ if 'callback' not in kwargs or not kwargs['callback']: return func(*args, **kwargs) callback = kwargs.pop('callback') if not callable(callback): raise TypeError('Expected \'callback\' is not callable.') def thread_func(*args, **kwargs): """Thread function on which the given ``func`` and ``callback`` are executed. Args: args: list of arguments to pass to ``func`` kwargs: key-word arguments dictionary to pass to ``func`` Returns: Return value of the wrapped function. """ exception, res = None, None try: res = func(*args, **kwargs) except Exception as e: exception = e return callback(exception, res) thread = threads.ThreadReturn(target=thread_func, args=args, kwargs=kwargs) thread.daemon = True thread.start() return thread return async_wrapper
[ "def", "async_decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "async_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wraps up the call to ``func``, so that it is called from a separate\n thread.\...
33.180556
21.25
def can_process_shell(self, entry): """:return: True when shell can be executed.""" count = 0 condition = render(entry['when'], variables=self.pipeline.variables, model=self.pipeline.model, env=self.get_merged_env(include_os=True)) if Condition.evaluate("" if condition is None else condition): if len(self.pipeline.options.tags) == 0: return True if 'tags' in entry: for tag in self.pipeline.options.tags: if tag in entry['tags']: count += 1 return count > 0
[ "def", "can_process_shell", "(", "self", ",", "entry", ")", ":", "count", "=", "0", "condition", "=", "render", "(", "entry", "[", "'when'", "]", ",", "variables", "=", "self", ".", "pipeline", ".", "variables", ",", "model", "=", "self", ".", "pipelin...
38.375
21.4375
def add_comment(self, comment): """ Adds a comment to a bug. If the bug object does not have a bug ID (ie you are creating a bug) then you will need to also call `put` on the :class:`Bugsy` class. >>> bug.add_comment("I like sausages") >>> bugzilla.put(bug) If it does have a bug id then this will immediately post to the server >>> bug.add_comment("I like eggs too") More examples can be found at: https://github.com/AutomatedTester/Bugsy/blob/master/example/add_comments.py """ # If we have a key post immediately otherwise hold onto it until # put(bug) is called if 'id' in self._bug: self._bugsy.request('bug/{}/comment'.format(self._bug['id']), method='POST', json={"comment": comment} ) else: self._bug['comment'] = comment
[ "def", "add_comment", "(", "self", ",", "comment", ")", ":", "# If we have a key post immediately otherwise hold onto it until", "# put(bug) is called", "if", "'id'", "in", "self", ".", "_bug", ":", "self", ".", "_bugsy", ".", "request", "(", "'bug/{}/comment'", ".", ...
39.75
21.583333
def delete_acl_request(request): """Submission to remove an ACL.""" uuid_ = request.matchdict['uuid'] posted = request.json permissions = [(x['uid'], x['permission'],) for x in posted] with db_connect() as db_conn: with db_conn.cursor() as cursor: remove_acl(cursor, uuid_, permissions) resp = request.response resp.status_int = 200 return resp
[ "def", "delete_acl_request", "(", "request", ")", ":", "uuid_", "=", "request", ".", "matchdict", "[", "'uuid'", "]", "posted", "=", "request", ".", "json", "permissions", "=", "[", "(", "x", "[", "'uid'", "]", ",", "x", "[", "'permission'", "]", ",", ...
29.692308
15.384615
def bulk_insert(self, resource, docs, **kwargs): """Bulk insert documents.""" kwargs.update(self._es_args(resource)) parent_type = self._get_parent_type(resource) if parent_type: for doc in docs: if doc.get(parent_type.get('field')): doc['_parent'] = doc.get(parent_type.get('field')) res = bulk(self.elastic(resource), docs, stats_only=False, **kwargs) self._refresh_resource_index(resource) return res
[ "def", "bulk_insert", "(", "self", ",", "resource", ",", "docs", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "self", ".", "_es_args", "(", "resource", ")", ")", "parent_type", "=", "self", ".", "_get_parent_type", "(", "resource", ...
41.416667
16.916667
def get_sendback(self, uuid, key): """ Return function for sending progress messages back to original caller. Args: uuid (str): UUID of the received message. key (str): Routing key. Returns: fn reference: Reference to function which takes only one data \ argument. """ def send_back_callback(data): self.sendResponse( serializers.serialize(data), uuid, key ) return send_back_callback
[ "def", "get_sendback", "(", "self", ",", "uuid", ",", "key", ")", ":", "def", "send_back_callback", "(", "data", ")", ":", "self", ".", "sendResponse", "(", "serializers", ".", "serialize", "(", "data", ")", ",", "uuid", ",", "key", ")", "return", "sen...
27.9
18.2
def setMood(self, mood): """ Update the activity message for the current user. Args: mood (str): new mood message """ self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}}) self.user.mood = SkypeUser.Mood(plain=mood) if mood else None
[ "def", "setMood", "(", "self", ",", "mood", ")", ":", "self", ".", "conn", "(", "\"POST\"", ",", "\"{0}/users/{1}/profile/partial\"", ".", "format", "(", "SkypeConnection", ".", "API_USER", ",", "self", ".", "userId", ")", ",", "auth", "=", "SkypeConnection"...
42.4
24.8
def subcommand_not_found(self, command, string): """|maybecoro| A method called when a command did not have a subcommand requested in the help command. This is useful to override for i18n. Defaults to either: - ``'Command "{command.qualified_name}" has no subcommands.'`` - If there is no subcommand in the ``command`` parameter. - ``'Command "{command.qualified_name}" has no subcommand named {string}'`` - If the ``command`` parameter has subcommands but not one named ``string``. Parameters ------------ command: :class:`Command` The command that did not have the subcommand requested. string: :class:`str` The string that contains the invalid subcommand. Note that this has had mentions removed to prevent abuse. Returns --------- :class:`str` The string to use when the command did not have the subcommand requested. """ if isinstance(command, Group) and len(command.all_commands) > 0: return 'Command "{0.qualified_name}" has no subcommand named {1}'.format(command, string) return 'Command "{0.qualified_name}" has no subcommands.'.format(command)
[ "def", "subcommand_not_found", "(", "self", ",", "command", ",", "string", ")", ":", "if", "isinstance", "(", "command", ",", "Group", ")", "and", "len", "(", "command", ".", "all_commands", ")", ">", "0", ":", "return", "'Command \"{0.qualified_name}\" has no...
42.862069
28.068966
def _from_pointer(pointer, incref): """Wrap an existing :c:type:`cairo_surface_t *` cdata pointer. :type incref: bool :param incref: Whether increase the :ref:`reference count <refcounting>` now. :return: A new instance of :class:`Surface` or one of its sub-classes, depending on the surface’s type. """ if pointer == ffi.NULL: raise ValueError('Null pointer') if incref: cairo.cairo_surface_reference(pointer) self = object.__new__(SURFACE_TYPE_TO_CLASS.get( cairo.cairo_surface_get_type(pointer), Surface)) Surface.__init__(self, pointer) # Skip the subclass’s __init__ return self
[ "def", "_from_pointer", "(", "pointer", ",", "incref", ")", ":", "if", "pointer", "==", "ffi", ".", "NULL", ":", "raise", "ValueError", "(", "'Null pointer'", ")", "if", "incref", ":", "cairo", ".", "cairo_surface_reference", "(", "pointer", ")", "self", "...
37.894737
18.157895
def g_step(self, gen_frames, fake_logits_stop): """Performs the generator step in computing the GAN loss. Args: gen_frames: Generated frames fake_logits_stop: Logits corresponding to the generated frames as per the discriminator. Assumed to have a stop-gradient term. Returns: gan_g_loss_pos_d: Loss. gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator. """ hparam_to_gen_loss = { "least_squares": gan_losses.least_squares_generator_loss, "cross_entropy": gan_losses.modified_generator_loss, "wasserstein": gan_losses.wasserstein_generator_loss } fake_logits = self.discriminator(gen_frames) mean_fake_logits = tf.reduce_mean(fake_logits) tf.summary.scalar("mean_fake_logits", mean_fake_logits) # Generator loss. # Using gan_g_loss_pos_d updates the discriminator as well. # To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d # but with stop gradient on the generator. # This makes sure that the net gradient on the discriminator is zero and # net-gradient on the generator is just due to the gan_g_loss_pos_d. generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss] gan_g_loss_pos_d = generator_loss_func( discriminator_gen_outputs=fake_logits, add_summaries=True) gan_g_loss_neg_d = -generator_loss_func( discriminator_gen_outputs=fake_logits_stop, add_summaries=True) return gan_g_loss_pos_d, gan_g_loss_neg_d
[ "def", "g_step", "(", "self", ",", "gen_frames", ",", "fake_logits_stop", ")", ":", "hparam_to_gen_loss", "=", "{", "\"least_squares\"", ":", "gan_losses", ".", "least_squares_generator_loss", ",", "\"cross_entropy\"", ":", "gan_losses", ".", "modified_generator_loss", ...
44.878788
21.242424
def tag_handler(self, cmd): """Process a TagCommand.""" # Keep tags if they indirectly reference something we kept cmd.from_ = self._find_interesting_from(cmd.from_) self.keep = cmd.from_ is not None
[ "def", "tag_handler", "(", "self", ",", "cmd", ")", ":", "# Keep tags if they indirectly reference something we kept", "cmd", ".", "from_", "=", "self", ".", "_find_interesting_from", "(", "cmd", ".", "from_", ")", "self", ".", "keep", "=", "cmd", ".", "from_", ...
45.4
11.6
def convert_datetext_to_dategui(datetext, ln=None, secs=False): """Convert: '2005-11-16 15:11:57' => '16 nov 2005, 15:11' Or optionally with seconds: '2005-11-16 15:11:57' => '16 nov 2005, 15:11:57' Month is internationalized """ assert ln is None, 'setting language is not supported' try: datestruct = convert_datetext_to_datestruct(datetext) if datestruct == datestruct_default: raise ValueError if secs: output_format = "d MMM Y, H:mm:ss" else: output_format = "d MMM Y, H:mm" dt = datetime.fromtimestamp(time.mktime(datestruct)) return babel_format_datetime(dt, output_format) except ValueError: return _("N/A")
[ "def", "convert_datetext_to_dategui", "(", "datetext", ",", "ln", "=", "None", ",", "secs", "=", "False", ")", ":", "assert", "ln", "is", "None", ",", "'setting language is not supported'", "try", ":", "datestruct", "=", "convert_datetext_to_datestruct", "(", "dat...
34.333333
16.714286
def generate_enums(basename, xml): '''generate main header per XML file''' directory = os.path.join(basename, '''enums''') mavparse.mkdir_p(directory) for en in xml.enum: f = open(os.path.join(directory, en.name+".java"), mode='w') t.write(f, ''' /* AUTO-GENERATED FILE. DO NOT MODIFY. * * This class was automatically generated by the * java mavlink generator tool. It should not be modified by hand. */ package com.MAVLink.enums; /** * ${description} */ public class ${name} { ${{entry: public static final int ${name} = ${value}; /* ${description} |${{param:${description}| }} */ }} } ''', en) f.close()
[ "def", "generate_enums", "(", "basename", ",", "xml", ")", ":", "directory", "=", "os", ".", "path", ".", "join", "(", "basename", ",", "'''enums'''", ")", "mavparse", ".", "mkdir_p", "(", "directory", ")", "for", "en", "in", "xml", ".", "enum", ":", ...
26.875
24.875
def _make_verb_helper(verb_func, add_groups=False): """ Create function that prepares verb for the verb function The functions created add expressions to be evaluated to the verb, then call the core verb function Parameters ---------- verb_func : function Core verb function. This is the function called after expressions created and added to the verb. The core function should be one of those that implement verbs that evaluate expressions. add_groups : bool If True, a groups attribute is added to the verb. The groups are the columns created after evaluating the expressions. Returns ------- out : function A function that implements a helper verb. """ @wraps(verb_func) def _verb_func(verb): verb.expressions, new_columns = build_expressions(verb) if add_groups: verb.groups = new_columns return verb_func(verb) return _verb_func
[ "def", "_make_verb_helper", "(", "verb_func", ",", "add_groups", "=", "False", ")", ":", "@", "wraps", "(", "verb_func", ")", "def", "_verb_func", "(", "verb", ")", ":", "verb", ".", "expressions", ",", "new_columns", "=", "build_expressions", "(", "verb", ...
29.272727
20.484848
def cmd_do_change_speed(self, args): '''speed value''' if ( len(args) != 1): print("Usage: setspeed SPEED_VALUE") return if (len(args) == 1): speed = float(args[0]) print("SPEED %s" % (str(speed))) self.master.mav.command_long_send( self.settings.target_system, # target_system mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED, # command 0, # confirmation 0, # param1 speed, # param2 (Speed value) 0, # param3 0, # param4 0, # param5 0, # param6 0)
[ "def", "cmd_do_change_speed", "(", "self", ",", "args", ")", ":", "if", "(", "len", "(", "args", ")", "!=", "1", ")", ":", "print", "(", "\"Usage: setspeed SPEED_VALUE\"", ")", "return", "if", "(", "len", "(", "args", ")", "==", "1", ")", ":", "speed...
35.428571
14.857143
def send(self, wallet_id, passcode, address, amount, message='', fee=None, fan_unspend=10): """ Send bitcoins to address :param wallet_id: bitgo wallet id :param address: bitcoin address :param amount: btc amount in satoshis :param split: create new outputs if needed :return: boolean """ MINIMAL_FEE = 20000 MINIMAL_SPLIT = 10000000 wallet = self.get_wallet(wallet_id) if not wallet['spendingAccount']: raise NotSpendableWallet() if not wallet['isActive']: raise NotActiveWallet() if amount < 10000: raise Exception('amount to small') if wallet['confirmedBalance'] < amount: raise NotEnoughFunds('Not enough funds: balance %s amount %s' % (wallet['confirmedBalance'], amount) ) change_address = self.create_address(wallet_id, chain=1) usableKeychain = False spendables = [] chain_paths = [] p2sh = [] payables = [(address, amount)] keychain_path = "" for keychain in wallet['private']['keychains']: keychain_path = keychain['path'][1:] keychain = self.get_keychain(keychain['xpub']) if 'encryptedXprv' not in keychain: continue usableKeychain = True break if not usableKeychain: raise BitGoError("didn't found a spendable keychain") data = json.loads(keychain['encryptedXprv']) #add base64 paddings for k in ['iv', 'salt', 'ct']: data[k] = data[k] + "==" cipher = sjcl.SJCL() xprv = cipher.decrypt(data, passcode) unspents = self.get_unspents(wallet_id) unspents = filter(lambda u: u['confirmations'] > 0, unspents['unspents'][::-1]) total_value = 0 for d in unspents: path = keychain_path + d['chainPath'] chain_paths.append(path) p2sh.append(h2b(d["redeemScript"])) spendables.append(Spendable(d["value"], h2b(d["script"]), h2b_rev(d["tx_hash"]), d["tx_output_n"])) total_value += d['value'] if total_value > amount: break # make many outputs? 
if len(unspents) < 5 and (total_value > (amount + MINIMAL_SPLIT)) and fan_unspend > 0: fee = self.calculate_fee(len(spendables), fan_unspend) value = (total_value - amount - fee) / fan_unspend for i in range(fan_unspend): payables.append((change_address, value)) elif total_value > (amount + MINIMAL_FEE): # add a change address if fee is None: fee = self.calculate_fee(len(spendables), 2) value = total_value - amount - fee if value > 10000: #avoid dust payables.append((change_address, value)) p2sh_lookup = build_p2sh_lookup(p2sh) spendable_keys = [] priv_key = BIP32Node.from_hwif(xprv) spendable_keys = [priv_key.subkey_for_path(path) for path in chain_paths] hash160_lookup = build_hash160_lookup([key.secret_exponent() for key in spendable_keys]) tx = create_tx(spendables, payables) tx.sign(hash160_lookup=hash160_lookup, p2sh_lookup=p2sh_lookup) r = requests.post(self.url + '/tx/send', { 'tx': tx.as_hex(), 'message': message }, headers={ 'Authorization': 'Bearer %s' % self.access_token, }) return r.json()
[ "def", "send", "(", "self", ",", "wallet_id", ",", "passcode", ",", "address", ",", "amount", ",", "message", "=", "''", ",", "fee", "=", "None", ",", "fan_unspend", "=", "10", ")", ":", "MINIMAL_FEE", "=", "20000", "MINIMAL_SPLIT", "=", "10000000", "w...
33.583333
19.37963
def forest(S=3, r1=4, r2=2, p=0.1, is_sparse=False): """Generate a MDP example based on a simple forest management scenario. This function is used to generate a transition probability (``A`` × ``S`` × ``S``) array ``P`` and a reward (``S`` × ``A``) matrix ``R`` that model the following problem. A forest is managed by two actions: 'Wait' and 'Cut'. An action is decided each year with first the objective to maintain an old forest for wildlife and second to make money selling cut wood. Each year there is a probability ``p`` that a fire burns the forest. Here is how the problem is modelled. Let {0, 1 . . . ``S``-1 } be the states of the forest, with ``S``-1 being the oldest. Let 'Wait' be action 0 and 'Cut' be action 1. After a fire, the forest is in the youngest state, that is state 0. The transition matrix ``P`` of the problem can then be defined as follows:: | p 1-p 0.......0 | | . 0 1-p 0....0 | P[0,:,:] = | . . 0 . | | . . . | | . . 1-p | | p 0 0....0 1-p | | 1 0..........0 | | . . . | P[1,:,:] = | . . . | | . . . | | . . . | | 1 0..........0 | The reward matrix R is defined as follows:: | 0 | | . | R[:,0] = | . | | . | | 0 | | r1 | | 0 | | 1 | R[:,1] = | . | | . | | 1 | | r2 | Parameters --------- S : int, optional The number of states, which should be an integer greater than 1. Default: 3. r1 : float, optional The reward when the forest is in its oldest state and action 'Wait' is performed. Default: 4. r2 : float, optional The reward when the forest is in its oldest state and action 'Cut' is performed. Default: 2. p : float, optional The probability of wild fire occurence, in the range ]0, 1[. Default: 0.1. is_sparse : bool, optional If True, then the probability transition matrices will be returned in sparse format, otherwise they will be in dense format. Default: False. Returns ------- out : tuple ``out[0]`` contains the transition probability matrix P and ``out[1]`` contains the reward matrix R. 
If ``is_sparse=False`` then P is a numpy array with a shape of ``(A, S, S)`` and R is a numpy array with a shape of ``(S, A)``. If ``is_sparse=True`` then P is a tuple of length ``A`` where each ``P[a]`` is a scipy sparse CSR format matrix of shape ``(S, S)``; R remains the same as in the case of ``is_sparse=False``. Examples -------- >>> import mdptoolbox.example >>> P, R = mdptoolbox.example.forest() >>> P array([[[ 0.1, 0.9, 0. ], [ 0.1, 0. , 0.9], [ 0.1, 0. , 0.9]], <BLANKLINE> [[ 1. , 0. , 0. ], [ 1. , 0. , 0. ], [ 1. , 0. , 0. ]]]) >>> R array([[ 0., 0.], [ 0., 1.], [ 4., 2.]]) >>> Psp, Rsp = mdptoolbox.example.forest(is_sparse=True) >>> len(Psp) 2 >>> Psp[0] <3x3 sparse matrix of type '<... 'numpy.float64'>' with 6 stored elements in Compressed Sparse Row format> >>> Psp[1] <3x3 sparse matrix of type '<... 'numpy.int64'>' with 3 stored elements in Compressed Sparse Row format> >>> Rsp array([[ 0., 0.], [ 0., 1.], [ 4., 2.]]) >>> (Psp[0].todense() == P[0]).all() True >>> (Rsp == R).all() True """ assert S > 1, "The number of states S must be greater than 1." assert (r1 > 0) and (r2 > 0), "The rewards must be non-negative." assert 0 <= p <= 1, "The probability p must be in [0; 1]." # Definition of Transition matrix if is_sparse: P = [] rows = list(range(S)) * 2 cols = [0] * S + list(range(1, S)) + [S - 1] vals = [p] * S + [1-p] * S P.append(_sp.coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr()) rows = list(range(S)) cols = [0] * S vals = [1] * S P.append(_sp.coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr()) else: P = _np.zeros((2, S, S)) P[0, :, :] = (1 - p) * _np.diag(_np.ones(S - 1), 1) P[0, :, 0] = p P[0, S - 1, S - 1] = (1 - p) P[1, :, :] = _np.zeros((S, S)) P[1, :, 0] = 1 # Definition of Reward matrix R = _np.zeros((S, 2)) R[S - 1, 0] = r1 R[:, 1] = _np.ones(S) R[0, 1] = 0 R[S - 1, 1] = r2 return(P, R)
[ "def", "forest", "(", "S", "=", "3", ",", "r1", "=", "4", ",", "r2", "=", "2", ",", "p", "=", "0.1", ",", "is_sparse", "=", "False", ")", ":", "assert", "S", ">", "1", ",", "\"The number of states S must be greater than 1.\"", "assert", "(", "r1", ">...
34.817518
20.861314
def id_to_extended_id(item_id, item_class): """Return the extended ID from an ID. :param item_id: The ID of the music library item :type item_id: str :param cls: The class of the music service item :type cls: Sub-class of :py:class:`soco.data_structures.MusicServiceItem` The extended id can be something like 00030020trackid_22757082 where the id is just trackid_22757082. For classes where the prefix is not known returns None. """ out = ID_PREFIX[item_class] if out: out += item_id return out
[ "def", "id_to_extended_id", "(", "item_id", ",", "item_class", ")", ":", "out", "=", "ID_PREFIX", "[", "item_class", "]", "if", "out", ":", "out", "+=", "item_id", "return", "out" ]
35.352941
17.705882
def map(self, fn): """Run a map function across all y points in the series""" return TimeSeries([(x, fn(y)) for x, y in self.points])
[ "def", "map", "(", "self", ",", "fn", ")", ":", "return", "TimeSeries", "(", "[", "(", "x", ",", "fn", "(", "y", ")", ")", "for", "x", ",", "y", "in", "self", ".", "points", "]", ")" ]
49
15
def actuator_on(self, service_location_id, actuator_id, duration=None): """ Turn actuator on Parameters ---------- service_location_id : int actuator_id : int duration : int, optional 300,900,1800 or 3600 , specifying the time in seconds the actuator should be turned on. Any other value results in turning on for an undetermined period of time. Returns ------- requests.Response """ return self._actuator_on_off( on_off='on', service_location_id=service_location_id, actuator_id=actuator_id, duration=duration)
[ "def", "actuator_on", "(", "self", ",", "service_location_id", ",", "actuator_id", ",", "duration", "=", "None", ")", ":", "return", "self", ".", "_actuator_on_off", "(", "on_off", "=", "'on'", ",", "service_location_id", "=", "service_location_id", ",", "actuat...
32.5
19.2
def union(self, *queries): '''Return a new :class:`Query` obtained form the union of this :class:`Query` with one or more *queries*. For example, lets say we want to have the union of two queries obtained from the :meth:`filter` method:: query = session.query(MyModel) qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo')) ''' q = self._clone() q.unions += queries return q
[ "def", "union", "(", "self", ",", "*", "queries", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "unions", "+=", "queries", "return", "q" ]
35.833333
19.666667
def _import_module(self, s): """ Import a module. """ mod = __import__(s) parts = s.split('.') for part in parts[1:]: mod = getattr(mod, part) return mod
[ "def", "_import_module", "(", "self", ",", "s", ")", ":", "mod", "=", "__import__", "(", "s", ")", "parts", "=", "s", ".", "split", "(", "'.'", ")", "for", "part", "in", "parts", "[", "1", ":", "]", ":", "mod", "=", "getattr", "(", "mod", ",", ...
23.666667
9.888889
def transform_from_dual_quaternion(dq, from_frame='unassigned', to_frame='world'): """Create a RigidTransform from a DualQuaternion. Parameters ---------- dq : :obj:`DualQuaternion` The DualQuaternion to transform. from_frame : :obj:`str` A name for the frame of reference on which this transform operates. to_frame : :obj:`str` A name for the frame of reference to which this transform moves objects. Returns ------- :obj:`RigidTransform` The RigidTransform made from the DualQuaternion. """ quaternion = dq.qr translation = 2 * dq.qd[1:] return RigidTransform(rotation=quaternion, translation=translation, from_frame=from_frame, to_frame=to_frame)
[ "def", "transform_from_dual_quaternion", "(", "dq", ",", "from_frame", "=", "'unassigned'", ",", "to_frame", "=", "'world'", ")", ":", "quaternion", "=", "dq", ".", "qr", "translation", "=", "2", "*", "dq", ".", "qd", "[", "1", ":", "]", "return", "Rigid...
33.583333
22.666667
def save_file(key, file_path, *refs): """Convert the given parameters to a special JSON object. JSON object is of the form: { key: {"file": file_path}}, or { key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} """ if not os.path.isfile(file_path): return error("Output '{}' set to a missing file: '{}'.".format(key, file_path)) result = {key: {"file": file_path}} if refs: missing_refs = [ ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref)) ] if len(missing_refs) > 0: return error( "Output '{}' set to missing references: '{}'.".format( key, ', '.join(missing_refs) ) ) result[key]['refs'] = refs return json.dumps(result)
[ "def", "save_file", "(", "key", ",", "file_path", ",", "*", "refs", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "error", "(", "\"Output '{}' set to a missing file: '{}'.\"", ".", "format", "(", "key", ",",...
30.576923
20.653846
def sim(self, src, tar, threshold=0.25, max_mismatches=2): """Return the MLIPNS similarity of two strings. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison threshold : float A number [0, 1] indicating the maximum similarity score, below which the strings are considered 'similar' (0.25 by default) max_mismatches : int A number indicating the allowable number of mismatches to remove before declaring two strings not similar (2 by default) Returns ------- float MLIPNS similarity Examples -------- >>> sim_mlipns('cat', 'hat') 1.0 >>> sim_mlipns('Niall', 'Neil') 0.0 >>> sim_mlipns('aluminum', 'Catalan') 0.0 >>> sim_mlipns('ATCG', 'TAGC') 0.0 """ if tar == src: return 1.0 if not src or not tar: return 0.0 mismatches = 0 ham = Hamming().dist_abs(src, tar, diff_lens=True) max_length = max(len(src), len(tar)) while src and tar and mismatches <= max_mismatches: if ( max_length < 1 or (1 - (max_length - ham) / max_length) <= threshold ): return 1.0 else: mismatches += 1 ham -= 1 max_length -= 1 if max_length < 1: return 1.0 return 0.0
[ "def", "sim", "(", "self", ",", "src", ",", "tar", ",", "threshold", "=", "0.25", ",", "max_mismatches", "=", "2", ")", ":", "if", "tar", "==", "src", ":", "return", "1.0", "if", "not", "src", "or", "not", "tar", ":", "return", "0.0", "mismatches",...
27.727273
20.090909
def service_create(auth=None, **kwargs): ''' Create a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_create name=glance type=image salt '*' keystoneng.service_create name=glance type=image description="Image" ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_service(**kwargs)
[ "def", "service_create", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "keep_name", "=", "True", ",", "*", "*", "kwargs", ")", "return", "cloud", ...
28.071429
23.785714
def update_stream(self): """ Restarts the stream with the current list of tracking terms. """ need_to_restart = False # If we think we are running, but something has gone wrong in the streaming thread # Restart it. if self.stream is not None and not self.stream.running: logger.warning("Stream exists but isn't running") self.listener.error = False self.listener.streaming_exception = None need_to_restart = True # Check if the tracking list has changed if self.term_checker.check(): logger.info("Terms have changed") need_to_restart = True # If we aren't running and we are allowing unfiltered streams if self.stream is None and self.unfiltered: need_to_restart = True if not need_to_restart: return logger.info("Restarting stream...") # Stop any old stream self.stop_stream() # Start a new stream self.start_stream()
[ "def", "update_stream", "(", "self", ")", ":", "need_to_restart", "=", "False", "# If we think we are running, but something has gone wrong in the streaming thread", "# Restart it.", "if", "self", ".", "stream", "is", "not", "None", "and", "not", "self", ".", "stream", ...
30.382353
19.088235
def format_value(_type, _data, lookup_string=lambda ix: "<string>"): """ Format a value based on type and data. By default, no strings are looked up and "<string>" is returned. You need to define `lookup_string` in order to actually lookup strings from the string table. :param _type: The numeric type of the value :param _data: The numeric data of the value :param lookup_string: A function how to resolve strings from integer IDs """ # Function to prepend android prefix for attributes/references from the # android library fmt_package = lambda x: "android:" if x >> 24 == 1 else "" # Function to represent integers fmt_int = lambda x: (0x7FFFFFFF & x) - 0x80000000 if x > 0x7FFFFFFF else x if _type == TYPE_STRING: return lookup_string(_data) elif _type == TYPE_ATTRIBUTE: return "?{}{:08X}".format(fmt_package(_data), _data) elif _type == TYPE_REFERENCE: return "@{}{:08X}".format(fmt_package(_data), _data) elif _type == TYPE_FLOAT: return "%f" % unpack("=f", pack("=L", _data))[0] elif _type == TYPE_INT_HEX: return "0x%08X" % _data elif _type == TYPE_INT_BOOLEAN: if _data == 0: return "false" return "true" elif _type == TYPE_DIMENSION: return "{:f}{}".format(complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK]) elif _type == TYPE_FRACTION: return "{:f}{}".format(complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK]) elif TYPE_FIRST_COLOR_INT <= _type <= TYPE_LAST_COLOR_INT: return "#%08X" % _data elif TYPE_FIRST_INT <= _type <= TYPE_LAST_INT: return "%d" % fmt_int(_data) return "<0x{:X}, type 0x{:02X}>".format(_data, _type)
[ "def", "format_value", "(", "_type", ",", "_data", ",", "lookup_string", "=", "lambda", "ix", ":", "\"<string>\"", ")", ":", "# Function to prepend android prefix for attributes/references from the", "# android library", "fmt_package", "=", "lambda", "x", ":", "\"android:...
33.326923
23.326923
def _compare(cur_cmp, cur_struct): ''' Compares two objects and return a boolean value when there's a match. ''' if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict): log.debug('Comparing dict to dict') for cmp_key, cmp_value in six.iteritems(cur_cmp): if cmp_key == '*': # matches any key from the source dictionary if isinstance(cmp_value, dict): found = False for _, cur_struct_val in six.iteritems(cur_struct): found |= _compare(cmp_value, cur_struct_val) return found else: found = False if isinstance(cur_struct, (list, tuple)): for cur_ele in cur_struct: found |= _compare(cmp_value, cur_ele) elif isinstance(cur_struct, dict): for _, cur_ele in six.iteritems(cur_struct): found |= _compare(cmp_value, cur_ele) return found else: if isinstance(cmp_value, dict): if cmp_key not in cur_struct: return False return _compare(cmp_value, cur_struct[cmp_key]) if isinstance(cmp_value, list): found = False for _, cur_struct_val in six.iteritems(cur_struct): found |= _compare(cmp_value, cur_struct_val) return found else: return _compare(cmp_value, cur_struct[cmp_key]) elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)): log.debug('Comparing list to list') found = False for cur_cmp_ele in cur_cmp: for cur_struct_ele in cur_struct: found |= _compare(cur_cmp_ele, cur_struct_ele) return found elif isinstance(cur_cmp, dict) and isinstance(cur_struct, (list, tuple)): log.debug('Comparing dict to list (of dicts?)') found = False for cur_struct_ele in cur_struct: found |= _compare(cur_cmp, cur_struct_ele) return found elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool): log.debug('Comparing booleans: %s ? %s', cur_cmp, cur_struct) return cur_cmp == cur_struct elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \ isinstance(cur_struct, (six.string_types, six.text_type)): log.debug('Comparing strings (and regex?): %s ? 
%s', cur_cmp, cur_struct) # Trying literal match matched = re.match(cur_cmp, cur_struct, re.I) if matched: return True return False elif isinstance(cur_cmp, (six.integer_types, float)) and \ isinstance(cur_struct, (six.integer_types, float)): log.debug('Comparing numeric values: %d ? %d', cur_cmp, cur_struct) # numeric compare return cur_cmp == cur_struct elif isinstance(cur_struct, (six.integer_types, float)) and \ isinstance(cur_cmp, (six.string_types, six.text_type)): # Comapring the numerical value agains a presumably mathematical value log.debug('Comparing a numeric value (%d) with a string (%s)', cur_struct, cur_cmp) numeric_compare = _numeric_regex.match(cur_cmp) # determine if the value to compare agains is a mathematical operand if numeric_compare: compare_value = numeric_compare.group(2) return getattr(float(cur_struct), _numeric_operand[numeric_compare.group(1)])(float(compare_value)) return False return False
[ "def", "_compare", "(", "cur_cmp", ",", "cur_struct", ")", ":", "if", "isinstance", "(", "cur_cmp", ",", "dict", ")", "and", "isinstance", "(", "cur_struct", ",", "dict", ")", ":", "log", ".", "debug", "(", "'Comparing dict to dict'", ")", "for", "cmp_key"...
48.197368
19.723684
def _write_config(config, cfg_file): """ Write a config object to the settings.cfg file. :param config: A ConfigParser object to write to the settings.cfg file. """ directory = os.path.dirname(cfg_file) if not os.path.exists(directory): os.makedirs(directory) with open(cfg_file, "w+") as output_file: config.write(output_file)
[ "def", "_write_config", "(", "config", ",", "cfg_file", ")", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "cfg_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "direc...
32.909091
10.545455
def _run_program(self, bin, fastafile, params=None): """ Run Posmo and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ default_params = {} if params is not None: default_params.update(params) width = params.get("width", 8) basename = "posmo_in.fa" new_file = os.path.join(self.tmpdir, basename) shutil.copy(fastafile, new_file) fastafile = new_file #pwmfile = fastafile + ".pwm" motifs = [] current_path = os.getcwd() os.chdir(self.tmpdir) for n_ones in range(4, min(width, 11), 2): x = "1" * n_ones outfile = "%s.%s.out" % (fastafile, x) cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() stdout = stdout.decode() stderr = stderr.decode() context_file = fastafile.replace(basename, "context.%s.%s.txt" % (basename, x)) cmd = "%s %s %s simi.txt 0.88 10 2 10" % (bin.replace("posmo","clusterwd"), context_file, outfile) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out, err = p.communicate() stdout += out.decode() stderr += err.decode() if os.path.exists(outfile): with open(outfile) as f: motifs += self.parse(f, width, n_ones) os.chdir(current_path) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "width", "=", "param...
31.69697
17.515152
def _set_mpls_traffic_bypasses(self, v, load=False): """ Setter method for mpls_traffic_bypasses, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass/mpls_traffic_bypasses (list) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_traffic_bypasses is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_traffic_bypasses() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mpls_traffic_bypasses must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': 
{u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__mpls_traffic_bypasses = t if hasattr(self, '_set'): self._set()
[ "def", "_set_mpls_traffic_bypasses", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ...
129
62.727273
def _get_stddevs(self, C, stddev_types, num_sites, mag, c1_rrup, log_phi_ss, mean_phi_ss): """ Return standard deviations """ phi_ss = _compute_phi_ss(C, mag, c1_rrup, log_phi_ss, mean_phi_ss) stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: stddevs.append(np.sqrt( C['tau'] * C['tau'] + phi_ss * phi_ss) + np.zeros(num_sites)) elif stddev_type == const.StdDev.INTRA_EVENT: stddevs.append(phi_ss + np.zeros(num_sites)) elif stddev_type == const.StdDev.INTER_EVENT: stddevs.append(C['tau'] + np.zeros(num_sites)) return stddevs
[ "def", "_get_stddevs", "(", "self", ",", "C", ",", "stddev_types", ",", "num_sites", ",", "mag", ",", "c1_rrup", ",", "log_phi_ss", ",", "mean_phi_ss", ")", ":", "phi_ss", "=", "_compute_phi_ss", "(", "C", ",", "mag", ",", "c1_rrup", ",", "log_phi_ss", "...
37.956522
18.304348
def read_frame(self): """Reads a frame and converts the color if needed. In case no frame is available, i.e. self.capture.read() returns False as the first return value, the event_source of the TimedAnimation is stopped, and if possible the capture source released. Returns: None if stopped, otherwise the color converted source image. """ ret, frame = self.capture.read() if not ret: self.event_source.stop() try: self.capture.release() except AttributeError: # has no release method, thus just pass pass return None if self.convert_color != -1 and is_color_image(frame): return cv2.cvtColor(frame, self.convert_color) return frame
[ "def", "read_frame", "(", "self", ")", ":", "ret", ",", "frame", "=", "self", ".", "capture", ".", "read", "(", ")", "if", "not", "ret", ":", "self", ".", "event_source", ".", "stop", "(", ")", "try", ":", "self", ".", "capture", ".", "release", ...
36.954545
19
def _list_audio_files(self, root, skip_rows=0): """Populates synsets - a map of index to label for the data items. Populates the data in the dataset, making tuples of (data, label) """ self.synsets = [] self.items = [] if not self._train_csv: # The audio files are organized in folder structure with # directory name as label and audios in them self._folder_structure(root) else: # train_csv contains mapping between filename and label self._csv_labelled_dataset(root, skip_rows=skip_rows) # Generating the synset.txt file now if not os.path.exists("./synset.txt"): with open("./synset.txt", "w") as synsets_file: for item in self.synsets: synsets_file.write(item+os.linesep) print("Synsets is generated as synset.txt") else: warnings.warn("Synset file already exists in the current directory! Not generating synset.txt.")
[ "def", "_list_audio_files", "(", "self", ",", "root", ",", "skip_rows", "=", "0", ")", ":", "self", ".", "synsets", "=", "[", "]", "self", ".", "items", "=", "[", "]", "if", "not", "self", ".", "_train_csv", ":", "# The audio files are organized in folder ...
46.090909
18.136364
def get_raw_data(self): """Reads the raw red, green, blue and clear channel values. Will return a 4-tuple with the red, green, blue, clear color values (unsigned 16-bit numbers). """ # Read each color register. r = self._readU16LE(TCS34725_RDATAL) g = self._readU16LE(TCS34725_GDATAL) b = self._readU16LE(TCS34725_BDATAL) c = self._readU16LE(TCS34725_CDATAL) # Delay for the integration time to allow for next reading immediately. time.sleep(INTEGRATION_TIME_DELAY[self._integration_time]) return (r, g, b, c)
[ "def", "get_raw_data", "(", "self", ")", ":", "# Read each color register.", "r", "=", "self", ".", "_readU16LE", "(", "TCS34725_RDATAL", ")", "g", "=", "self", ".", "_readU16LE", "(", "TCS34725_GDATAL", ")", "b", "=", "self", ".", "_readU16LE", "(", "TCS347...
45.615385
13.769231
def affine_coupling(name, x, mid_channels=512, activation="relu", reverse=False, dropout=0.0): """Reversible affine coupling layer. Args: name: variable scope. x: 4-D Tensor. mid_channels: number of channels in the coupling layer. activation: Can be either "relu" or "gatu". reverse: Forward or reverse operation. dropout: default, 0.0 Returns: output: x shifted and scaled by an affine transformation. objective: log-determinant of the jacobian """ with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1) # scale, shift = NN(x1) # If reverse: # z2 = scale * (x2 + shift) # Else: # z2 = (x2 / scale) - shift z1 = x1 log_scale_and_shift = conv_stack( "nn", x1, mid_channels, x_shape[-1], activation=activation, dropout=dropout) shift = log_scale_and_shift[:, :, :, 0::2] scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0) if not reverse: z2 = (x2 + shift) * scale else: z2 = x2 / scale - shift objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3]) if reverse: objective *= -1 return tf.concat([z1, z2], axis=3), objective
[ "def", "affine_coupling", "(", "name", ",", "x", ",", "mid_channels", "=", "512", ",", "activation", "=", "\"relu\"", ",", "reverse", "=", "False", ",", "dropout", "=", "0.0", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=...
31.948718
17.615385
def hook_point(self, hook_name, handle=None): """Used to call module function that may define a hook function for hook_name Available hook points: - `tick`, called on each daemon loop turn - `save_retention`; called by the scheduler when live state saving is to be done - `load_retention`; called by the scheduler when live state restoring is necessary (on restart) - `get_new_actions`; called by the scheduler before adding the actions to be executed - `early_configuration`; called by the arbiter when it begins parsing the configuration - `read_configuration`; called by the arbiter when it read the configuration - `late_configuration`; called by the arbiter when it finishes parsing the configuration As a default, the `handle` parameter provided to the hooked function is the caller Daemon object. The scheduler will provide its own instance when it call this function. :param hook_name: function name we may hook in module :type hook_name: str :param handle: parameter to provide to the hook function :type: handle: alignak.Satellite :return: None """ full_hook_name = 'hook_' + hook_name for module in self.modules_manager.instances: _ts = time.time() if not hasattr(module, full_hook_name): continue fun = getattr(module, full_hook_name) try: fun(handle if handle is not None else self) # pylint: disable=broad-except except Exception as exp: # pragma: no cover, never happen during unit tests... logger.warning('The instance %s raised an exception %s. I disabled it,' ' and set it to restart later', module.name, str(exp)) logger.exception('Exception %s', exp) self.modules_manager.set_to_restart(module) else: statsmgr.timer('hook.%s.%s' % (hook_name, module.name), time.time() - _ts)
[ "def", "hook_point", "(", "self", ",", "hook_name", ",", "handle", "=", "None", ")", ":", "full_hook_name", "=", "'hook_'", "+", "hook_name", "for", "module", "in", "self", ".", "modules_manager", ".", "instances", ":", "_ts", "=", "time", ".", "time", "...
50.121951
24.658537
def logpdf(self, mu): """ Log PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.poisson.logpmf(mu, self.lmd0)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "poisson", ".", "logpmf", "(", "mu", ",", "self", ".", "lmd0", ...
23.4375
17.3125
def gt(self, other): """ Strictly greater than. Returns True if no part of this Interval extends lower than or into other. :raises ValueError: if either self or other is a null Interval :param other: Interval or point :return: True or False :rtype: bool """ self._raise_if_null(other) if hasattr(other, 'end'): return self.begin >= other.end else: return self.begin > other
[ "def", "gt", "(", "self", ",", "other", ")", ":", "self", ".", "_raise_if_null", "(", "other", ")", "if", "hasattr", "(", "other", ",", "'end'", ")", ":", "return", "self", ".", "begin", ">=", "other", ".", "end", "else", ":", "return", "self", "."...
33.714286
11.285714
def unused(self, _dict): """ Remove empty parameters from the dict """ for key, value in _dict.items(): if value is None: del _dict[key] return _dict
[ "def", "unused", "(", "self", ",", "_dict", ")", ":", "for", "key", ",", "value", "in", "_dict", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "del", "_dict", "[", "key", "]", "return", "_dict" ]
26.25
7.75
def target_str(self): """Returns the string representation of the target property.""" if isinstance(self.target, tuple): return "({})".format(", ".join(self.target)) else: return self.target
[ "def", "target_str", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "target", ",", "tuple", ")", ":", "return", "\"({})\"", ".", "format", "(", "\", \"", ".", "join", "(", "self", ".", "target", ")", ")", "else", ":", "return", "self", ...
34.833333
13
def parse_error(text: str) -> Any: """ Validate and parse the BMA answer from websocket :param text: the bma error :return: the json data """ try: data = json.loads(text) jsonschema.validate(data, ERROR_SCHEMA) except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e))) return data
[ "def", "parse_error", "(", "text", ":", "str", ")", "->", "Any", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "text", ")", "jsonschema", ".", "validate", "(", "data", ",", "ERROR_SCHEMA", ")", "except", "(", "TypeError", ",", "json", ".",...
28.571429
18.428571
def find_file_match(folder_path, regex=''): """ Returns absolute paths of files that match the regex within folder_path and all its children folders. Note: The regex matching is done using the match function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings. """ outlist = [] for root, dirs, files in os.walk(folder_path): outlist.extend([os.path.join(root, f) for f in files if re.match(regex, f)]) return outlist
[ "def", "find_file_match", "(", "folder_path", ",", "regex", "=", "''", ")", ":", "outlist", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "outlist", ".", "extend", "(", "[", "os", "....
22
23.36
def get_docstring(obj): """Extract the docstring from an object as individual lines. Parameters ---------- obj : object The Python object (class, function or method) to extract docstrings from. Returns ------- lines : `list` of `str` Individual docstring lines with common indentation removed, and newline characters stripped. Notes ----- If the object does not have a docstring, a docstring with the content ``"Undocumented."`` is created. """ docstring = getdoc(obj, allow_inherited=True) if docstring is None: logger = getLogger(__name__) logger.warning("Object %s doesn't have a docstring.", obj) docstring = 'Undocumented' # ignore is simply the number of initial lines to ignore when determining # the docstring's baseline indent level. We really want "1" here. return prepare_docstring(docstring, ignore=1)
[ "def", "get_docstring", "(", "obj", ")", ":", "docstring", "=", "getdoc", "(", "obj", ",", "allow_inherited", "=", "True", ")", "if", "docstring", "is", "None", ":", "logger", "=", "getLogger", "(", "__name__", ")", "logger", ".", "warning", "(", "\"Obje...
32.607143
22
def compute_transformed(context): """Compute transformed key for opening database""" if context._._.transformed_key is not None: transformed_key = context._._transformed_key else: transformed_key = aes_kdf( context._.header.value.dynamic_header.transform_seed.data, context._.header.value.dynamic_header.transform_rounds.data, password=context._._.password, keyfile=context._._.keyfile ) return transformed_key
[ "def", "compute_transformed", "(", "context", ")", ":", "if", "context", ".", "_", ".", "_", ".", "transformed_key", "is", "not", "None", ":", "transformed_key", "=", "context", ".", "_", ".", "_transformed_key", "else", ":", "transformed_key", "=", "aes_kdf...
34.785714
18.071429
def weighted_sample_with_replacement(seq, weights, n): """Pick n samples from seq at random, with replacement, with the probability of each element in proportion to its corresponding weight.""" sample = weighted_sampler(seq, weights) return [sample() for s in range(n)]
[ "def", "weighted_sample_with_replacement", "(", "seq", ",", "weights", ",", "n", ")", ":", "sample", "=", "weighted_sampler", "(", "seq", ",", "weights", ")", "return", "[", "sample", "(", ")", "for", "s", "in", "range", "(", "n", ")", "]" ]
47.333333
7.333333
def remove_item(self, jid): """Remove item from the roster. :Parameters: - `jid`: JID of the item to remove :Types: - `jid`: `JID` """ if jid not in self._jids: raise KeyError(jid) index = self._jids[jid] for i in range(index, len(self._jids)): self._jids[self._items[i].jid] -= 1 del self._jids[jid] del self._items[index]
[ "def", "remove_item", "(", "self", ",", "jid", ")", ":", "if", "jid", "not", "in", "self", ".", "_jids", ":", "raise", "KeyError", "(", "jid", ")", "index", "=", "self", ".", "_jids", "[", "jid", "]", "for", "i", "in", "range", "(", "index", ",",...
28.666667
12
def run(self): """Main thread function to maintain connection and receive remote status.""" _LOGGER.info("Started") while True: self._maybe_reconnect() line = '' try: # If someone is sending a command, we can lose our connection so grab a # copy beforehand. We don't need the lock because if the connection is # open, we are the only ones that will read from telnet (the reconnect # code runs synchronously in this loop). t = self._telnet if t is not None: line = t.read_until(b"\n") except EOFError: try: self._lock.acquire() self._disconnect_locked() continue finally: self._lock.release() self._recv_cb(line.decode('ascii').rstrip())
[ "def", "run", "(", "self", ")", ":", "_LOGGER", ".", "info", "(", "\"Started\"", ")", "while", "True", ":", "self", ".", "_maybe_reconnect", "(", ")", "line", "=", "''", "try", ":", "# If someone is sending a command, we can lose our connection so grab a", "# copy...
35.045455
18.863636
def _write_jwks(path, jwks):
    """Serialise *jwks* as JSON to *path*, creating parent dirs as needed."""
    head, _tail = os.path.split(path)
    if head and not os.path.isdir(head):
        os.makedirs(head)
    fp = open(path, 'w')
    fp.write(json.dumps(jwks))
    fp.close()


def init_key_jar(public_path='', private_path='', key_defs='', owner='',
                 read_only=True):
    """
    A number of cases here:

    1. A private path is given

       a. The file exists and a JWKS is found there.
          From that JWKS a KeyJar instance is built.
       b. If the private path file doesn't exit the key definitions are
          used to build a KeyJar instance. A JWKS with the private keys are
          written to the file named in private_path.

       If a public path is also provided a JWKS with public keys are written
       to that file.

    2. A public path is given but no private path.

       a. If the public path file exists then the JWKS in that file is used to
          construct a KeyJar.
       b. If no such file exists then a KeyJar will be built based on the
          key_defs specification and a JWKS with the public keys will be
          written to the public path file.

    3. If neither a public path nor a private path is given then a KeyJar is
       built based on the key_defs specification and no JWKS will be written
       to file.

    In all cases a KeyJar instance is returned.
    The keys stored in the KeyJar will be stored under the '' identifier.

    :param public_path: A file path to a file that contains a JWKS with public
        keys
    :param private_path: A file path to a file that contains a JWKS with
        private keys.
    :param key_defs: A definition of what keys should be created if they are
        not already available
    :param owner: The owner of the keys
    :param read_only: This function should not attempt to write anything
        to a file system.
    :return: An instantiated :py:class;`oidcmsg.key_jar.KeyJar` instance
    """
    if private_path:
        if os.path.isfile(private_path):
            # Case 1a: load the private JWKS and refresh it if key_defs differ.
            _jwks = open(private_path, 'r').read()
            _kj = KeyJar()
            _kj.import_jwks(json.loads(_jwks), owner)
            if key_defs:
                _kb = _kj.issuer_keys[owner][0]
                _diff = key_diff(_kb, key_defs)
                if _diff:
                    if read_only:
                        logger.error('Not allowed to write to disc!')
                    else:
                        update_key_bundle(_kb, _diff)
                        _kj.issuer_keys[owner] = [_kb]
                        jwks = _kj.export_jwks(private=True, issuer=owner)
                        _write_jwks(private_path, jwks)
        else:
            # Case 1b: no file yet; build from key_defs and persist privately.
            _kj = build_keyjar(key_defs, owner=owner)
            if not read_only:
                jwks = _kj.export_jwks(private=True, issuer=owner)
                _write_jwks(private_path, jwks)

        if public_path and not read_only:
            # Mirror the public part alongside the private keys.
            jwks = _kj.export_jwks(issuer=owner)  # public part
            _write_jwks(public_path, jwks)
    elif public_path:
        if os.path.isfile(public_path):
            # Case 2a: load the public JWKS and refresh it if key_defs differ.
            _jwks = open(public_path, 'r').read()
            _kj = KeyJar()
            _kj.import_jwks(json.loads(_jwks), owner)
            if key_defs:
                _kb = _kj.issuer_keys[owner][0]
                _diff = key_diff(_kb, key_defs)
                if _diff:
                    if read_only:
                        logger.error('Not allowed to write to disc!')
                    else:
                        update_key_bundle(_kb, _diff)
                        _kj.issuer_keys[owner] = [_kb]
                        jwks = _kj.export_jwks(issuer=owner)
                        # BUG FIX: this branch has no private_path (it is
                        # falsy here), so the refreshed public JWKS must be
                        # written to public_path — the original opened
                        # private_path ('') and failed.
                        _write_jwks(public_path, jwks)
        else:
            # Case 2b: build from key_defs and persist the public part only.
            _kj = build_keyjar(key_defs, owner=owner)
            if not read_only:
                _jwks = _kj.export_jwks(issuer=owner)
                _write_jwks(public_path, _jwks)
    else:
        # Case 3: purely in-memory KeyJar, nothing written to disc.
        _kj = build_keyjar(key_defs, owner=owner)

    return _kj
[ "def", "init_key_jar", "(", "public_path", "=", "''", ",", "private_path", "=", "''", ",", "key_defs", "=", "''", ",", "owner", "=", "''", ",", "read_only", "=", "True", ")", ":", "if", "private_path", ":", "if", "os", ".", "path", ".", "isfile", "("...
39.254386
18.289474