text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _validated_locations(self, locations): """Ensure that the given locations argument is valid. :raises: ValueError if a given locations includes an invalid location. """ # The set difference between the given locations and the available locations # will be the set of invalid locations valid_locations = set(self.__location_map__.keys()) given = set(locations) invalid_locations = given - valid_locations if len(invalid_locations): msg = "Invalid locations arguments: {0}".format(list(invalid_locations)) raise ValueError(msg) return locations
[ "def", "_validated_locations", "(", "self", ",", "locations", ")", ":", "# The set difference between the given locations and the available locations", "# will be the set of invalid locations", "valid_locations", "=", "set", "(", "self", ".", "__location_map__", ".", "keys", "(...
45.5
17.357143
def remove_layer(svg_source, layer_name):
    '''
    Remove layer(s) from SVG document.

    Arguments
    ---------
    svg_source : str or file-like
        A file path, URI, or file-like object.
    layer_name : str or list
        Layer name or list of layer names to remove from SVG document.

    Returns
    -------
    StringIO.StringIO
        File-like object containing XML document with layer(s) removed.
    '''
    # Work on an in-memory tree; the source file itself is never modified.
    tree = lxml.etree.parse(svg_source)
    root = tree.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
    # Normalize a single name into a one-element list.
    names = [layer_name] if isinstance(layer_name, str) else layer_name
    for name in names:
        # Locate every group labelled with this layer name and detach it
        # from its parent node.
        matches = root.xpath('//svg:g[@inkscape:label="%s"]' % name,
                             namespaces=INKSCAPE_NSMAP)
        for group in matches:
            group.getparent().remove(group)
    # Serialize the modified tree into a file-like buffer, rewound so the
    # caller can read it from the start.
    buf = StringIO.StringIO()
    tree.write(buf)
    buf.seek(0)
    return buf
[ "def", "remove_layer", "(", "svg_source", ",", "layer_name", ")", ":", "# Parse input file.", "xml_root", "=", "lxml", ".", "etree", ".", "parse", "(", "svg_source", ")", "svg_root", "=", "xml_root", ".", "xpath", "(", "'/svg:svg'", ",", "namespaces", "=", "...
29.447368
21.394737
def query(options, collection_name, num_to_skip, num_to_return, query, field_selector=None):
    """Get a **query** message.

    Builds the wire payload for an OP_QUERY (opcode 2004) message: flags,
    collection name, skip/return counts, the query document, and an
    optional field selector, then wraps the result in a message header.
    """
    parts = [
        struct.pack("<I", options),
        bson._make_c_string(collection_name),
        struct.pack("<i", num_to_skip),
        struct.pack("<i", num_to_return),
        bson.BSON.encode(query),
    ]
    if field_selector is not None:
        parts.append(bson.BSON.encode(field_selector))
    return __pack_message(2004, b"".join(parts))
[ "def", "query", "(", "options", ",", "collection_name", ",", "num_to_skip", ",", "num_to_return", ",", "query", ",", "field_selector", "=", "None", ")", ":", "data", "=", "struct", ".", "pack", "(", "\"<I\"", ",", "options", ")", "data", "+=", "bson", "....
38.666667
5.833333
def saturating_sigmoid(x):
    """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
    with tf.name_scope("saturating_sigmoid", values=[x]):
        # Stretch the sigmoid slightly past [0, 1] and clamp, so the
        # endpoints 0 and 1 are actually reachable.
        sig = tf.sigmoid(x)
        stretched = 1.2 * sig - 0.1
        return tf.minimum(1.0, tf.maximum(0.0, stretched))
[ "def", "saturating_sigmoid", "(", "x", ")", ":", "with", "tf", ".", "name_scope", "(", "\"saturating_sigmoid\"", ",", "values", "=", "[", "x", "]", ")", ":", "y", "=", "tf", ".", "sigmoid", "(", "x", ")", "return", "tf", ".", "minimum", "(", "1.0", ...
45
13.2
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
    """
    Take the caption of a picture and put it all together in a nice way.

    If it spans multiple lines, put it on one line.  If it contains
    controlled characters, strip them out.  If it has tags we don't want
    to worry about, get rid of them, etc.

    :param: begin_line (int): the index of the line where the caption begins
    :param: begin_index (int): the index within the line where the caption
        begins
    :param: end_line (int): the index of the line where the caption ends
    :param: end_index (int): the index within the line where the caption ends
    :param: lines ([string, string, ...]): the line strings of the text
    :return: caption (string): the caption, formatted and pieced together
    """
    # tag we strip out below
    label_head = '\\label{'

    if end_line > begin_line:
        # Caption spans several lines: glue the pieces with single spaces,
        # then collapse newlines and doubled spaces.
        pieces = [lines[begin_line][begin_index:]]
        pieces.extend(lines[i] for i in range(begin_line + 1, end_line))
        pieces.append(lines[end_line][:end_index])
        caption = ' '.join(pieces)
        caption = caption.replace('\n', ' ')
        caption = caption.replace('  ', ' ')
    else:
        # It all fit on one line.
        caption = lines[begin_line][begin_index:end_index]

    # Clean out a label tag, if there is one.  By now the caption is a
    # single line, so the tag cannot be split across lines; locate its
    # closing brace and cut the whole tag out.
    pos = caption.find(label_head)
    if pos > -1:
        _, _, close_index, _ = \
            find_open_and_close_braces(0, pos, '{', [caption])
        caption = caption[:pos] + caption[close_index + 1:]

    caption = caption.strip()
    # Drop one pair of enclosing braces, if present.
    if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
        caption = caption[1:-1]
    return caption
[ "def", "assemble_caption", "(", "begin_line", ",", "begin_index", ",", "end_line", ",", "end_index", ",", "lines", ")", ":", "# stuff we don't like", "label_head", "=", "'\\\\label{'", "# reassemble that sucker", "if", "end_line", ">", "begin_line", ":", "# our captio...
39.62
24.06
def macho_dependencies_list(target_path, header_magic=None):
    """ Generates a list of libraries the given Mach-O file depends on.

    In that list a single library is represented by its "install path": for some
    libraries it would be a full file path, and for others it would be a relative
    path (sometimes with dyld templates like @executable_path or @rpath in it).

    Note: I don't know any reason why would some architectures of a fat Mach-O
    depend on certain libraries while others don't, but *it's technically possible*.
    So that's why you may want to specify the `header_magic` value for a particular
    header.

    Returns an object with two properties: `weak` and `strong` that hold lists of
    weak and strong dependencies respectively.
    """
    # NOTE(review): this function is Python 2 only — it relies on
    # `basestring` and on tuple-unpacking lambda parameters below, both of
    # which were removed in Python 3.
    MachODeprendencies = namedtuple("MachODeprendecies", "weak strong")
    # Convert the magic value into macholib representation if needed
    if isinstance(header_magic, basestring):
        header_magic = _MH_MAGIC_from_string(header_magic)
    macho = MachO(target_path)
    # Obtain a list of headers for the required magic value (if any)
    suggestions = filter(lambda t: t.header.magic == header_magic or
                         # just add all headers if user didn't specifiy the magic
                         header_magic == None,
                         macho.headers)
    header = None if len(suggestions) <= 0 else suggestions[0]
    # filter() above *always* returns a list, so we have to check if it's empty
    if header is None:
        raise Exception("Unable to find a header for the given MAGIC value in that Mach-O file")
        # NOTE(review): unreachable — the raise above always fires.
        return None

    def decodeLoadCommandData(data):
        # Also ignore trailing zeros
        return data[:data.find(b"\x00")].decode(sys.getfilesystemencoding())

    def strongReferencesFromHeader(h):
        # List of LC_LOAD_DYLIB commands
        # NOTE(review): `list` shadows the builtin here; harmless locally.
        list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_DYLIB, h.commands)
        # Their contents (aka data) as a file path
        return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list)

    def weakReferencesFromHeader(h):
        # Same extraction for LC_LOAD_WEAK_DYLIB commands.
        list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_WEAK_DYLIB, h.commands)
        return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list)

    strongRefs = strongReferencesFromHeader(header)
    weakRefs = weakReferencesFromHeader(header)
    return MachODeprendencies(weak = weakRefs, strong = strongRefs)
[ "def", "macho_dependencies_list", "(", "target_path", ",", "header_magic", "=", "None", ")", ":", "MachODeprendencies", "=", "namedtuple", "(", "\"MachODeprendecies\"", ",", "\"weak strong\"", ")", "# Convert the magic value into macholib representation if needed", "if", "isi...
46.040816
26.530612
def getChanges(self):
    """Get all :class:`rtcclient.models.Change` objects in this changeset

    :return: a :class:`list` contains all the
        :class:`rtcclient.models.Change` objects
    :rtype: list
    """
    # The changeset id is the final path segment of this object's URL.
    identifier = self.url.split("/")[-1]
    # Build the raw-resource URL that returns the changeset as XML.
    resource_url = "/".join(["%s" % self.rtc_obj.url,
                             "resource/itemOid",
                             "com.ibm.team.scm.ChangeSet",
                             "%s?_mediaType=text/xml" % identifier])
    resp = self.get(resource_url,
                    verify=False,
                    proxies=self.rtc_obj.proxies,
                    headers=self.rtc_obj.headers)
    raw_data = xmltodict.parse(resp.content).get("scm:ChangeSet")
    common_changes = dict()
    changes = raw_data.get("changes")
    # Collect everything except XML attributes (keys starting with "@")
    # and the "changes" entry itself; these shared fields are passed
    # alongside each individual change.
    for (key, value) in raw_data.items():
        if key.startswith("@"):
            continue
        if "changes" != key:
            common_changes[key] = value
    return self._handle_changes(changes, common_changes)
[ "def", "getChanges", "(", "self", ")", ":", "identifier", "=", "self", ".", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "resource_url", "=", "\"/\"", ".", "join", "(", "[", "\"%s\"", "%", "self", ".", "rtc_obj", ".", "url", ",", "...
41.423077
13.653846
def weighted_sampler(seq, weights):
    "Return a random-sample function that picks from seq weighted by weights."
    # Cumulative weight totals, e.g. weights [2, 3, 5] -> totals [2, 5, 10].
    totals = []
    running = 0
    for w in weights:
        running += w
        totals.append(running)
    # Draw a point uniformly along the total weight, then map it back to
    # an element via binary search over the cumulative totals.
    return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))]
[ "def", "weighted_sampler", "(", "seq", ",", "weights", ")", ":", "totals", "=", "[", "]", "for", "w", "in", "weights", ":", "totals", ".", "append", "(", "w", "+", "totals", "[", "-", "1", "]", "if", "totals", "else", "w", ")", "return", "lambda", ...
46.5
22.833333
def parse_fixed(self, node):
    """
    Parses <Fixed>

    @param node: Node containing the <Fixed> element
    @type node: xml.etree.Element
    """
    # Catch only the missing-key case; the original bare ``except`` would
    # also have hidden unrelated bugs (e.g. a node without ``lattrib``).
    try:
        parameter = node.lattrib['parameter']
    except KeyError:
        self.raise_error('<Fixed> must specify a parameter to be fixed.')
    try:
        value = node.lattrib['value']
    except KeyError:
        # Fixed typo: the original message lacked a space after the quote.
        self.raise_error("Fixed parameter '{0}' must specify a value.",
                         parameter)
    description = node.lattrib.get('description', '')
    # NOTE(review): assumes ``raise_error`` always raises; otherwise
    # ``parameter``/``value`` could be unbound here — confirm.
    self.current_component_type.add_parameter(Fixed(parameter, value,
                                                    description))
[ "def", "parse_fixed", "(", "self", ",", "node", ")", ":", "try", ":", "parameter", "=", "node", ".", "lattrib", "[", "'parameter'", "]", "except", ":", "self", ".", "raise_error", "(", "'<Fixed> must specify a parameter to be fixed.'", ")", "try", ":", "value"...
29.666667
23.952381
def run(command, timeout=None, cwd=None, env=None, debug=None):
    """
    Runs a given command on the system within a set time period, providing an
    easy way to access command output as it happens without waiting for the
    command to finish running.

    :type list
    :param command: Should be a list that contains the command that should be
        run on the given system. The only whitespaces that can occur are for
        paths that use a backslash to escape them appropriately

    :type int
    :param timeout: Specified in seconds. If a command outruns the timeout then
        the command and its child processes will be terminated. The default is
        to run without a timeout

    :type string
    :param cwd: If cwd is set then the current directory will be changed to cwd
        before it is executed. Note that this directory is not considered when
        searching the executable, so you can't specify the program's path
        relative to cwd.

    :type dict
    :param env: A dict of any ENV variables that should be combined into the OS
        ENV that will help the command to run successfully. Note that more
        often than not the command run does not have the same ENV variables
        available as your shell by default and as such may require some
        assistance.

    :type function
    :param debug: A function (also a class function) can be passed in here and
        all output, line by line, from the command being run will be passed to
        it as it gets outputted to stdout. This allows for things such as
        logging (using the built in python logging lib) what is happening on
        long running commands or redirecting output of a tail -f call as lines
        get outputted without having to wait till the command finishes.

    :return returns :class:`Command.Response` that contains the exit code and
        the output from the command
    """
    # Thin module-level convenience wrapper; all the work happens in
    # Command.run.
    return Command.run(command, timeout=timeout, cwd=cwd, env=env, debug=debug)
[ "def", "run", "(", "command", ",", "timeout", "=", "None", ",", "cwd", "=", "None", ",", "env", "=", "None", ",", "debug", "=", "None", ")", ":", "return", "Command", ".", "run", "(", "command", ",", "timeout", "=", "timeout", ",", "cwd", "=", "c...
57.342857
40.085714
def update_where(self, res, depth=0, since=None, **kwargs):
    """Like update() but uses WHERE-style args.

    ``kwargs`` are forwarded as the WHERE-style filter.  Fetching is
    deferred: ``_update`` receives a zero-argument callable that pulls
    the latest matching rows on demand.
    """
    def fetch():
        # Deferred fetch; the page offset is fixed at 0.
        return self._fetcher.fetch_all_latest(res, 0, kwargs, since=since)
    self._update(res, fetch, depth)
[ "def", "update_where", "(", "self", ",", "res", ",", "depth", "=", "0", ",", "since", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fetch", "=", "lambda", ":", "self", ".", "_fetcher", ".", "fetch_all_latest", "(", "res", ",", "0", ",", "kwargs"...
57.5
18
def move_arc(x, y, r, speed = 1, orientation = True): # WARNING: This function currently contains inaccuracy likely due to the rounding of trigonometric functions
    """
    Moves the cursor in an arc of radius r to (x, y) at a certain speed
    :param x: target x-ordinate
    :param y: target y-ordinate
    :param r: radius
    :param speed: pixel traversal rate
    :param orientation: direction of arc
    :return: None
    """
    # Current cursor position is the arc's start point.
    _x, _y = win32api.GetCursorPos()
    # Distance from the chord midpoint to the circle centre (half-chord
    # geometry); becomes complex when r is smaller than half the chord.
    c_len = (r**2 - (((x - _x)/2)**2 + ((y - _y)/2)**2))**0.5
    # Normalized offset along the chord's perpendicular.
    t = (c_len**2/((y - _y)**2 + (x - _x)**2))**0.5
    # `orientation` selects which of the two candidate centres to use.
    t = t if orientation else -t
    centre = ((_x + x)/2 + t*(_x - x), (_y + y)/2 + t*(y - _y))
    # A complex coordinate means the radius cannot span start and target.
    if any(isinstance(ordinate, complex) for ordinate in centre):
        raise ValueError("Radius too low - minimum: {}".format(((x - _x)**2 + (y - _y)**2)**0.5/2))
    # Start and end angles of the sweep, measured from the centre.
    theta = math.atan2(_y - centre[1], _x - centre[0])
    end = math.atan2(y - centre[1], x - centre[0])
    # NOTE(review): if `end` <= `theta` the loop body never runs and the
    # cursor jumps straight to the target — confirm whether the sweep
    # should wrap around instead.
    while theta < end:
        move(*list(map(round, (centre[0] + r*math.cos(theta), centre[1] + r*math.sin(theta)))))
        theta += speed/100
        time.sleep(0.01)
    # Snap exactly onto the target to absorb rounding error.
    move(x, y)
[ "def", "move_arc", "(", "x", ",", "y", ",", "r", ",", "speed", "=", "1", ",", "orientation", "=", "True", ")", ":", "# WARNING: This function currently contains inaccuracy likely due to the rounding of trigonometric functions", "_x", ",", "_y", "=", "win32api", ".", ...
41.444444
20.851852
def start(self, driver=None, device=None, midi_driver=None):
    """Start audio output driver in separate background thread

    Call this function any time after creating the Synth object.
    If you don't call this function, use get_samples() to generate
    samples.

    Optional keyword argument:
    driver : which audio driver to use for output
        Possible choices:
        'alsa', 'oss', 'jack', 'portaudio'
        'sndmgr', 'coreaudio', 'Direct Sound'
    device: the device to use for audio output
    midi_driver: the midi driver to use for communicating with midi devices

    Not all drivers will be available for every platform, it depends on
    which drivers were compiled into FluidSynth for your platform.
    """
    if driver is not None:
        assert (driver in ['alsa', 'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio', 'Direct Sound', 'pulseaudio'])
        # FluidSynth settings take byte strings.
        fluid_settings_setstr(self.settings, b'audio.driver', driver.encode())
        if device is not None:
            # Device key is driver-specific, e.g. 'audio.alsa.device'.
            fluid_settings_setstr(self.settings, str('audio.%s.device' % (driver)).encode(), device.encode())
        self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
    if midi_driver is not None:
        assert (midi_driver in ['alsa_seq', 'alsa_raw', 'oss', 'winmidi', 'midishare', 'coremidi'])
        fluid_settings_setstr(self.settings, b'midi.driver', midi_driver.encode())
        # Route incoming MIDI events through a router into the synth.
        self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth)
        fluid_synth_set_midi_router(self.synth, self.router)
        self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)
[ "def", "start", "(", "self", ",", "driver", "=", "None", ",", "device", "=", "None", ",", "midi_driver", "=", "None", ")", ":", "if", "driver", "is", "not", "None", ":", "assert", "(", "driver", "in", "[", "'alsa'", ",", "'oss'", ",", "'jack'", ","...
53.645161
29.483871
def build_verify_command(self, packages):
    """build_verify_command(self, packages) -> str
    Generate a command to verify the list of packages given
    in ``packages`` using the native package manager's
    verification tool.

    The command to be executed is returned as a string that
    may be passed to a command execution routine (for e.g.
    ``sos_get_command_output()``.

    :param packages: a string, or a list of strings giving
                     package names to be verified.

    :returns: a string containing an executable command
              that will perform verification of the given
              packages.
    :returntype: str or ``NoneType``
    """
    if not self.verify_command:
        return None

    # The re.match(pkg) used by all_pkgs_by_name_regex() may return
    # an empty list (`[[]]`) when no package matches: avoid building
    # an rpm -V command line with the empty string as the package
    # list in this case.
    #
    # Materialize into a real list: under Python 3 a lazy `filter`
    # object is always truthy, which would defeat the emptiness
    # check below.
    by_regex = self.all_pkgs_by_name_regex
    verify_list = [pkgs for pkgs in map(by_regex, packages) if pkgs]

    # No packages after regex match?
    if not verify_list:
        return None

    verify_packages = ""
    for package_list in verify_list:
        for package in package_list:
            # Skip packages the profile explicitly excludes from
            # verification.
            if any([f in package for f in self.verify_filter]):
                continue
            if len(verify_packages):
                verify_packages += " "
            verify_packages += package
    return self.verify_command + " " + verify_packages
[ "def", "build_verify_command", "(", "self", ",", "packages", ")", ":", "if", "not", "self", ".", "verify_command", ":", "return", "None", "# The re.match(pkg) used by all_pkgs_by_name_regex() may return", "# an empty list (`[[]]`) when no package matches: avoid building", "# an r...
41.1
16.825
def publish(self, user, provider, obj, comment, **kwargs):
    '''
    Publish ``obj`` with ``comment`` on behalf of ``user`` through the
    named provider's backend.

    user - django User or UserSocialAuth instance
    provider - name of publisher provider
    obj - sharing object
    comment - string
    '''
    # Resolve the user's social account for this provider, then hand the
    # extra kwargs to the backend as its context.
    social_user = self._get_social_user(user, provider)
    return self.get_backend(social_user, provider,
                            context=kwargs).publish(obj, comment)
[ "def", "publish", "(", "self", ",", "user", ",", "provider", ",", "obj", ",", "comment", ",", "*", "*", "kwargs", ")", ":", "social_user", "=", "self", ".", "_get_social_user", "(", "user", ",", "provider", ")", "backend", "=", "self", ".", "get_backen...
42.2
17.8
def get_redis_info():
    """Check Redis connection.

    Looks up a redis:// URL in the Django settings, attempts a connection
    with a short timeout, and returns a status dict suitable for a health
    check endpoint.
    """
    from kombu.utils.url import _parse_url as parse_redis_url
    from redis import (
        StrictRedis,
        ConnectionError as RedisConnectionError,
        ResponseError as RedisResponseError,
    )
    # Probe the settings names in priority order for a redis:// URL.
    for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'):
        if hasattr(settings, conf_name):
            url = getattr(settings, conf_name)
            if url.startswith('redis://'):
                break
    else:
        # for/else: no setting supplied a redis:// URL.
        log.error("No redis connection info found in settings.")
        return {"status": NO_CONFIG}
    _, host, port, _, password, database, _ = parse_redis_url(url)

    start = datetime.now()
    try:
        rdb = StrictRedis(
            host=host,
            port=port,
            db=database,
            password=password,
            socket_timeout=TIMEOUT_SECONDS,
        )
        info = rdb.info()
    except (RedisConnectionError, TypeError) as ex:
        log.error("Error making Redis connection: %s", ex.args)
        return {"status": DOWN}
    except RedisResponseError as ex:
        # e.g. wrong password: server reachable but refuses the command.
        log.error("Bad Redis response: %s", ex.args)
        return {"status": DOWN, "message": "auth error"}
    # NOTE(review): .microseconds is only the sub-second component, not
    # the total elapsed time — confirm whether total_seconds() was meant.
    micro = (datetime.now() - start).microseconds
    del rdb  # the redis package does not support Redis's QUIT.
    ret = {
        "status": UP,
        "response_microseconds": micro,
    }
    # Surface a few server statistics alongside the status.
    fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
    ret.update({x: info[x] for x in fields})
    return ret
[ "def", "get_redis_info", "(", ")", ":", "from", "kombu", ".", "utils", ".", "url", "import", "_parse_url", "as", "parse_redis_url", "from", "redis", "import", "(", "StrictRedis", ",", "ConnectionError", "as", "RedisConnectionError", ",", "ResponseError", "as", "...
37.128205
17.512821
def release(self) -> None:
    """Increment the counter and wake one waiter."""
    self._value += 1
    # Hand the new permit to the first waiter that has not already been
    # resolved/cancelled; stale (done) futures are simply discarded.
    while self._waiters:
        candidate = self._waiters.popleft()
        if candidate.done():
            continue
        # The permit is consumed immediately by the woken waiter.
        self._value -= 1

        # If the waiter is a coroutine paused at
        #
        #     with (yield semaphore.acquire()):
        #
        # then the context manager's __exit__ calls release() at the end
        # of the "with" block.
        candidate.set_result(_ReleasingContextManager(self))
        break
[ "def", "release", "(", "self", ")", "->", "None", ":", "self", ".", "_value", "+=", "1", "while", "self", ".", "_waiters", ":", "waiter", "=", "self", ".", "_waiters", ".", "popleft", "(", ")", "if", "not", "waiter", ".", "done", "(", ")", ":", "...
37
16.5
def install(runas=None, path=None):
    '''
    Install pyenv systemwide

    CLI Example:

    .. code-block:: bash

        salt '*' pyenv.install
    '''
    # Fall back to the default pyenv path for the given user, then expand
    # any leading "~" before delegating to the install helper.
    target = os.path.expanduser(path or _pyenv_path(runas))
    return _install_pyenv(target, runas)
[ "def", "install", "(", "runas", "=", "None", ",", "path", "=", "None", ")", ":", "path", "=", "path", "or", "_pyenv_path", "(", "runas", ")", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "return", "_install_pyenv", "(", "path"...
19.769231
20.230769
def add_grid(self, row=None, col=None, row_span=1, col_span=1, **kwargs):
    """
    Create a new Grid and add it as a child widget.

    Parameters
    ----------
    row : int
        The row in which to add the widget (0 is the topmost row)
    col : int
        The column in which to add the widget (0 is the leftmost column)
    row_span : int
        The number of rows to be occupied by this widget. Default is 1.
    col_span : int
        The number of columns to be occupied by this widget. Default is 1.
    **kwargs : dict
        Keyword arguments to pass to the new `Grid`.
    """
    # Imported here rather than at module level to avoid a circular
    # import between this module and .grid.
    from .grid import Grid
    return self.add_widget(Grid(**kwargs), row, col, row_span, col_span)
[ "def", "add_grid", "(", "self", ",", "row", "=", "None", ",", "col", "=", "None", ",", "row_span", "=", "1", ",", "col_span", "=", "1", ",", "*", "*", "kwargs", ")", ":", "from", ".", "grid", "import", "Grid", "grid", "=", "Grid", "(", "*", "*"...
37.238095
20.666667
def hostinterface_delete(interfaceids, **kwargs):
    '''
    Delete host interface

    .. versionadded:: 2016.3.0

    :param interfaceids: IDs of the host interfaces to delete
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: ID of deleted host interfaces, empty dict on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.hostinterface_delete 50
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if not conn_args:
            # No connection info: bail out through the shared KeyError path.
            raise KeyError
        # The API wants a list, even for a single interface id.
        params = interfaceids if isinstance(interfaceids, list) else [interfaceids]
        ret = _query('hostinterface.delete', params,
                     conn_args['url'], conn_args['auth'])
        return ret['result']['interfaceids']
    except KeyError:
        # Missing connection info or an unexpected response shape: return
        # whatever we have (an empty dict before the query ran).
        return ret
[ "def", "hostinterface_delete", "(", "interfaceids", ",", "*", "*", "kwargs", ")", ":", "conn_args", "=", "_login", "(", "*", "*", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "if", "conn_args", ":", "method", "=", "'hostinterface.delete'", "if", "isi...
34.333333
26.333333
def _show_status_for_work(self, work):
    """Print a completion summary for the given work pieces.

    Args:
      work: instance of either AttackWorkPieces or DefenseWorkPieces
    """
    total = len(work.work)
    per_worker = {}
    completed = 0
    # Tally completed pieces and, per worker, the count and the most
    # recent claim timestamp.
    for piece in itervalues(work.work):
        if not piece['is_completed']:
            continue
        completed += 1
        stats = per_worker.setdefault(piece['claimed_worker_id'],
                                      {'completed_count': 0,
                                       'last_update': 0.0})
        stats['completed_count'] += 1
        stats['last_update'] = max(stats['last_update'],
                                   piece['claimed_worker_start_time'])
    print('Completed {0}/{1} work'.format(completed, total))
    for worker_id in sorted(iterkeys(per_worker)):
        stats = per_worker[worker_id]
        last_update_time = time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.localtime(stats['last_update']))
        print('Worker {0}: completed {1} last claimed work at {2}'.format(
            worker_id, stats['completed_count'], last_update_time))
[ "def", "_show_status_for_work", "(", "self", ",", "work", ")", ":", "work_count", "=", "len", "(", "work", ".", "work", ")", "work_completed", "=", "{", "}", "work_completed_count", "=", "0", "for", "v", "in", "itervalues", "(", "work", ".", "work", ")",...
39.533333
13.166667
def get_sub_dept_ids(self):
    """Method to get the department list"""
    # Log which request produced the JSON payload we are about to read.
    request_line = "%s\t%s" % (self.request_method, self.request_url)
    self.logger.info(request_line)
    return self.json_response.get("sub_dept_id_list", None)
[ "def", "get_sub_dept_ids", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"%s\\t%s\"", "%", "(", "self", ".", "request_method", ",", "self", ".", "request_url", ")", ")", "return", "self", ".", "json_response", ".", "get", "(", "\"sub_...
54
18
def update_list_positions_obj(self, positions_obj_id, revision, values):
    '''
    Updates the ordering of lists to have the given values.

    The given ID and revision should match the singleton object defining
    how lists are laid out.

    See https://developer.wunderlist.com/documentation/endpoints/positions for more info

    Return:
        The updated ListPositionsObj-mapped object defining the order of list layout
    '''
    # Thin delegation to the positions endpoint module; this client object
    # is passed through as the request context.
    return positions_endpoints.update_list_positions_obj(self, positions_obj_id, revision, values)
[ "def", "update_list_positions_obj", "(", "self", ",", "positions_obj_id", ",", "revision", ",", "values", ")", ":", "return", "positions_endpoints", ".", "update_list_positions_obj", "(", "self", ",", "positions_obj_id", ",", "revision", ",", "values", ")" ]
54.2
46.8
def readFromProto(cls, proto):
    """
    Read state from proto object.

    :param proto: SDRClassifierRegionProto capnproto object
    """
    instance = cls()
    # Scalar configuration fields are copied straight across.
    for field in ("implementation", "steps", "alpha", "verbosity",
                  "maxCategoryCount", "learningMode", "inferenceMode",
                  "recordNum"):
        setattr(instance, field, getattr(proto, field))
    # "steps" is serialized as a comma-separated string; keep the parsed
    # integer list alongside the raw form.
    instance.stepsList = [int(i) for i in proto.steps.split(",")]
    # Deserialization of the classifier itself is delegated to the factory.
    instance._sdrClassifier = SDRClassifierFactory.read(proto)
    return instance
[ "def", "readFromProto", "(", "cls", ",", "proto", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "implementation", "=", "proto", ".", "implementation", "instance", ".", "steps", "=", "proto", ".", "steps", "instance", ".", "stepsList", "=", ...
29.272727
17.181818
def hashable(val):
    """Return `val` if it is hashable, otherwise its string representation.

    Parameters
    ----------
    val: object
        Any (possibly not hashable) python object

    Returns
    -------
    val or string
        The given `val` if it is hashable or its string representation"""
    # None is trivially hashable and returned unchanged.
    if val is None:
        return val
    try:
        hash(val)
    except TypeError:
        # Unhashable (e.g. list, dict): fall back to repr() so the result
        # can still be used as a dict key / set member.
        return repr(val)
    return val
[ "def", "hashable", "(", "val", ")", ":", "if", "val", "is", "None", ":", "return", "val", "try", ":", "hash", "(", "val", ")", "except", "TypeError", ":", "return", "repr", "(", "val", ")", "else", ":", "return", "val" ]
21.6
22.75
def git_ls_remote(self, uri, ref):
    """Determine the latest commit id for a given ref.

    Args:
        uri (string): git URI
        ref (string): git ref

    Returns:
        str: A commit id

    """
    logger.debug("Invoking git to retrieve commit id for repo %s...", uri)
    output = subprocess.check_output(['git', 'ls-remote', uri, ref])
    # `git ls-remote` prints "<sha>\t<ref>" per match; no tab in the
    # output means the ref does not exist on the remote.
    if b"\t" not in output:
        raise ValueError("Ref \"%s\" not found for repo %s." % (ref, uri))
    commit_id = output.split(b"\t")[0]
    logger.debug("Matching commit id found: %s", commit_id)
    return commit_id
[ "def", "git_ls_remote", "(", "self", ",", "uri", ",", "ref", ")", ":", "logger", ".", "debug", "(", "\"Invoking git to retrieve commit id for repo %s...\"", ",", "uri", ")", "lsremote_output", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'ls-...
36.909091
19.772727
def numberOfTilesAtZoom(self, zoom):
    "Returns the total number of tile at a given zoom level"
    # Extent addresses are inclusive on both ends, hence the +1 per axis.
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(zoom)
    n_rows = maxRow - minRow + 1
    n_cols = maxCol - minCol + 1
    return n_cols * n_rows
[ "def", "numberOfTilesAtZoom", "(", "self", ",", "zoom", ")", ":", "[", "minRow", ",", "minCol", ",", "maxRow", ",", "maxCol", "]", "=", "self", ".", "getExtentAddress", "(", "zoom", ")", "return", "(", "maxCol", "-", "minCol", "+", "1", ")", "*", "("...
57.5
19.5
def plotShape (includePost = ['all'], includePre = ['all'], showSyns = False, showElectrodes = False, synStyle = '.', synSiz=3, dist=0.6, cvar=None, cvals=None, iv=False, ivprops=None, includeAxon=True, bkgColor = None, fontSize = 12, figSize = (10,8), saveData = None, dpi = 300, saveFig = None, showFig = True): ''' Plot 3D cell shape using NEURON Interview PlotShape - includePre: (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of presynaptic cells to consider when plotting connections (default: ['all']) - includePost: (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of cells to show shape of (default: ['all']) - showSyns (True|False): Show synaptic connections in 3D view (default: False) - showElectrodes (True|False): Show LFP electrodes in 3D view (default: False) - synStyle: Style of marker to show synapses (default: '.') - dist: 3D distance (like zoom) (default: 0.6) - synSize: Size of marker to show synapses (default: 3) - cvar: ('numSyns'|'weightNorm') Variable to represent in shape plot (default: None) - cvals: List of values to represent in shape plot; must be same as num segments (default: None) - iv: Use NEURON Interviews (instead of matplotlib) to show shape plot (default: None) - ivprops: Dict of properties to plot using Interviews (default: None) - includeAxon: Include axon in shape plot (default: True) - bkgColor (list/tuple with 4 floats): RGBA list/tuple with bakcground color eg. (0.5, 0.2, 0.1, 1.0) (default: None) - figSize ((width, height)): Size of figure (default: (10,8)) - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure; if set to True uses filename from simConfig (default: None) - saveFig (None|True|'fileName'): File name where to save the figure; if set to True uses filename from simConfig (default: None) - showFig (True|False): Whether to show the figure or not (default: True) - Returns figure handles ''' from .. 
import sim from neuron import h print('Plotting 3D cell shape ...') cellsPreGids = [c.gid for c in sim.getCellsList(includePre)] if includePre else [] cellsPost = sim.getCellsList(includePost) if not hasattr(sim.net, 'compartCells'): sim.net.compartCells = [c for c in cellsPost if type(c) is sim.CompartCell] sim.net.defineCellShapes() # in case some cells had stylized morphologies without 3d pts if not iv: # plot using Python instead of interviews from mpl_toolkits.mplot3d import Axes3D from netpyne.support import morphology as morph # code adapted from https://github.com/ahwillia/PyNeuron-Toolbox # create secList from include secs = None # Set cvals and secs if not cvals and cvar: cvals = [] secs = [] # weighNorm if cvar == 'weightNorm': for cellPost in cellsPost: cellSecs = list(cellPost.secs.values()) if includeAxon else [s for s in list(cellPost.secs.values()) if 'axon' not in s['hObj'].hname()] for sec in cellSecs: if 'weightNorm' in sec: secs.append(sec['hObj']) cvals.extend(sec['weightNorm']) cvals = np.array(cvals) cvals = cvals/min(cvals) # numSyns elif cvar == 'numSyns': for cellPost in cellsPost: cellSecs = cellPost.secs if includeAxon else {k:s for k,s in cellPost.secs.items() if 'axon' not in s['hObj'].hname()} for secLabel,sec in cellSecs.items(): nseg=sec['hObj'].nseg nsyns = [0] * nseg secs.append(sec['hObj']) conns = [conn for conn in cellPost.conns if conn['sec']==secLabel and conn['preGid'] in cellsPreGids] for conn in conns: nsyns[int(round(conn['loc']*nseg))-1] += 1 cvals.extend(nsyns) cvals = np.array(cvals) if not secs: secs = [s['hObj'] for cellPost in cellsPost for s in list(cellPost.secs.values())] if not includeAxon: secs = [sec for sec in secs if 'axon' not in sec.hname()] # Plot shapeplot cbLabels = {'numSyns': 'number of synapses per segment', 'weightNorm': 'weight scaling'} plt.rcParams.update({'font.size': fontSize}) fig=plt.figure(figsize=figSize) shapeax = plt.subplot(111, projection='3d') shapeax.elev=90 # 90 shapeax.azim=-90 # 
-90 shapeax.dist=dist*shapeax.dist plt.axis('equal') cmap = plt.cm.viridis #plt.cm.jet #plt.cm.rainbow #plt.cm.jet #YlOrBr_r morph.shapeplot(h,shapeax, sections=secs, cvals=cvals, cmap=cmap) fig.subplots_adjust(left=0, right=1, bottom=0, top=1) if cvals is not None and len(cvals)>0: sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=np.min(cvals), vmax=np.max(cvals))) sm._A = [] # fake up the array of the scalar mappable cb = plt.colorbar(sm, fraction=0.15, shrink=0.5, pad=0.05, aspect=20) if cvar: cb.set_label(cbLabels[cvar], rotation=90, fontsize=fontSize) if bkgColor: shapeax.w_xaxis.set_pane_color(bkgColor) shapeax.w_yaxis.set_pane_color(bkgColor) shapeax.w_zaxis.set_pane_color(bkgColor) #shapeax.grid(False) # Synapses if showSyns: synColor='red' for cellPost in cellsPost: for sec in list(cellPost.secs.values()): for synMech in sec['synMechs']: morph.mark_locations(h, sec['hObj'], synMech['loc'], markspec=synStyle, color=synColor, markersize=synSiz) # Electrodes if showElectrodes: ax = plt.gca() colorOffset = 0 if 'avg' in showElectrodes: showElectrodes.remove('avg') colorOffset = 1 coords = sim.net.recXElectrode.pos.T[np.array(showElectrodes).astype(int),:] ax.scatter(coords[:,0],coords[:,1],coords[:,2], s=150, c=colorList[colorOffset:len(coords)+colorOffset], marker='v', depthshade=False, edgecolors='k', linewidth=2) for i in range(coords.shape[0]): ax.text(coords[i,0],coords[i,1],coords[i,2], ' '+str(showElectrodes[i]), fontweight='bold' ) cb.set_label('Segment total transfer resistance to electrodes (kiloohm)', rotation=90, fontsize=fontSize) #plt.title(str(includePre)+' -> '+str(includePost) + ' ' + str(cvar)) shapeax.set_xticklabels([]) shapeax.set_yticklabels([]) shapeax.set_zticklabels([]) #shapeax.set_ylabel('y location (um)') # save figure if saveFig: if isinstance(saveFig, basestring): filename = saveFig else: filename = sim.cfg.filename+'_shape.png' plt.savefig(filename, dpi=dpi) # show fig if showFig: _showFigure() else: # Plot using 
Interviews # colors: 0 white, 1 black, 2 red, 3 blue, 4 green, 5 orange, 6 brown, 7 violet, 8 yellow, 9 gray from neuron import gui fig = h.Shape() secList = h.SectionList() if not ivprops: ivprops = {'colorSecs': 1, 'colorSyns':2 ,'style': 'O', 'siz':5} for cell in [c for c in cellsPost]: for sec in list(cell.secs.values()): if 'axon' in sec['hObj'].hname() and not includeAxon: continue sec['hObj'].push() secList.append() h.pop_section() if showSyns: for synMech in sec['synMechs']: if synMech['hObj']: # find pre pop using conn[preGid] # create dict with color for each pre pop; check if exists; increase color counter # colorsPre[prePop] = colorCounter # find synMech using conn['loc'], conn['sec'] and conn['synMech'] fig.point_mark(synMech['hObj'], ivprops['colorSyns'], ivprops['style'], ivprops['siz']) fig.observe(secList) fig.color_list(secList, ivprops['colorSecs']) fig.flush() fig.show(0) # show real diam # save figure if saveFig: if isinstance(saveFig, basestring): filename = saveFig else: filename = sim.cfg.filename+'_'+'shape.ps' fig.printfile(filename) return fig, {}
[ "def", "plotShape", "(", "includePost", "=", "[", "'all'", "]", ",", "includePre", "=", "[", "'all'", "]", ",", "showSyns", "=", "False", ",", "showElectrodes", "=", "False", ",", "synStyle", "=", "'.'", ",", "synSiz", "=", "3", ",", "dist", "=", "0....
48.417582
28.164835
def do_size(self, w, h): """Record size.""" if (w is None): self.sw = self.rw self.sh = self.rh else: self.sw = w self.sh = h # Now we have region and size, generate the image image = Image.new("RGB", (self.sw, self.sh), self.gen.background_color) for y in range(0, self.sh): for x in range(0, self.sw): ix = int((x * self.rw) // self.sw + self.rx) iy = int((y * self.rh) // self.sh + self.ry) color = self.gen.pixel(ix, iy) if (color is not None): image.putpixel((x, y), color) self.image = image
[ "def", "do_size", "(", "self", ",", "w", ",", "h", ")", ":", "if", "(", "w", "is", "None", ")", ":", "self", ".", "sw", "=", "self", ".", "rw", "self", ".", "sh", "=", "self", ".", "rh", "else", ":", "self", ".", "sw", "=", "w", "self", "...
37.777778
13.777778
def submit(self, password=''): """Submits the participation to the web site. The passwords is sent as plain text. :return: the evaluation results. """ url = '{}/api/submit'.format(BASE_URL) try: r = requests.post(url, data=self.dumps(), headers={'content-type': 'application/json'}, auth=(self['metadata']['email'], password)) response = r.json() except requests.exceptions.HTTPError as e: logging.error('Error while submitting the participation. {}'.format(e)) return Job() if 'error' in response: logging.error('Error while processing the participation. {}'.format(response['error'])) return Job() return Job(response)
[ "def", "submit", "(", "self", ",", "password", "=", "''", ")", ":", "url", "=", "'{}/api/submit'", ".", "format", "(", "BASE_URL", ")", "try", ":", "r", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "self", ".", "dumps", "(", ")", "...
32.153846
22.461538
def setDefaultIREncoding(encoding): ''' setDefaultIREncoding - Sets the default encoding used by IndexedRedis. This will be the default encoding used for field data. You can override this on a per-field basis by using an IRField (such as IRUnicodeField or IRRawField) @param encoding - An encoding (like utf-8) ''' try: b''.decode(encoding) except: raise ValueError('setDefaultIREncoding was provided an invalid codec. Got (encoding="%s")' %(str(encoding), )) global defaultIREncoding defaultIREncoding = encoding
[ "def", "setDefaultIREncoding", "(", "encoding", ")", ":", "try", ":", "b''", ".", "decode", "(", "encoding", ")", "except", ":", "raise", "ValueError", "(", "'setDefaultIREncoding was provided an invalid codec. Got (encoding=\"%s\")'", "%", "(", "str", "(", "encoding"...
34.866667
30.6
def get_args(obj): """Get a list of argument names for a callable.""" if inspect.isfunction(obj): return inspect.getargspec(obj).args elif inspect.ismethod(obj): return inspect.getargspec(obj).args[1:] elif inspect.isclass(obj): return inspect.getargspec(obj.__init__).args[1:] elif hasattr(obj, '__call__'): return inspect.getargspec(obj.__call__).args[1:] else: raise TypeError("Can't inspect signature of '%s' object." % obj)
[ "def", "get_args", "(", "obj", ")", ":", "if", "inspect", ".", "isfunction", "(", "obj", ")", ":", "return", "inspect", ".", "getargspec", "(", "obj", ")", ".", "args", "elif", "inspect", ".", "ismethod", "(", "obj", ")", ":", "return", "inspect", "....
40.083333
13.416667
def _check_min_max_range(self, var, test_ctx): """ Checks that either both valid_min and valid_max exist, or valid_range exists. """ if 'valid_range' in var.ncattrs(): test_ctx.assert_true(var.valid_range.dtype == var.dtype and len(var.valid_range) == 2 and var.valid_range[0] <= var.valid_range[1], "valid_range must be a two element vector of min followed by max with the same data type as {}".format(var.name) ) else: for bound in ('valid_min', 'valid_max'): v_bound = getattr(var, bound, '') warn_msg = '{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'.format(bound, var.name) # need to special case str attributes since they aren't directly # comparable to numpy dtypes if isinstance(v_bound, six.string_types): test_ctx.assert_true(v_bound != '' and var.dtype.char == 'S', warn_msg) # otherwise compare the numpy types directly else: test_ctx.assert_true(v_bound.dtype == var.dtype, warn_msg) return test_ctx
[ "def", "_check_min_max_range", "(", "self", ",", "var", ",", "test_ctx", ")", ":", "if", "'valid_range'", "in", "var", ".", "ncattrs", "(", ")", ":", "test_ctx", ".", "assert_true", "(", "var", ".", "valid_range", ".", "dtype", "==", "var", ".", "dtype",...
56.291667
26.541667
def is_bool_indexer(key: Any) -> bool: """ Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. """ na_msg = 'cannot index with vector containing NA / NaN values' if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (is_array_like(key) and is_extension_array_dtype(key.dtype))): if key.dtype == np.object_: key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): # an ndarray with bool-dtype by definition has no missing values. # So we only need to check for NAs in ExtensionArrays if is_extension_array_dtype(key.dtype): if np.any(key.isna()): raise ValueError(na_msg) return True elif isinstance(key, list): try: arr = np.asarray(key) return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False return False
[ "def", "is_bool_indexer", "(", "key", ":", "Any", ")", "->", "bool", ":", "na_msg", "=", "'cannot index with vector containing NA / NaN values'", "if", "(", "isinstance", "(", "key", ",", "(", "ABCSeries", ",", "np", ".", "ndarray", ",", "ABCIndex", ")", ")", ...
33
19.583333
def get_all(self, references, field_paths=None, transaction=None): """Retrieve a batch of documents. .. note:: Documents returned by this method are not guaranteed to be returned in the same order that they are given in ``references``. .. note:: If multiple ``references`` refer to the same document, the server will only return one result. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. If a ``transaction`` is used and it already has write operations added, this method cannot be used (i.e. read-after-write is not allowed). Args: references (List[.DocumentReference, ...]): Iterable of document references to be retrieved. field_paths (Optional[Iterable[str, ...]]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the returned results. If no value is provided, all fields will be returned. transaction (Optional[~.firestore_v1beta1.transaction.\ Transaction]): An existing transaction that these ``references`` will be retrieved in. Yields: .DocumentSnapshot: The next document snapshot that fulfills the query, or :data:`None` if the document does not exist. """ document_paths, reference_map = _reference_info(references) mask = _get_doc_mask(field_paths) response_iterator = self._firestore_api.batch_get_documents( self._database_string, document_paths, mask, transaction=_helpers.get_transaction_id(transaction), metadata=self._rpc_metadata, ) for get_doc_response in response_iterator: yield _parse_batch_get(get_doc_response, reference_map, self)
[ "def", "get_all", "(", "self", ",", "references", ",", "field_paths", "=", "None", ",", "transaction", "=", "None", ")", ":", "document_paths", ",", "reference_map", "=", "_reference_info", "(", "references", ")", "mask", "=", "_get_doc_mask", "(", "field_path...
41.12766
25.340426
def install_frontend(instance='default', forcereload=False, forcerebuild=False, forcecopy=True, install=True, development=False, build_type='dist'): """Builds and installs the frontend""" hfoslog("Updating frontend components", emitter='BUILDER') components = {} loadable_components = {} # TODO: Fix this up, it is probably not a sane way to get at the real root if development: frontendroot = os.path.abspath(os.path.dirname(os.path.realpath( __file__)) + "../../../frontend") else: frontendroot = '/opt/hfos/frontend' frontendtarget = os.path.join('/var/lib/hfos', instance, 'frontend') if install: cmdline = ["npm", "install"] hfoslog("Running", cmdline, lvl=verbose, emitter='BUILDER') npminstall = Popen(cmdline, cwd=frontendroot) out, err = npminstall.communicate() npminstall.wait() hfoslog("Frontend dependency installing done: ", out, err, lvl=debug, emitter='BUILDER') if True: # try: from pkg_resources import iter_entry_points entry_point_tuple = ( iter_entry_points(group='hfos.base', name=None), iter_entry_points(group='hfos.sails', name=None), iter_entry_points(group='hfos.components', name=None) ) for iterator in entry_point_tuple: for entry_point in iterator: try: name = entry_point.name location = entry_point.dist.location loaded = entry_point.load() hfoslog("Entry point: ", entry_point, name, entry_point.resolve().__module__, lvl=debug, emitter='BUILDER') component_name = entry_point.resolve().__module__.split('.')[1] hfoslog("Loaded: ", loaded, lvl=verbose, emitter='BUILDER') comp = { 'location': location, 'version': str(entry_point.dist.parsed_version), 'description': loaded.__doc__ } frontend = os.path.join(location, 'frontend') hfoslog("Checking component frontend parts: ", frontend, lvl=verbose, emitter='BUILDER') if os.path.isdir( frontend) and frontend != frontendroot: comp['frontend'] = frontend else: hfoslog("Component without frontend " "directory:", comp, lvl=debug, emitter='BUILDER') components[component_name] = comp 
loadable_components[component_name] = loaded hfoslog("Loaded component:", comp, lvl=verbose, emitter='BUILDER') except Exception as e: hfoslog("Could not inspect entrypoint: ", e, type(e), entry_point, iterator, lvl=error, exc=True, emitter='BUILDER') # except Exception as e: # hfoslog("Error: ", e, type(e), lvl=error, exc=True, emitter='BUILDER') # return hfoslog('Components after lookup:', sorted(list(components.keys())), emitter='BUILDER') def _update_frontends(install=True): hfoslog("Checking unique frontend locations: ", loadable_components, lvl=debug, emitter='BUILDER') importlines = [] modules = [] for name, component in components.items(): if 'frontend' in component: origin = component['frontend'] target = os.path.join(frontendroot, 'src', 'components', name) target = os.path.normpath(target) if install: reqfile = os.path.join(origin, 'requirements.txt') if os.path.exists(reqfile): # TODO: Speed this up by collecting deps first then doing one single install call hfoslog("Installing package dependencies", lvl=debug, emitter='BUILDER') with open(reqfile, 'r') as f: cmdline = ["npm", "install"] for line in f.readlines(): cmdline.append(line.replace("\n", "")) hfoslog("Running", cmdline, lvl=verbose, emitter='BUILDER') npminstall = Popen(cmdline, cwd=frontendroot) out, err = npminstall.communicate() npminstall.wait() hfoslog("Frontend installing done: ", out, err, lvl=debug, emitter='BUILDER') # if target in ('/', '/boot', '/usr', '/home', '/root', # '/var'): # hfoslog("Unsafe frontend deletion target path, " # "NOT proceeding! 
", target, lvl=critical, # emitter='BUILDER') hfoslog("Copying:", origin, target, lvl=debug, emitter='BUILDER') copytree(origin, target) for modulefilename in glob(target + '/*.module.js'): modulename = os.path.basename(modulefilename).split( ".module.js")[0] line = u"import {s} from './components/{p}/{" \ u"s}.module';\nmodules.push({s});\n".format( s=modulename, p=name) if modulename not in modules: importlines += line modules.append(modulename) else: hfoslog("Module without frontend:", name, component, lvl=debug, emitter='BUILDER') with open(os.path.join(frontendroot, 'src', 'main.tpl.js'), "r") as f: main = "".join(f.readlines()) parts = main.split("/* COMPONENT SECTION */") if len(parts) != 3: hfoslog("Frontend loader seems damaged! Please check!", lvl=critical, emitter='BUILDER') return try: with open(os.path.join(frontendroot, 'src', 'main.js'), "w") as f: f.write(parts[0]) f.write("/* COMPONENT SECTION:BEGIN */\n") for line in importlines: f.write(line) f.write("/* COMPONENT SECTION:END */\n") f.write(parts[2]) except Exception as e: hfoslog("Error during frontend package info writing. Check " "permissions! 
", e, lvl=error, emitter='BUILDER') def _rebuild_frontend(): hfoslog("Starting frontend build.", lvl=warn, emitter='BUILDER') npmbuild = Popen(["npm", "run", build_type], cwd=frontendroot) out, err = npmbuild.communicate() try: npmbuild.wait() except Exception as e: hfoslog("Error during frontend build", e, type(e), exc=True, lvl=error, emitter='BUILDER') return hfoslog("Frontend build done: ", out, err, lvl=debug, emitter='BUILDER') copytree(os.path.join(frontendroot, build_type), frontendtarget, hardlink=False) copytree(os.path.join(frontendroot, 'assets'), os.path.join(frontendtarget, 'assets'), hardlink=False) hfoslog("Frontend deployed", emitter='BUILDER') hfoslog("Checking component frontend bits in ", frontendroot, lvl=verbose, emitter='BUILDER') _update_frontends(install=install) if forcerebuild: _rebuild_frontend() hfoslog("Done: Install Frontend", emitter='BUILDER')
[ "def", "install_frontend", "(", "instance", "=", "'default'", ",", "forcereload", "=", "False", ",", "forcerebuild", "=", "False", ",", "forcecopy", "=", "True", ",", "install", "=", "True", ",", "development", "=", "False", ",", "build_type", "=", "'dist'",...
39.904523
22.417085
def reference(self, t, i): """Handle references.""" octal = self.get_octal(t, i) if t in _OCTAL and octal: self.parse_octal(octal, i) elif (t in _DIGIT or t == 'g') and not self.use_format: group = self.get_group(t, i) if not group: group = self.get_named_group(t, i) self.handle_group('\\' + group) elif t in _STANDARD_ESCAPES: self.get_single_stack() self.result.append('\\' + t) elif t == "l": self.single_case(i, _LOWER) elif t == "L": self.span_case(i, _LOWER) elif t == "c": self.single_case(i, _UPPER) elif t == "C": self.span_case(i, _UPPER) elif t == "E": self.end_found = True elif not self.is_bytes and t == "U": self.parse_unicode(i, True) elif not self.is_bytes and t == "u": self.parse_unicode(i) elif not self.is_bytes and t == "N": self.parse_named_unicode(i) elif t == "x": self.parse_bytes(i) elif self.use_format and t in _CURLY_BRACKETS: self.result.append('\\\\') self.handle_format(t, i) elif self.use_format and t == 'g': self.result.append('\\\\') self.result.append(t) else: value = '\\' + t self.get_single_stack() if self.span_stack: value = self.convert_case(value, self.span_stack[-1]) self.result.append(value)
[ "def", "reference", "(", "self", ",", "t", ",", "i", ")", ":", "octal", "=", "self", ".", "get_octal", "(", "t", ",", "i", ")", "if", "t", "in", "_OCTAL", "and", "octal", ":", "self", ".", "parse_octal", "(", "octal", ",", "i", ")", "elif", "("...
35.860465
8.27907
def alpha_gen(x): """ Create a mappable function alpha to apply to each xmin in a list of xmins. This is essentially the slow version of fplfit/cplfit, though I bet it could be speeded up with a clever use of parellel_map. Not intended to be used by users. Docstring for the generated alpha function:: Given a sorted data set and a minimum, returns power law MLE fit data is passed as a keyword parameter so that it can be vectorized If there is only one element, return alpha=0 """ def alpha_(xmin,x=x): """ Given a sorted data set and a minimum, returns power law MLE fit data is passed as a keyword parameter so that it can be vectorized If there is only one element, return alpha=0 """ gexmin = x>=xmin n = np.count_nonzero(gexmin) if n < 2: return 0 x = x[gexmin] a = 1 + float(n) / sum(log(x/xmin)) return a return alpha_
[ "def", "alpha_gen", "(", "x", ")", ":", "def", "alpha_", "(", "xmin", ",", "x", "=", "x", ")", ":", "\"\"\"\n Given a sorted data set and a minimum, returns power law MLE fit\n data is passed as a keyword parameter so that it can be vectorized\n\n If there is onl...
35.481481
21.518519
def processTPED(uniqueSNPs, mapF, fileName, tfam, prefix): """Process the TPED file. :param uniqueSNPs: the unique markers. :param mapF: a representation of the ``map`` file. :param fileName: the name of the ``tped`` file. :param tfam: the name of the ``tfam`` file. :param prefix: the prefix of all the files. :type uniqueSNPs: dict :type mapF: list :type fileName: str :type tfam: str :type prefix: str :returns: a tuple with the representation of the ``tped`` file (:py:class:`numpy.array`) as first element, and the updated position of the duplicated markers in the ``tped`` representation. Copies the ``tfam`` file into ``prefix.unique_snps.tfam``. While reading the ``tped`` file, creates a new one (``prefix.unique_snps.tped``) containing only unique markers. """ # Copying the tfam file try: shutil.copy(tfam, prefix + ".unique_snps.tfam") except IOError: msg = "%s: can't write file" % prefix + ".unique_snps.tfam" raise ProgramError(msg) tped = [] updatedSNPs = defaultdict(list) outputFile = None try: outputFile = open(prefix + ".unique_snps.tped", "w") except IOError: msg = "%s: can't write to file" % prefix + ".unique_snps.tped" raise ProgramError(msg) nbSNP = 0 with open(fileName, 'r') as inputFile: for line in inputFile: nbSNP += 1 row = line.rstrip("\r\n").split("\t") snpInfo = row[:4] genotype = [i.upper() for i in row[4:]] chromosome = snpInfo[0] position = snpInfo[3] if (chromosome, position) in uniqueSNPs: # Printing the new TPED file (unique SNPs only) print >>outputFile, "\t".join(snpInfo + genotype) else: # Saving the TPED file (duplicated samples only) currPos = len(tped) tped.append(tuple(snpInfo + genotype)) updatedSNPs[(chromosome, position)].append(currPos) outputFile.close() if len(mapF) != nbSNP: msg = "%(fileName)s: no the same number of SNPs than MAP " \ "file" % locals() raise ProgramError(msg) tped = np.array(tped) return tped, updatedSNPs
[ "def", "processTPED", "(", "uniqueSNPs", ",", "mapF", ",", "fileName", ",", "tfam", ",", "prefix", ")", ":", "# Copying the tfam file", "try", ":", "shutil", ".", "copy", "(", "tfam", ",", "prefix", "+", "\".unique_snps.tfam\"", ")", "except", "IOError", ":"...
32.942029
20.188406
def prepare_data(self): ''' Method returning data passed to template. Subclasses can override it. ''' value = self.get_raw_value() return dict(widget=self, field=self.field, value=value, readonly=not self.field.writable)
[ "def", "prepare_data", "(", "self", ")", ":", "value", "=", "self", ".", "get_raw_value", "(", ")", "return", "dict", "(", "widget", "=", "self", ",", "field", "=", "self", ".", "field", ",", "value", "=", "value", ",", "readonly", "=", "not", "self"...
31.9
12.5
def next(self): """ Returns the next element from the array. :return: the next array element object, wrapped as JavaObject if not null :rtype: JavaObject or None """ if self.index < self.length: index = self.index self.index += 1 return self.data[index] else: raise StopIteration()
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "index", "<", "self", ".", "length", ":", "index", "=", "self", ".", "index", "self", ".", "index", "+=", "1", "return", "self", ".", "data", "[", "index", "]", "else", ":", "raise", "StopI...
28.769231
14.307692
def find_transported_elements(rxn): """ Return a dictionary showing the amount of transported elements of a rxn. Collects the elements for each metabolite participating in a reaction, multiplies the amount by the metabolite's stoichiometry in the reaction and bins the result according to the compartment that metabolite is in. This produces a dictionary of dictionaries such as this ``{'p': {'C': -1, 'H': -4}, c: {'C': 1, 'H': 4}}`` which shows the transported entities. This dictionary is then simplified to only include the non-zero elements of one single compartment i.e. showing the precise elements that are transported. Parameters ---------- rxn : cobra.Reaction Any cobra.Reaction containing metabolites. """ element_dist = defaultdict() # Collecting elements for each metabolite. for met in rxn.metabolites: if met.compartment not in element_dist: # Multiplication by the metabolite stoichiometry. element_dist[met.compartment] = \ {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} else: x = {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} y = element_dist[met.compartment] element_dist[met.compartment] = \ {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)} delta_dict = defaultdict() # Simplification of the resulting dictionary of dictionaries. for elements in itervalues(element_dist): delta_dict.update(elements) # Only non-zero values get included in the returned delta-dict. delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0} return delta_dict
[ "def", "find_transported_elements", "(", "rxn", ")", ":", "element_dist", "=", "defaultdict", "(", ")", "# Collecting elements for each metabolite.", "for", "met", "in", "rxn", ".", "metabolites", ":", "if", "met", ".", "compartment", "not", "in", "element_dist", ...
43.425
18.925
def inside(self, other): """ Return true if this rectangle is inside the given shape. """ return ( self.left >= other.left and self.right <= other.right and self.top <= other.top and self.bottom >= other.bottom)
[ "def", "inside", "(", "self", ",", "other", ")", ":", "return", "(", "self", ".", "left", ">=", "other", ".", "left", "and", "self", ".", "right", "<=", "other", ".", "right", "and", "self", ".", "top", "<=", "other", ".", "top", "and", "self", "...
45.5
5.5
def css(src, dest=False, shift=4): """Beautify CSS Args: src: css string or path-to-file with text to beautify (mandatory) dest: path-to-file to save beautified css string; if file doesn't exist it is created automatically; (optional) if this arg is skept function returns string shift: can be either integer or string 1) if int - number of spaces in tab, for example shift=8 <a> <b></b> </a> 2) if string - pattern, for example shift='....' <a> ....<b></b> </a> Returns: 1) beautified XML string if dest is not provided 2) length of saved file if dest is provided Example: css('path/to/file.css') css('path/to/file.css', 'path/to/save/result.css') css('path/to/file.css', 8) css('path/to/file.css', '____') css('path/to/file.css', 'path/to/save/result.css', 2) """ if not dest: # all default return _css(_text(src)) else: if type(dest) is int: #dest is skept, custom pattern provided at dist place return _css(_text(src), dest) else: with open(dest, 'w') as f2: return f2.write(_css(_text(src), shift))
[ "def", "css", "(", "src", ",", "dest", "=", "False", ",", "shift", "=", "4", ")", ":", "if", "not", "dest", ":", "# all default", "return", "_css", "(", "_text", "(", "src", ")", ")", "else", ":", "if", "type", "(", "dest", ")", "is", "int", ":...
36.777778
19.055556
def execute(self): """ params = { "ApexCode" : "None", "ApexProfiling" : "01pd0000001yXtYAAU", "Callout" : True, "Database" : 1, "ExpirationDate" : 3, "ScopeId" : "", "System" : "", "TracedEntityId" : "", "Validation" : "", "Visualforce" : "", "Workflow" : "" } """ if 'type' not in self.params: raise MMException("Please include the type of log, 'user' or 'apex'") if 'debug_categories' not in self.params: raise MMException("Please include debug categories in dictionary format: e.g.: {'ApexCode':'DEBUG', 'Visualforce':'INFO'}") request = {} if self.params['type'] == 'user': request['ScopeId'] = None request['TracedEntityId'] = self.params.get('user_id', config.sfdc_client.user_id) elif self.params['type'] == 'apex': #request['ScopeId'] = 'user' request['ScopeId'] = config.sfdc_client.user_id request['TracedEntityId'] = self.params['apex_id'] for c in self.params['debug_categories']: if 'category' in c: request[c['category']] = c['level'] else: request[c] = self.params['debug_categories'][c] request['ExpirationDate'] = util.get_iso_8601_timestamp(int(float(self.params.get('expiration', 30)))) config.logger.debug(self.params['debug_categories']) config.logger.debug("Log creation reuqest--->") config.logger.debug(request) create_result = config.sfdc_client.create_trace_flag(request) config.logger.debug("Log creation response--->") config.logger.debug(create_result) if type(create_result) is list: create_result = create_result[0] if type(create_result) is not str and type(create_result) is not unicode: return json.dumps(create_result) else: return create_result
[ "def", "execute", "(", "self", ")", ":", "if", "'type'", "not", "in", "self", ".", "params", ":", "raise", "MMException", "(", "\"Please include the type of log, 'user' or 'apex'\"", ")", "if", "'debug_categories'", "not", "in", "self", ".", "params", ":", "rais...
40.320755
18.207547
def check_absolute_refs(self, construction_table): """Checks first three rows of ``construction_table`` for linear references Checks for each index from first to third row of the ``construction_table``, if the references are colinear. This case has to be specially treated, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references. (to fix translational and rotational degrees of freedom) Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices. """ c_table = construction_table problem_index = [i for i in c_table.index[:3] if not self._has_valid_abs_ref(i, c_table)] return problem_index
[ "def", "check_absolute_refs", "(", "self", ",", "construction_table", ")", ":", "c_table", "=", "construction_table", "problem_index", "=", "[", "i", "for", "i", "in", "c_table", ".", "index", "[", ":", "3", "]", "if", "not", "self", ".", "_has_valid_abs_ref...
42
20.05
def deploy(self, *lambdas):
    """Package and deploy this lambda to AWS.

    Updates the function code if a function named ``self.lambda_name``
    already exists, otherwise creates it; runtime, handler, description,
    timeout and memory size come from LAMBDA_* environment variables
    (via ``yaep.env``).

    Note: the ``*lambdas`` positional arguments are currently unused;
    kept for interface compatibility.

    Raises:
        ArgumentsError: if no AWS role is configured.
    """
    if not self.role:
        logger.error('Missing AWS Role')
        raise ArgumentsError('Role required')

    logger.debug('Deploying lambda {}'.format(self.lambda_name))

    # Zip the code once; reused by both the create and update paths.
    zfh = self.package()

    if self.lambda_name in self.get_function_names():
        logger.info('Updating {} lambda'.format(self.lambda_name))
        response = self.client.update_function_code(
            FunctionName=self.lambda_name,
            ZipFile=zfh.getvalue(),
            Publish=True
        )
    else:
        logger.info('Adding new {} lambda'.format(self.lambda_name))
        response = self.client.create_function(
            FunctionName=self.lambda_name,
            Runtime=yaep.env(
                'LAMBDA_RUNTIME',
                'python2.7'
            ),
            Role=self.role,
            Handler=yaep.env(
                'LAMBDA_HANDLER',
                'lambda_function.lambda_handler'
            ),
            Code={
                'ZipFile': zfh.getvalue(),
            },
            Description=yaep.env(
                'LAMBDA_DESCRIPTION',
                'Lambda code for {}'.format(self.lambda_name)
            ),
            Timeout=yaep.env(
                'LAMBDA_TIMEOUT',
                3,
                convert_booleans=False,
                type_class=int
            ),
            MemorySize=yaep.env(
                'LAMBDA_MEMORY_SIZE',
                128,
                convert_booleans=False,
                type_class=int
            ),
            Publish=True
        )

    status_code = response.get(
        'ResponseMetadata', {}
    ).get('HTTPStatusCode')

    if status_code in [200, 201]:
        # fixed typo in fallback string: was 'Unkown'
        logger.info('Successfully deployed {} version {}'.format(
            self.lambda_name,
            response.get('Version', 'Unknown')
        ))
    else:
        logger.error('Error deploying {}: {}'.format(
            self.lambda_name,
            response
        ))
[ "def", "deploy", "(", "self", ",", "*", "lambdas", ")", ":", "if", "not", "self", ".", "role", ":", "logger", ".", "error", "(", "'Missing AWS Role'", ")", "raise", "ArgumentsError", "(", "'Role required'", ")", "logger", ".", "debug", "(", "'Deploying lam...
31.941176
15.794118
def RelaxNGValidateCtxt(self, reader, options):
    """Validate the document against this RelaxNG schema context while it
    is processed by ``reader``.

    Must be activated before the reader's first Read(); a None context
    deactivates RelaxNG validation.

    Returns the integer status code from the underlying libxml2 call.
    """
    reader__o = None if reader is None else reader._o
    return libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
[ "def", "RelaxNGValidateCtxt", "(", "self", ",", "reader", ",", "options", ")", ":", "if", "reader", "is", "None", ":", "reader__o", "=", "None", "else", ":", "reader__o", "=", "reader", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlTextReaderRelaxNGValidateC...
51.222222
15.555556
def retrieve_diaspora_hcard(handle):
    """Fetch the remote Diaspora hCard document for ``handle``.

    :arg handle: Remote handle to retrieve
    :return: str (HTML document), or None if fetching failed
    """
    webfinger_data = retrieve_and_parse_diaspora_webfinger(handle)
    document, _code, error = fetch_document(webfinger_data.get("hcard_url"))
    return None if error else document
[ "def", "retrieve_diaspora_hcard", "(", "handle", ")", ":", "webfinger", "=", "retrieve_and_parse_diaspora_webfinger", "(", "handle", ")", "document", ",", "code", ",", "exception", "=", "fetch_document", "(", "webfinger", ".", "get", "(", "\"hcard_url\"", ")", ")"...
30
15
def get_exchange_rates(self, base, targets=None):
    """Return a dict mapping each target currency to its exchange rate
    against ``base``.

    When ``targets`` is None, all available currencies are used.
    Per-rate errors are suppressed (``raise_errors=False``).
    """
    if targets is None:
        targets = get_available_currencies()
    rates = {}
    for target in targets:
        rates[target] = self.get_exchange_rate(base, target, raise_errors=False)
    return rates
[ "def", "get_exchange_rates", "(", "self", ",", "base", ",", "targets", "=", "None", ")", ":", "if", "targets", "is", "None", ":", "targets", "=", "get_available_currencies", "(", ")", "return", "{", "t", ":", "self", ".", "get_exchange_rate", "(", "base", ...
48.833333
19.666667
def print_tokens(output, tokens, style):
    """
    Render a list of (Token, text) tuples to the output, styling each
    chunk according to the given style.
    """
    assert isinstance(output, Output)
    assert isinstance(style, Style)

    # Start from a clean terminal state.
    output.reset_attributes()
    output.enable_autowrap()

    # Cache token -> attrs lookups across chunks.
    attr_cache = _TokenToAttrsCache(style.get_attrs_for_token)

    for token, text in tokens:
        token_attrs = attr_cache[token]
        if token_attrs:
            output.set_attributes(token_attrs)
        else:
            output.reset_attributes()
        output.write(text)

    # Leave the terminal in a clean state.
    output.reset_attributes()
    output.flush()
[ "def", "print_tokens", "(", "output", ",", "tokens", ",", "style", ")", ":", "assert", "isinstance", "(", "output", ",", "Output", ")", "assert", "isinstance", "(", "style", ",", "Style", ")", "# Reset first.", "output", ".", "reset_attributes", "(", ")", ...
23.888889
18.185185
def get_predicate_text(sent_tokens: List[Token], tags: List[str]) -> str:
    """Return the predicate of this prediction as a space-joined string of
    the tokens at the predicate indices."""
    predicate_tokens = (sent_tokens[idx].text for idx in get_predicate_indices(tags))
    return " ".join(predicate_tokens)
[ "def", "get_predicate_text", "(", "sent_tokens", ":", "List", "[", "Token", "]", ",", "tags", ":", "List", "[", "str", "]", ")", "->", "str", ":", "return", "\" \"", ".", "join", "(", "[", "sent_tokens", "[", "pred_id", "]", ".", "text", "for", "pred...
39.833333
10.833333
def _clear_inspect(self):
    """Reset all inspect state so the pipeline can be re-executed."""
    self.trace_info = defaultdict(list)
    self.process_tags = {}
    self.process_stats = {}
    self.samples = []
    self.stored_ids = []
    self.stored_log_ids = []
    self.time_start = None
    self.time_stop = None
    self.execution_command = None
    self.nextflow_version = None
    self.abort_cause = None
    self._c = 0
    # Reset the running-status bookkeeping of every process.
    for proc in self.processes.values():
        proc["barrier"] = "W"
        for status in ("submitted", "finished", "failed", "retry"):
            proc[status] = set()
[ "def", "_clear_inspect", "(", "self", ")", ":", "self", ".", "trace_info", "=", "defaultdict", "(", "list", ")", "self", ".", "process_tags", "=", "{", "}", "self", ".", "process_stats", "=", "{", "}", "self", ".", "samples", "=", "[", "]", "self", "...
33.45
10.95
def get_statements_noprior(self):
    """Return all non-prior Statements flattened into a single list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        All the INDRA Statements in the model, excluding the prior.
    """
    stmts = []
    for key, stmt_list in self.stmts.items():
        if key != 'prior':
            stmts.extend(stmt_list)
    return stmts
[ "def", "get_statements_noprior", "(", "self", ")", ":", "stmt_lists", "=", "[", "v", "for", "k", ",", "v", "in", "self", ".", "stmts", ".", "items", "(", ")", "if", "k", "!=", "'prior'", "]", "stmts", "=", "[", "]", "for", "s", "in", "stmt_lists", ...
31.714286
18.214286
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
    """
    Create the DynamoDB table for async task return values

    Returns a ``(created, table_description)`` tuple: ``created`` is False
    when the table already existed, True when it was just created.

    :param table_name: name of the DynamoDB table to check/create
    :param read_capacity: provisioned read capacity units for a new table
    :param write_capacity: provisioned write capacity units for a new table
    """
    try:
        # If describe_table succeeds the table already exists -- nothing to do.
        dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
        return False, dynamodb_table

    # catch this exception (triggered if the table doesn't exist)
    except botocore.exceptions.ClientError:
        # Table keyed solely on the string attribute 'id'.
        dynamodb_table = self.dynamodb_client.create_table(
            AttributeDefinitions=[
                {
                    'AttributeName': 'id',
                    'AttributeType': 'S'
                }
            ],
            TableName=table_name,
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            ProvisionedThroughput = {
                'ReadCapacityUnits': read_capacity,
                'WriteCapacityUnits': write_capacity
            }
        )

        if dynamodb_table:
            try:
                self._set_async_dynamodb_table_ttl(table_name)
            except botocore.exceptions.ClientError:
                # this fails because the operation is async, so retry
                time.sleep(10)
                self._set_async_dynamodb_table_ttl(table_name)

    return True, dynamodb_table
[ "def", "create_async_dynamodb_table", "(", "self", ",", "table_name", ",", "read_capacity", ",", "write_capacity", ")", ":", "try", ":", "dynamodb_table", "=", "self", ".", "dynamodb_client", ".", "describe_table", "(", "TableName", "=", "table_name", ")", "return...
37.973684
17.184211
def get(self, file_id, **queryparams):
    """
    Fetch metadata for a single file in the File Manager.

    :param file_id: The unique id for the File Manager file.
    :type file_id: :py:class:`str`
    :param queryparams: The query string parameters
        queryparams['fields'] = []
        queryparams['exclude_fields'] = []
    """
    self.file_id = file_id
    request_url = self._build_path(file_id)
    return self._mc_client._get(url=request_url, **queryparams)
[ "def", "get", "(", "self", ",", "file_id", ",", "*", "*", "queryparams", ")", ":", "self", ".", "file_id", "=", "file_id", "return", "self", ".", "_mc_client", ".", "_get", "(", "url", "=", "self", ".", "_build_path", "(", "file_id", ")", ",", "*", ...
39.166667
14
def ReadSerializedDict(cls, json_dict):
    """Reads an attribute container from serialized dictionary form.

    Args:
      json_dict (dict[str, object]): JSON serialized objects.

    Returns:
      AttributeContainer: attribute container, or None for an empty/falsy
          input dictionary.

    Raises:
      TypeError: if the serialized dictionary does not contain an
          AttributeContainer.
    """
    if not json_dict:
        return None

    json_object = cls._ConvertDictToObject(json_dict)
    if not isinstance(json_object, containers_interface.AttributeContainer):
        raise TypeError('{0:s} is not an attribute container type.'.format(
            type(json_object)))

    return json_object
[ "def", "ReadSerializedDict", "(", "cls", ",", "json_dict", ")", ":", "if", "json_dict", ":", "json_object", "=", "cls", ".", "_ConvertDictToObject", "(", "json_dict", ")", "if", "not", "isinstance", "(", "json_object", ",", "containers_interface", ".", "Attribut...
30.952381
22.904762
def close(self):
    """
    Close the remote shell by deleting its resource on the server.
    :return:
    """
    shell_resource = ResourceLocator(CommandShell.ShellResource)
    shell_resource.add_selector('ShellId', self.__shell_id)
    self.session.delete(shell_resource)
[ "def", "close", "(", "self", ")", ":", "resource", "=", "ResourceLocator", "(", "CommandShell", ".", "ShellResource", ")", "resource", ".", "add_selector", "(", "'ShellId'", ",", "self", ".", "__shell_id", ")", "self", ".", "session", ".", "delete", "(", "...
28.625
13.875
def replaceChild(self, new_child: AbstractNode, old_child: AbstractNode) -> AbstractNode:
    """Swap ``old_child`` for ``new_child`` among this node's children.

    Returns the result of ``_replace_child`` (per DOM convention this is
    presumably the replaced node -- confirm in ``_replace_child``).
    """
    return self._replace_child(new_child, old_child)
[ "def", "replaceChild", "(", "self", ",", "new_child", ":", "AbstractNode", ",", "old_child", ":", "AbstractNode", ")", "->", "AbstractNode", ":", "return", "self", ".", "_replace_child", "(", "new_child", ",", "old_child", ")" ]
53.75
11.25
def backing_type_for(value):
    """Map a python value to its DynamoDB attribute-type code.

    ::

        4 -> 'N'
        ['x', 3] -> 'L'
        {2, 4} -> 'SS'

    Raises ValueError for types with no DynamoDB representation.
    """
    if isinstance(value, str):
        return "S"
    if isinstance(value, bytes):
        return "B"
    # bool must be tested before numbers.Number since
    # isinstance(True, numbers.Number) is True.
    if isinstance(value, bool):
        return "BOOL"
    if isinstance(value, numbers.Number):
        return "N"
    if isinstance(value, dict):
        return "M"
    if isinstance(value, list):
        return "L"
    if isinstance(value, set):
        if not value:
            # Arbitrary choice: Set(x) dumps an empty set the same for all x.
            return "SS"
        inner = next(iter(value))
        if isinstance(inner, str):
            return "SS"
        if isinstance(inner, bytes):
            return "BS"
        if isinstance(inner, numbers.Number):
            return "NS"
        raise ValueError(f"Unknown set type for inner value {inner!r}")
    raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
[ "def", "backing_type_for", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "vtype", "=", "\"S\"", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "vtype", "=", "\"B\"", "# NOTE: numbers.Number check must come **AFTER...
35.868421
17.631579
def clone(self, **kw):
    """Return a copy of this distribution; explicitly passed keyword
    arguments take precedence over the copied attributes."""
    for attr in ('project_name', 'version', 'py_version',
                 'platform', 'location', 'precedence'):
        kw.setdefault(attr, getattr(self, attr, None))
    kw.setdefault('metadata', self._provider)
    return self.__class__(**kw)
[ "def", "clone", "(", "self", ",", "*", "*", "kw", ")", ":", "names", "=", "'project_name version py_version platform location precedence'", "for", "attr", "in", "names", ".", "split", "(", ")", ":", "kw", ".", "setdefault", "(", "attr", ",", "getattr", "(", ...
50.571429
13.428571
def capture_objects(self):
    """!
    @brief Indexes of the input objects captured by each neuron.
    @details E.g. for a 2x2 network trained on 5 samples, if neuron #1 won
              object 1, neuron #2 won objects 0, 3, 4, neuron #3 won
              nothing and neuron #4 won object 2, the result is
              [ [1], [0, 3, 4], [], [2] ].

    @return (list) Indexes of captured objects by each neuron.

    """
    # When a C-core SOM backs this instance, refresh from the native side first.
    if self.__ccore_som_pointer is not None:
        self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
    return self._capture_objects
[ "def", "capture_objects", "(", "self", ")", ":", "if", "self", ".", "__ccore_som_pointer", "is", "not", "None", ":", "self", ".", "_capture_objects", "=", "wrapper", ".", "som_get_capture_objects", "(", "self", ".", "__ccore_som_pointer", ")", "return", "self", ...
47.2
30.733333
def update(self, friendly_name=values.unset, unique_name=values.unset,
           email=values.unset, cc_emails=values.unset, status=values.unset,
           verification_code=values.unset, verification_type=values.unset,
           verification_document_sid=values.unset, extension=values.unset,
           call_delay=values.unset):
    """
    Update the HostedNumberOrderInstance

    All parameters default to ``values.unset``; presumably only the
    explicitly supplied fields end up in the POST body (via ``values.of``)
    -- confirm against the Twilio helper library.

    :param unicode friendly_name: A human readable description of this resource.
    :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
    :param unicode email: Email.
    :param unicode cc_emails: A list of emails.
    :param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
    :param unicode verification_code: A verification code.
    :param HostedNumberOrderInstance.VerificationType verification_type: Verification Type.
    :param unicode verification_document_sid: Verification Document Sid
    :param unicode extension: Digits to dial after connecting the verification call.
    :param unicode call_delay: The number of seconds, between 0 and 60, to delay before initiating the verification call.

    :returns: Updated HostedNumberOrderInstance
    :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance
    """
    data = values.of({
        'FriendlyName': friendly_name,
        'UniqueName': unique_name,
        'Email': email,
        'CcEmails': serialize.map(cc_emails, lambda e: e),
        'Status': status,
        'VerificationCode': verification_code,
        'VerificationType': verification_type,
        'VerificationDocumentSid': verification_document_sid,
        'Extension': extension,
        'CallDelay': call_delay,
    })

    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )

    # Wrap the raw API payload back into an instance bound to the same sid.
    return HostedNumberOrderInstance(self._version, payload, sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "unique_name", "=", "values", ".", "unset", ",", "email", "=", "values", ".", "unset", ",", "cc_emails", "=", "values", ".", "unset", ",", "status", "=", "values", "....
48.5
25.690476
def subscribe_to_candles(self, pair, timeframe=None, **kwargs):
    """Subscribe to OHLC candle updates for the given pair.

    :param pair: str, symbol pair to request data for
    :param timeframe: str, one of {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h, 1D,
                      7D, 14D, 1M}; defaults to '1m'
    :param kwargs:
    :return:
    """
    valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
                 '7D', '14D', '1M']
    if not timeframe:
        timeframe = '1m'
    elif timeframe not in valid_tfs:
        raise ValueError("timeframe must be any of %s" % valid_tfs)

    # The identifier keeps the pair exactly as passed by the caller.
    identifier = ('candles', pair, timeframe)
    if not pair.startswith('t'):
        pair = 't' + pair
    key = ':'.join(('trade', timeframe, pair))
    self._subscribe('candles', identifier, key=key, **kwargs)
[ "def", "subscribe_to_candles", "(", "self", ",", "pair", ",", "timeframe", "=", "None", ",", "*", "*", "kwargs", ")", ":", "valid_tfs", "=", "[", "'1m'", ",", "'5m'", ",", "'15m'", ",", "'30m'", ",", "'1h'", ",", "'3h'", ",", "'6h'", ",", "'12h'", ...
41.047619
18.714286
def extend(self, schema):
    """Return a new schema equal to this one extended by another.

    For example extending ``tuple<rstring id, timestamp ts, float64 value>``
    with ``tuple<float32 score>`` results in
    ``tuple<rstring id, timestamp ts, float64 value, float32 score>``.

    Args:
        schema(StreamSchema): Schema to extend this schema by.

    Returns:
        StreamSchema: New schema that is an extension of this schema.

    Raises:
        TypeError: if this schema is a declared SPL type.
    """
    if self._spl_type:
        raise TypeError("Not supported for declared SPL types")
    this_schema = self.schema()
    other_schema = schema.schema()
    # Drop this schema's trailing '>' and the other's leading 'tuple<'.
    combined = this_schema[:-1] + ',' + other_schema[6:]
    return StreamSchema(combined)
[ "def", "extend", "(", "self", ",", "schema", ")", ":", "if", "self", ".", "_spl_type", ":", "raise", "TypeError", "(", "\"Not supported for declared SPL types\"", ")", "base", "=", "self", ".", "schema", "(", ")", "extends", "=", "schema", ".", "schema", "...
36.736842
22.947368
async def dump_varint_t(writer, type_or, pv):
    """
    Write ``pv`` as a little-endian integer tagged with ``type_or``.

    The two low bits of the encoded value carry the type mark; the value
    is shifted left by 2 to make room for it. The width in bytes is
    derived from the type mark.

    :param writer: async writer exposing ``awrite``
    :param type_or: type mark (also determines the encoded width)
    :param pv: integer value to encode
    :return: number of bytes written
    """
    width = int_mark_to_size(type_or)
    encoded = (pv << 2) | type_or

    # NOTE: _UINT_BUFFER is a shared module-level one-byte scratch buffer.
    buffer = _UINT_BUFFER
    for _ in range(width):
        buffer[0] = encoded & 0xff
        await writer.awrite(buffer)
        encoded >>= 8

    return width
[ "async", "def", "dump_varint_t", "(", "writer", ",", "type_or", ",", "pv", ")", ":", "width", "=", "int_mark_to_size", "(", "type_or", ")", "n", "=", "(", "pv", "<<", "2", ")", "|", "type_or", "buffer", "=", "_UINT_BUFFER", "for", "_", "in", "range", ...
19.736842
17.736842
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
    """Return the sum of ``self``.

    Delegates to the wrapped element's ``__array_ufunc__`` with a
    ``np.add`` reduction.

    See Also
    --------
    numpy.sum
    prod
    """
    reducer = self.elem.__array_ufunc__
    return reducer(np.add, 'reduce', self.elem,
                   axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
[ "def", "sum", "(", "self", ",", "axis", "=", "None", ",", "dtype", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "self", ".", "elem", ".", "__array_ufunc__", "(", "np", ".", "add", ",", "'reduce'", ",", ...
29.090909
17.181818
def __inner_eval(self, data_name, data_idx, feval=None):
    """Evaluate training or validation data.

    :param data_name: label used for the dataset in the returned tuples
    :param data_idx: 0 for the training set, i (>=1) for valid_sets[i-1]
    :param feval: optional custom metric callable; receives
        (predictions, dataset) and returns either a single
        (eval_name, value, is_higher_better) tuple or a list of them
    :return: list of (data_name, eval_name, value, is_higher_better) tuples
    """
    if data_idx >= self.__num_dataset:
        raise ValueError("Data_idx should be smaller than number of dataset")
    self.__get_eval_info()
    ret = []
    if self.__num_inner_eval > 0:
        # Fetch all built-in metric values for this dataset in one C call.
        result = np.zeros(self.__num_inner_eval, dtype=np.float64)
        tmp_out_len = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterGetEval(
            self.handle,
            ctypes.c_int(data_idx),
            ctypes.byref(tmp_out_len),
            result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
        if tmp_out_len.value != self.__num_inner_eval:
            raise ValueError("Wrong length of eval results")
        for i in range_(self.__num_inner_eval):
            ret.append((data_name, self.__name_inner_eval[i],
                        result[i], self.__higher_better_inner_eval[i]))
    if feval is not None:
        # Custom metric: evaluate on cached predictions for this dataset.
        if data_idx == 0:
            cur_data = self.train_set
        else:
            cur_data = self.valid_sets[data_idx - 1]
        feval_ret = feval(self.__inner_predict(data_idx), cur_data)
        if isinstance(feval_ret, list):
            for eval_name, val, is_higher_better in feval_ret:
                ret.append((data_name, eval_name, val, is_higher_better))
        else:
            eval_name, val, is_higher_better = feval_ret
            ret.append((data_name, eval_name, val, is_higher_better))
    return ret
[ "def", "__inner_eval", "(", "self", ",", "data_name", ",", "data_idx", ",", "feval", "=", "None", ")", ":", "if", "data_idx", ">=", "self", ".", "__num_dataset", ":", "raise", "ValueError", "(", "\"Data_idx should be smaller than number of dataset\"", ")", "self",...
48.84375
17.25
def is_all_field_none(self):
    """Whether every tracked field of this object is unset (None).

    :rtype: bool
    """
    return (self._id_ is None
            and self._created is None
            and self._updated is None
            and self._label_user_creator is None
            and self._content is None)
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_id_", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_created", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_updated", "is", "not", "None", ...
19.095238
18.904762
def imgAverage(images, copy=True):
    '''
    Return the pixel-wise average of many (possibly unloaded) images.

    Accumulates into a single float array so RAM usage stays minimal no
    matter how many images are given.
    '''
    first = images[0]
    out = imread(first, dtype='float')
    # imread may hand back the input array itself; copy so the caller's
    # array is not mutated by the accumulation below.
    if copy and id(first) == id(out):
        out = out.copy()
    for img in images[1:]:
        out += imread(img, dtype='float')
    out /= len(images)
    return out
[ "def", "imgAverage", "(", "images", ",", "copy", "=", "True", ")", ":", "i0", "=", "images", "[", "0", "]", "out", "=", "imread", "(", "i0", ",", "dtype", "=", "'float'", ")", "if", "copy", "and", "id", "(", "i0", ")", "==", "id", "(", "out", ...
22.6875
17.3125
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue):
    """
    Export Compound TSV/CSV tables (or score plots for the
    'score_plots' format).
    """
    if format == "score_plots":
        export_score_plots(infile)
        return

    if outfile is None:
        # Derive the output name from the input .osw path.
        suffix = ".csv" if outcsv else ".tsv"
        outfile = infile.split(".osw")[0] + suffix

    export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
[ "def", "export_compound", "(", "infile", ",", "outfile", ",", "format", ",", "outcsv", ",", "max_rs_peakgroup_qvalue", ")", ":", "if", "format", "==", "\"score_plots\"", ":", "export_score_plots", "(", "infile", ")", "else", ":", "if", "outfile", "is", "None",...
31.8125
18.9375
def merge_dict(dict_1, *other, **kw):
    """Merge two or more dicts (plus keyword args) into a new dict.

    Later mappings win on key conflicts; none of the inputs are mutated.
    """
    merged = dict(dict_1)
    for mapping in (*other, kw):
        merged.update(mapping)
    return merged
[ "def", "merge_dict", "(", "dict_1", ",", "*", "other", ",", "*", "*", "kw", ")", ":", "tmp", "=", "dict_1", ".", "copy", "(", ")", "for", "x", "in", "other", ":", "tmp", ".", "update", "(", "x", ")", "tmp", ".", "update", "(", "kw", ")", "ret...
27.857143
15.428571
def bottom_sections(self):
    """
    The number of cells that touch the bottom side.

    Returns
    -------
    sections : int
        The number of sections along the bottom line
    """
    last_line = self.text.rsplit('\n', 1)[-1]
    # '+' characters delimit sections; N sections produce N+2 split parts.
    return len(last_line.split('+')) - 2
[ "def", "bottom_sections", "(", "self", ")", ":", "bottom_line", "=", "self", ".", "text", ".", "split", "(", "'\\n'", ")", "[", "-", "1", "]", "sections", "=", "len", "(", "bottom_line", ".", "split", "(", "'+'", ")", ")", "-", "2", "return", "sect...
24.615385
16.615385
def search(query, results=10, suggestion=False):
    '''
    Run a Wikipedia full-text search for `query`.

    Keyword arguments:

    * results - the maximum number of results returned
    * suggestion - if True, return results and suggestion (if any) in a tuple
    '''

    search_params = {
        'list': 'search',
        'srprop': '',
        'srlimit': results,
        'limit': results,
        'srsearch': query
    }
    if suggestion:
        search_params['srinfo'] = 'suggestion'

    raw_results = _wiki_request(search_params)

    if 'error' in raw_results:
        info = raw_results['error']['info']
        # Timeouts and full pool queues are surfaced as a dedicated error.
        if info in ('HTTP request timed out.', 'Pool queue is full'):
            raise HTTPTimeoutError(query)
        raise WikipediaException(info)

    search_results = (d['title'] for d in raw_results['query']['search'])

    if suggestion:
        if raw_results['query'].get('searchinfo'):
            return list(search_results), raw_results['query']['searchinfo']['suggestion']
        return list(search_results), None

    return list(search_results)
[ "def", "search", "(", "query", ",", "results", "=", "10", ",", "suggestion", "=", "False", ")", ":", "search_params", "=", "{", "'list'", ":", "'search'", ",", "'srprop'", ":", "''", ",", "'srlimit'", ":", "results", ",", "'limit'", ":", "results", ","...
26.567568
24.837838
def get_intel_compiler_top(version, abi):
    """
    Return the main path to the top-level dir of the Intel compiler,
    using the given version.
    The compiler will be in <top>/bin/icl.exe (icc on linux),
    the include dir is <top>/include, etc.

    Raises NoRegistryModuleError (Windows, no registry module) or
    MissingDirError (no matching installation found).
    """
    if is_windows:
        if not SCons.Util.can_read_reg:
            raise NoRegistryModuleError("No Windows registry module was found")
        top = get_intel_registry_value('ProductDir', version, abi)
        # Map ABI aliases onto the directory names Intel uses on disk.
        archdir={'x86_64': 'intel64',
                 'amd64' : 'intel64',
                 'em64t' : 'intel64',
                 'x86'   : 'ia32',
                 'i386'  : 'ia32',
                 'ia32'  : 'ia32'
        }[abi] # for v11 and greater
        # pre-11, icl was in Bin. 11 and later, it's in Bin/<abi> apparently.
        if not os.path.exists(os.path.join(top, "Bin", "icl.exe")) \
              and not os.path.exists(os.path.join(top, "Bin", abi, "icl.exe")) \
              and not os.path.exists(os.path.join(top, "Bin", archdir, "icl.exe")):
            raise MissingDirError("Can't find Intel compiler in %s"%(top))
    elif is_mac or is_linux:
        def find_in_2008style_dir(version):
            # first dir is new (>=9.0) style, second is old (8.0) style.
            dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
            if abi == 'x86_64':
                dirs=('/opt/intel/cce/%s',)  # 'e' stands for 'em64t', aka x86_64 aka amd64
            top=None
            for d in dirs:
                if os.path.exists(os.path.join(d%version, "bin", "icc")):
                    top = d%version
                    break
            return top
        def find_in_2010style_dir(version):
            dirs=('/opt/intel/Compiler/%s/*'%version)
            # typically /opt/intel/Compiler/11.1/064 (then bin/intel64/icc)
            dirs=glob.glob(dirs)
            # find highest sub-version number by reverse sorting and picking first existing one.
            dirs.sort()
            dirs.reverse()
            top=None
            for d in dirs:
                if (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
                   os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
                    top = d
                    break
            return top
        def find_in_2011style_dir(version):
            # The 2011 (compiler v12) dirs are inconsistent, so just redo the search from
            # get_all_compiler_versions and look for a match (search the newest form first)
            top=None
            for d in glob.glob('/opt/intel/composer_xe_*'):
                # Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
                # The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
                m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
                if m:
                    cur_ver = "%s.%s"%(m.group(1), m.group(2))
                    if cur_ver == version and \
                        (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
                        os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
                        top = d
                        break
            if not top:
                for d in glob.glob('/opt/intel/composerxe-*'):
                    # Typical dir here is /opt/intel/composerxe-2011.4.184
                    m = re.search(r'([0-9][0-9.]*)$', d)
                    if m and m.group(1) == version and \
                        (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
                        os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
                        top = d
                        break
            return top
        def find_in_2016style_dir(version):
            # The 2016 (compiler v16) dirs are inconsistent from previous.
            top = None
            for d in glob.glob('/opt/intel/compilers_and_libraries_%s/linux'%version):
                if os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or os.path.exists(os.path.join(d, "bin", "intel64", "icc")):
                    top = d
                    break
            return top

        # Probe newest layout first, falling back to older layouts.
        top = find_in_2016style_dir(version) or find_in_2011style_dir(version) or find_in_2010style_dir(version) or find_in_2008style_dir(version)
        # print "INTELC: top=",top
        if not top:
            # NOTE(review): `top` is always None on this path, so the message
            # renders "in None" -- consider dropping it from the format string.
            raise MissingDirError("Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi))
    return top
[ "def", "get_intel_compiler_top", "(", "version", ",", "abi", ")", ":", "if", "is_windows", ":", "if", "not", "SCons", ".", "Util", ".", "can_read_reg", ":", "raise", "NoRegistryModuleError", "(", "\"No Windows registry module was found\"", ")", "top", "=", "get_in...
49.797753
23.168539
def var(self, tensor_type, last_dim=0, test_shape=None):
    """Alias of :func:`deepy.tensor.var`; see that function for details."""
    from deepy.tensor import var as _var
    return _var(tensor_type, last_dim=last_dim, test_shape=test_shape)
[ "def", "var", "(", "self", ",", "tensor_type", ",", "last_dim", "=", "0", ",", "test_shape", "=", "None", ")", ":", "from", "deepy", ".", "tensor", "import", "var", "return", "var", "(", "tensor_type", ",", "last_dim", "=", "last_dim", ",", "test_shape",...
37.333333
9.333333
def auth_config(self, stage=None):
    """Build an AuthConfig for the given stage (live by default)."""
    section = 'stages.{}'.format(stage) if stage else 'stages.live'

    # Credentials: prefer the stage section, fall back to the global auth section.
    try:
        username = self.lookup(section, 'username')
        password = self.lookup(section, 'password')
    except MissingConfigurationError:
        username = self.lookup('auth', 'username')
        password = self.lookup('auth', 'password')

    if stage in self.known_auth_configurations:
        return AuthConfig(
            username,
            password,
            **self.known_auth_configurations[stage]
        )

    # Cognito pool: explicit id/region settings, or parsed out of an ARN.
    try:
        cognito_pool_id = self.lookup(section, 'cognito_pool_id')
        cognito_region = self.lookup(section, 'cognito_region')
    except MissingConfigurationError:
        match = arn_re.match(self.lookup(section, 'cognito_pool'))
        if not match:
            raise ConfigurationError('Cognito Pool value must be an ARN')
        cognito_pool_id = match.group('resource')
        cognito_region = match.group('region')

    cognito_client_id = self.lookup(section, 'cognito_client_id')

    return AuthConfig(
        username,
        password,
        cognito_pool_id,
        cognito_region,
        cognito_client_id,
    )
[ "def", "auth_config", "(", "self", ",", "stage", "=", "None", ")", ":", "if", "stage", ":", "section", "=", "'stages.{}'", ".", "format", "(", "stage", ")", "else", ":", "section", "=", "'stages.live'", "try", ":", "username", "=", "self", ".", "lookup...
35.972222
19.75
def set(self, key, value, **kwargs):
    """
    Set the value of a Parameter in the ParameterSet.

    If :func:`get` would retrieve a Parameter, this will set the
    value of that parameter.

    Or you can provide 'value@...' or 'default_unit@...', etc
    to specify what attribute to set.

    :parameter str key: the twig (called key here to be analagous
        to a normal dict)
    :parameter value: value to set
    :parameter **kwargs: other filter parameters (must result in
        returning a single :class:`Parameter`)
    :return: the value of the :class:`Parameter` after setting the
        new value (including converting units if applicable)
    """
    twig = key

    # An attribute prefix ('value@', 'quantity@', ...) selects which setter
    # to call on the matched parameter; otherwise set_value is used.
    method = None
    twigsplit = re.findall(r"[\w']+", twig)
    if twigsplit[0] == 'value':
        twig = '@'.join(twigsplit[1:])
        method = 'set_value'
    elif twigsplit[0] == 'quantity':
        twig = '@'.join(twigsplit[1:])
        method = 'set_quantity'
    elif twigsplit[0] in ['unit', 'default_unit']:
        twig = '@'.join(twigsplit[1:])
        method = 'set_default_unit'
    elif twigsplit[0] in ['timederiv']:
        twig = '@'.join(twigsplit[1:])
        method = 'set_timederiv'
    elif twigsplit[0] in ['description']:
        # descriptions are read-only
        raise KeyError("cannot set {} of {}".format(twigsplit[0], '@'.join(twigsplit[1:])))

    # With the 'dict_set_all' setting enabled, a multi-match twig fans out:
    # recurse once per matched parameter with a fully-qualified twig.
    if self._bundle is not None and self._bundle.get_setting('dict_set_all').get_value() and len(self.filter(twig=twig, **kwargs)) > 1:
        # then we need to loop through all the returned parameters and call set on them
        for param in self.filter(twig=twig, **kwargs).to_list():
            self.set('{}@{}'.format(method, param.twig) if method is not None else param.twig, value)
    else:
        if method is None:
            return self.set_value(twig=twig, value=value, **kwargs)
        else:
            param = self.get_parameter(twig=twig, **kwargs)
            return getattr(param, method)(value)
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "*", "*", "kwargs", ")", ":", "twig", "=", "key", "method", "=", "None", "twigsplit", "=", "re", ".", "findall", "(", "r\"[\\w']+\"", ",", "twig", ")", "if", "twigsplit", "[", "0", "]", "=...
42.163265
20.857143
def register_transformer(self, transformer):
    """Register a transformer instance, ignoring duplicates, and keep the
    registry sorted."""
    if transformer not in self._transformers:
        self._transformers.append(transformer)
    # Re-sort unconditionally, matching the original behavior even when
    # the transformer was already registered.
    self.sort_transformers()
[ "def", "register_transformer", "(", "self", ",", "transformer", ")", ":", "if", "transformer", "not", "in", "self", ".", "_transformers", ":", "self", ".", "_transformers", ".", "append", "(", "transformer", ")", "self", ".", "sort_transformers", "(", ")" ]
45
5.4
def apply_string_substitutions(
    inputs, substitutions, inverse=False, case_insensitive=False,
    unused_substitutions="ignore",
):
    """Apply a number of substitutions to a string(s).

    All substitutions are applied effectively at once, so they never cascade
    into each other; where substitutions overlap, the longer key wins.

    Parameters
    ----------
    inputs : str, list of str
        The string(s) to which we want to apply the substitutions.
    substitutions : dict
        Mapping from the substrings to replace to their replacements.
    inverse : bool
        If True, swap keys and values before substituting.
    case_insensitive : bool
        If True, match the keys case-insensitively.
    unused_substitutions : {"ignore", "warn", "raise"}, default ignore
        What to do when an input has no applicable substitution.

    Returns
    -------
    ``type(input)``
        The input with substitutions performed.

    Examples
    --------
    >>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"})
    'Hello BobBob'

    >>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"}, inverse=True)
    'Hello JimJim'
    """
    if inverse:
        substitutions = {want: have for have, want in substitutions.items()}

    # Conflicting substitutions can only arise once casing is ignored.
    if case_insensitive:
        _check_duplicate_substitutions(substitutions)

    if unused_substitutions != "ignore":
        _check_unused_substitutions(
            substitutions, inputs, unused_substitutions, case_insensitive
        )

    pattern = _compile_replacement_regexp(
        substitutions, case_insensitive=case_insensitive
    )

    result = deepcopy(inputs)
    if isinstance(result, str):
        return _multiple_replace(result, substitutions, pattern)
    return [_multiple_replace(item, substitutions, pattern) for item in result]
[ "def", "apply_string_substitutions", "(", "inputs", ",", "substitutions", ",", "inverse", "=", "False", ",", "case_insensitive", "=", "False", ",", "unused_substitutions", "=", "\"ignore\"", ",", ")", ":", "if", "inverse", ":", "substitutions", "=", "{", "v", ...
36.495327
30.149533
def album(self):
    """album as :class:`Album` object

    Built lazily from the song's cached metadata on first access and
    memoized on ``self._album`` thereafter.
    """
    if not self._album:
        self._album = Album(
            self._album_id,
            self._album_name,
            self._artist_id,
            self._artist_name,
            self._cover_url,
            self._connection,
        )
    return self._album
[ "def", "album", "(", "self", ")", ":", "if", "not", "self", ".", "_album", ":", "self", ".", "_album", "=", "Album", "(", "self", ".", "_album_id", ",", "self", ".", "_album_name", ",", "self", ".", "_artist_id", ",", "self", ".", "_artist_name", ","...
36.333333
14.555556
def register_default_action(self, file_pattern, action_function):
    """ Default action used if no compatible action is found.

    Args:
        file_pattern: A :program:`fnmatch` pattern for the files concerned by this action.
        action_function: The callable applied to matching files; must satisfy
            ``__is_function_action`` or an error is reported.

    Warning:
        Be careful when defining a default action. This action is be applied to **all** template
        files for which no compatible action is found. You might want to prefer declare explicit
        actions than to rely on this implicit default action. Use at your own risks.

        That said, if you have lots of default cases, this default action can be very convenient
        and avoid lots of unnecessary action declarations.
    """
    # NOTE(review): if log_error only logs (does not raise/abort), execution
    # falls through both checks and the default action is overwritten below
    # anyway -- confirm log_error's behavior.
    if self.__default_action is not None:
        self.log_error('Default action function already exist.')

    if not self.__is_function_action(action_function):
        self.log_error('Attached default function is not an action function.')

    # Wrap pattern + callable and store as the fallback used when no
    # explicit action matches a template file.
    self.__default_action = GeneratorAction(file_pattern=file_pattern,
                                            action_function=action_function)
[ "def", "register_default_action", "(", "self", ",", "file_pattern", ",", "action_function", ")", ":", "if", "self", ".", "__default_action", "is", "not", "None", ":", "self", ".", "log_error", "(", "'Default action function already exist.'", ")", "if", "not", "sel...
52.333333
36.333333
def path_dispatch1(mname, returns_model):
    """
    Decorator for methods that accept path as a first argument.

    Builds a wrapper that extracts ``path`` from the call, resolves it to a
    (prefix, manager, manager-relative path) triple, and forwards the call to
    the named method on the resolved manager. When ``returns_model`` is true
    and the path carried a prefix, the prefix is re-applied to the result.
    """
    def _wrapper(self, *args, **kwargs):
        path, args = _get_arg('path', args, kwargs)
        prefix, mgr, mgr_path = _resolve_path(path, self.managers)
        out = getattr(mgr, mname)(mgr_path, *args, **kwargs)
        if not (returns_model and prefix):
            return out
        return _apply_prefix(prefix, out)
    return _wrapper
[ "def", "path_dispatch1", "(", "mname", ",", "returns_model", ")", ":", "def", "_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "path", ",", "args", "=", "_get_arg", "(", "'path'", ",", "args", ",", "kwargs", ")", "prefix"...
34.214286
14.214286
def log_interp(x, xp, *args, **kwargs):
    """Wrap log_interpolate_1d for deprecated log_interp.

    Deprecated alias kept for backward compatibility; new code should call
    ``log_interpolate_1d`` directly. All arguments are forwarded unchanged.
    """
    return log_interpolate_1d(x, xp, *args, **kwargs)
[ "def", "log_interp", "(", "x", ",", "xp", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "log_interpolate_1d", "(", "x", ",", "xp", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
50.666667
4.666667
def _get_parts_of_format_string(resolved_string, literal_texts, format_specs):
    """
    Inner function of reverse_format: return the resolved value for each
    field in the pattern.

    :param resolved_string: the already-formatted string to decompose
    :param literal_texts: literal fragments between fields (as produced by
        ``string.Formatter.parse``)
    :param format_specs: format spec of each field (``None`` for fields
        without one)
    :returns: list of substrings that filled each field
    :raises ValueError: if ``resolved_string`` does not match the pattern
    """
    _text = resolved_string
    bits = []
    # Strip a trailing literal so the loop only deals with literals that are
    # followed by a field.
    if literal_texts[-1] != '' and _text.endswith(literal_texts[-1]):
        _text = _text[:-len(literal_texts[-1])]
        literal_texts = literal_texts[:-1]
        format_specs = format_specs[:-1]
    for i, literal_text in enumerate(literal_texts):
        if literal_text != '':
            if literal_text not in _text:
                raise ValueError(("Resolved string must match pattern. "
                                  "'{}' not found.".format(literal_text)))
            bit, _text = _text.split(literal_text, 1)
            if bit:
                bits.append(bit)
        elif i == 0:
            # Pattern starts with a field: nothing to split off yet.
            continue
        else:
            # Two adjacent fields: use the previous field's explicit width to
            # know where one value ends and the next begins.
            try:
                format_spec = _validate_format_spec(format_specs[i-1])
                bits.append(_text[0:format_spec])
                _text = _text[format_spec:]
            except Exception:
                # FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                if i == len(format_specs) - 1:
                    format_spec = _validate_format_spec(format_specs[i])
                    bits.append(_text[:-format_spec])
                    bits.append(_text[-format_spec:])
                    _text = []
                else:
                    _validate_format_spec(format_specs[i-1])
    if _text:
        bits.append(_text)
    if len(bits) > len([fs for fs in format_specs if fs is not None]):
        bits = bits[1:]
    return bits
[ "def", "_get_parts_of_format_string", "(", "resolved_string", ",", "literal_texts", ",", "format_specs", ")", ":", "_text", "=", "resolved_string", "bits", "=", "[", "]", "if", "literal_texts", "[", "-", "1", "]", "!=", "''", "and", "_text", ".", "endswith", ...
37.292683
18.512195
def update_share_image(liststore, tree_iters, col, large_col, pcs_files,
                       dir_name, icon_size, large_icon_size):
    '''Download file thumbnails and show them in the liststore.

    Two image columns are updated at once, each with a different scale:
    ``col`` gets a small icon scaled to ``icon_size`` and ``large_col`` gets
    the full-size pixbuf.

    pcs_files - list of dicts; must contain 'fs_id' and (optionally) 'thumbs'.
    dir_name - cache directory; downloaded images are stored there.
    '''
    def update_image(filepath, tree_iter):
        # Load the downloaded image and push both scaled variants into the
        # liststore row (runs on the GTK main loop via GLib.idle_add).
        try:
            tree_path = liststore.get_path(tree_iter)
            if tree_path is None:
                return
            pix = GdkPixbuf.Pixbuf.new_from_file(filepath)
            width = pix.get_width()
            height = pix.get_height()
            # Preserve aspect ratio when shrinking to the icon size.
            small_pix = pix.scale_simple(icon_size,
                                         height * icon_size // width,
                                         GdkPixbuf.InterpType.NEAREST)
            liststore[tree_path][col] = small_pix
            liststore[tree_path][large_col] = pix
        except GLib.GError:
            logger.error(traceback.format_exc())

    def dump_image(url, filepath):
        # Fetch the thumbnail and write it to the cache; returns False on
        # network failure or empty response.
        req = net.urlopen(url)
        if not req or not req.data:
            logger.warn('update_share_image:, failed to request %s' % url)
            return False
        with open(filepath, 'wb') as fh:
            fh.write(req.data)
        return True

    for tree_iter, pcs_file in zip(tree_iters, pcs_files):
        # Pick the best available thumbnail URL (url2 preferred).
        if 'thumbs' not in pcs_file:
            continue
        elif 'url2' in pcs_file['thumbs']:
            key = 'url2'
        elif 'url1' in pcs_file['thumbs']:
            key = 'url1'
        elif 'url3' in pcs_file['thumbs']:
            key = 'url3'
        else:
            continue
        fs_id = pcs_file['fs_id']
        url = pcs_file['thumbs'][key]
        filepath = os.path.join(dir_name, 'share-{0}.jpg'.format(fs_id))
        # Reuse the cached file when present and non-empty.
        if os.path.exists(filepath) and os.path.getsize(filepath):
            GLib.idle_add(update_image, filepath, tree_iter)
        elif not url or len(url) < 10:
            logger.warn('update_share_image: failed to get url %s' % url)
        else:
            status = dump_image(url, filepath)
            if status:
                GLib.idle_add(update_image, filepath, tree_iter)
[ "def", "update_share_image", "(", "liststore", ",", "tree_iters", ",", "col", ",", "large_col", ",", "pcs_files", ",", "dir_name", ",", "icon_size", ",", "large_icon_size", ")", ":", "def", "update_image", "(", "filepath", ",", "tree_iter", ")", ":", "try", ...
37.763636
16.127273
def urlread(url, encoding='utf8'):
    """
    Read the content of an URL.

    Parameters
    ----------
    url : str
        The URL to fetch (any scheme urlopen supports: http, file, ...).
    encoding : str
        Encoding used to decode the response body (default 'utf8').

    Returns
    -------
    content : str
        The decoded response body.
    """
    from contextlib import closing
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    # FIX: the response was never closed (socket/file handle leak);
    # closing() works for both py2 urllib2 and py3 urllib.request objects.
    with closing(urlopen(url)) as response:
        content = response.read()
    return content.decode(encoding)
[ "def", "urlread", "(", "url", ",", "encoding", "=", "'utf8'", ")", ":", "try", ":", "from", "urllib", ".", "request", "import", "urlopen", "except", "ImportError", ":", "from", "urllib2", "import", "urlopen", "response", "=", "urlopen", "(", "url", ")", ...
18.95
17.95
def add_densities(density1, density2):
    """
    Method to sum two densities.

    Args:
        density1: First density, a dict mapping spin -> array-like values.
        density2: Second density with the same spin keys.

    Returns:
        Dict of {spin: density}, the element-wise sum per spin channel.
    """
    summed = {}
    for spin in density1.keys():
        summed[spin] = np.array(density1[spin]) + np.array(density2[spin])
    return summed
[ "def", "add_densities", "(", "density1", ",", "density2", ")", ":", "return", "{", "spin", ":", "np", ".", "array", "(", "density1", "[", "spin", "]", ")", "+", "np", ".", "array", "(", "density2", "[", "spin", "]", ")", "for", "spin", "in", "densi...
23.923077
15.461538
def get_word(value):
    """word = atom / quoted-string

    Either atom or quoted-string may start with CFWS.  We have to peel off this
    CFWS first to determine which type of word to parse.  Afterward we splice
    the leading CFWS, if any, into the parsed sub-token.

    If neither an atom or a quoted-string is found before the next special, a
    HeaderParseError is raised.

    The token returned is either an Atom or a QuotedString, as appropriate.
    This means the 'word' level of the formal grammar is not represented in the
    parse tree; this is because having that extra layer when manipulating the
    parse tree is more confusing than it is helpful.

    """
    # Peel off any leading CFWS so the first real character decides the form.
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    else:
        leader = None
    if value[0]=='"':
        # Opening quote: parse as a quoted-string.
        token, value = get_quoted_string(value)
    elif value[0] in SPECIALS:
        # A special here means neither word form can start: parse error.
        raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
                                      "but found '{}'".format(value))
    else:
        token, value = get_atom(value)
    # Splice the leading CFWS (if any) back onto the front of the token.
    if leader is not None:
        token[:0] = [leader]
    return token, value
[ "def", "get_word", "(", "value", ")", ":", "if", "value", "[", "0", "]", "in", "CFWS_LEADER", ":", "leader", ",", "value", "=", "get_cfws", "(", "value", ")", "else", ":", "leader", "=", "None", "if", "value", "[", "0", "]", "==", "'\"'", ":", "t...
37.666667
22.566667
def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation):
    """Bottleneck block with identity short-cut.

    Args:
      cnn: the network to append bottleneck blocks.
      depth: the number of output filters for this bottleneck block.
      depth_bottleneck: the number of bottleneck filters for this block.
      stride: Stride used in the first layer of the bottleneck block.
      pre_activation: use pre_activation structure used in v2 or not.
    """
    block_fn = bottleneck_block_v2 if pre_activation else bottleneck_block_v1
    block_fn(cnn, depth, depth_bottleneck, stride)
[ "def", "bottleneck_block", "(", "cnn", ",", "depth", ",", "depth_bottleneck", ",", "stride", ",", "pre_activation", ")", ":", "if", "pre_activation", ":", "bottleneck_block_v2", "(", "cnn", ",", "depth", ",", "depth_bottleneck", ",", "stride", ")", "else", ":"...
43.928571
23.285714
def update_translations(condition=None):
    """
    Updates FieldTranslations table.

    Walks every cached module, finds model classes whose Meta declares
    non-empty ``translatable_fields``, and refreshes the stored translation
    for each (object, field, language) combination.

    :param condition: optional filter kwargs applied to each model's queryset
        (defaults to no filtering).
    :returns: number of translations that were actually updated.
    """
    if condition is None:
        condition = {}

    # Number of updated translations
    num_translations = 0

    # Module caching
    FieldTranslation._init_module_cache()

    # Current languages dict; the site's default language is excluded since
    # source values are assumed to be in LANGUAGE_CODE.
    LANGUAGES = dict(lang for lang in MODELTRANSLATION_LANG_CHOICES)
    if settings.LANGUAGE_CODE in LANGUAGES:
        del LANGUAGES[settings.LANGUAGE_CODE]

    # For each module, we are going to update the translations
    for key in FieldTranslation._modules.keys():
        module = FieldTranslation._modules[key]

        # Class of the module
        clsmembers = inspect.getmembers(sys.modules[key], inspect.isclass)
        for cls in clsmembers:
            # getmembers yields (name, class) pairs; keep the class.
            cls = cls[1]

            # If the model has in Meta "translatable_fields", we insert this fields
            if hasattr(cls,"_meta") and not cls._meta.abstract and hasattr(cls._meta,"translatable_fields") and len(cls._meta.translatable_fields)>0:
                objects = cls.objects.filter(**condition)

                # For each object, language and field are updated
                for obj in objects:
                    for lang in LANGUAGES.keys():
                        for field in cls._meta.translatable_fields:
                            if FieldTranslation.update(obj=obj, field=field, lang=lang, context=""):
                                num_translations += 1

    return num_translations
[ "def", "update_translations", "(", "condition", "=", "None", ")", ":", "if", "condition", "is", "None", ":", "condition", "=", "{", "}", "# Number of updated translations", "num_translations", "=", "0", "# Module caching", "FieldTranslation", ".", "_init_module_cache"...
33.105263
20.315789
def netbsd_interfaces():
    '''
    Obtain interface information for NetBSD >= 8 where the
    ifconfig output diverged from other BSD variants (Netmask is now part of
    the address).

    Falls back to ``linux_interfaces()`` on NetBSD releases older than 8.0,
    and returns an empty dict when no ``ifconfig`` binary can be located.
    '''
    # NetBSD versions prior to 8.0 can still use linux_interfaces()
    if LooseVersion(os.uname()[2]) < LooseVersion('8.0'):
        return linux_interfaces()

    ifconfig_path = salt.utils.path.which('ifconfig')
    if not ifconfig_path:
        # FIX: previously 'None -a' would have been handed to a shell;
        # bail out explicitly when ifconfig is absent.
        return {}
    # FIX: invoke ifconfig with an argument list (no shell=True) so the
    # resolved path is never subject to shell interpretation.
    cmd = subprocess.Popen(
        [ifconfig_path, '-a'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT).communicate()[0]
    return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd))
[ "def", "netbsd_interfaces", "(", ")", ":", "# NetBSD versions prior to 8.0 can still use linux_interfaces()", "if", "LooseVersion", "(", "os", ".", "uname", "(", ")", "[", "2", "]", ")", "<", "LooseVersion", "(", "'8.0'", ")", ":", "return", "linux_interfaces", "(...
37.529412
21.176471
def not_(self, *query_expressions):
    '''
    Add a $not expression to the query, negating the query expressions
    given.

    **Examples**: ``query.not_(SomeDocClass.age <= 18)`` becomes
    ``{'age' : { '$not' : { '$gt' : 18 } }}``

    :param query_expressions: Instances of
        :class:`ommongo.query_expression.QueryExpression`

    :returns: this query object, for chaining.
    '''
    for expression in query_expressions:
        self.filter(expression.not_())
    return self
[ "def", "not_", "(", "self", ",", "*", "query_expressions", ")", ":", "for", "qe", "in", "query_expressions", ":", "self", ".", "filter", "(", "qe", ".", "not_", "(", ")", ")", "return", "self" ]
40.818182
30.454545
def encrypt(key, message):
    '''encrypt leverages KMS encrypt and base64-encode encrypted blob

    More info on KMS encrypt API:
    https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html

    :param key: KMS key id / alias / ARN used for encryption
    :param message: plaintext to encrypt
    :returns: base64-encoded ciphertext as a str
    :raises Exception: when the KMS call or encoding fails
    '''
    try:
        ret = kms.encrypt(KeyId=key, Plaintext=message)
        # FIX: base64.encodestring was removed in Python 3.9;
        # encodebytes is the drop-in replacement with identical output.
        encrypted_data = base64.encodebytes(ret.get('CiphertextBlob'))
    except Exception as e:
        # returns http 500 back to user; chain the original error so the
        # full cause shows up in CloudWatch Logs
        raise Exception("Unable to encrypt data: ", e) from e
    return encrypted_data.decode()
[ "def", "encrypt", "(", "key", ",", "message", ")", ":", "try", ":", "ret", "=", "kms", ".", "encrypt", "(", "KeyId", "=", "key", ",", "Plaintext", "=", "message", ")", "encrypted_data", "=", "base64", ".", "encodestring", "(", "ret", ".", "get", "(",...
38.785714
24.785714
def http_exception(channel, title):
    """
    Creates an embed UI containing the 'too long' error message.

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        title (str): The title of the embed

    Returns:
        ui (ui_embed.UI): The embed UI object
    """
    description = "{} is too helpful! Try trimming some of the help messages.".format(title)
    return ui_embed.UI(
        channel,
        "Too much help",
        description,
        modulename=modulename
    )
[ "def", "http_exception", "(", "channel", ",", "title", ")", ":", "# Create embed UI object", "gui", "=", "ui_embed", ".", "UI", "(", "channel", ",", "\"Too much help\"", ",", "\"{} is too helpful! Try trimming some of the help messages.\"", ".", "format", "(", "title", ...
24.619048
22.619048
def ndarray_fs(shape, dtype, location, lock, readonly=False, order='F', **kwargs):
    """Emulate shared memory using the filesystem.

    Creates (or reuses) a file of exactly the required byte size under the
    emulated-SHM directory, mmaps it, and wraps the mapping in a numpy
    ndarray.

    :param shape: array shape (used to compute the allocation size)
    :param dtype: numpy dtype of the array elements
    :param location: file name of the backing file inside the SHM directory
    :param lock: optional lock guarding allocation; acquired around the
        existence/size checks and file creation
    :param readonly: if True, fail when the file is missing or mis-sized and
        mark the resulting array non-writable
    :param order: memory layout of the ndarray ('F' by default)
    :returns: tuple of (mmap object, ndarray view over it)
    :raises SharedMemoryReadError: in readonly mode when the backing file is
        absent or its size does not match the request
    """
    dbytes = np.dtype(dtype).itemsize
    nbytes = Vec(*shape).rectVolume() * dbytes

    directory = mkdir(EMULATED_SHM_DIRECTORY)
    filename = os.path.join(directory, location)

    # NOTE(review): if one of the raises below fires, the lock is never
    # released -- confirm callers expect that.
    if lock:
        lock.acquire()

    exists = os.path.exists(filename)
    size = 0 if not exists else os.path.getsize(filename)

    if readonly and not exists:
        raise SharedMemoryReadError(filename + " has not been allocated. Requested " + str(nbytes) + " bytes.")
    elif readonly and size != nbytes:
        raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
            filename, size, nbytes
        ))

    if exists:
        if size > nbytes:
            # Oversized file: shrink it in place to the requested size.
            with open(filename, 'wb') as f:
                os.ftruncate(f.fileno(), nbytes)
        elif size < nbytes:
            # too small? just remake it below
            # if we were being more efficient
            # we could just append zeros
            os.unlink(filename)

    exists = os.path.exists(filename)

    if not exists:
        # Write zeros in ~10 MiB-per-element-size chunks until nbytes reached.
        blocksize = 1024 * 1024 * 10 * dbytes
        steps = int(math.ceil(float(nbytes) / float(blocksize)))
        total = 0

        with open(filename, 'wb') as f:
            for i in range(0, steps):
                # Last chunk may be shorter than blocksize.
                write_bytes = min(blocksize, nbytes - total)
                f.write(b'\x00' * write_bytes)
                total += blocksize

    if lock:
        lock.release()

    with open(filename, 'r+b') as f:
        array_like = mmap.mmap(f.fileno(), 0) # map entire file
        renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)

    renderbuffer.setflags(write=(not readonly))
    return array_like, renderbuffer
[ "def", "ndarray_fs", "(", "shape", ",", "dtype", ",", "location", ",", "lock", ",", "readonly", "=", "False", ",", "order", "=", "'F'", ",", "*", "*", "kwargs", ")", ":", "dbytes", "=", "np", ".", "dtype", "(", "dtype", ")", ".", "itemsize", "nbyte...
32.647059
20.705882
def fqname_to_id(self, fq_name, type):
    """
    Return uuid for fq_name.

    :param fq_name: resource fq name
    :type fq_name: FQName
    :param type: resource type
    :type type: str

    :rtype: UUIDv4 str
    :raises HttpError: fq_name not found
    """
    payload = {"type": type, "fq_name": list(fq_name)}
    response = self.post_json(self.make_url("/fqname-to-id"), payload)
    return response["uuid"]
[ "def", "fqname_to_id", "(", "self", ",", "fq_name", ",", "type", ")", ":", "data", "=", "{", "\"type\"", ":", "type", ",", "\"fq_name\"", ":", "list", "(", "fq_name", ")", "}", "return", "self", ".", "post_json", "(", "self", ".", "make_url", "(", "\...
26.352941
14.823529
def get_user(self, login=github.GithubObject.NotSet):
    """
    :calls: `GET /users/:user <http://developer.github.com/v3/users>`_ or `GET /user <http://developer.github.com/v3/users>`_
    :param login: string
    :rtype: :class:`github.NamedUser.NamedUser`
    """
    assert login is github.GithubObject.NotSet or isinstance(login, (str, unicode)), login
    if login is not github.GithubObject.NotSet:
        # An explicit login was given: fetch that user's public profile now.
        headers, data = self.__requester.requestJsonAndCheck(
            "GET",
            "/users/" + login
        )
        return github.NamedUser.NamedUser(self.__requester, headers, data, completed=True)
    # No login: return a lazy handle on the authenticated user (/user).
    return AuthenticatedUser.AuthenticatedUser(self.__requester, {}, {"url": "/user"}, completed=False)
[ "def", "get_user", "(", "self", ",", "login", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "login", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "login", ",", "(", "str", ",", "unicode", ")", ")...
51.666667
27.666667
def _create_ret_object(self, status=SUCCESS, data=None, error=False,
                       error_message=None, error_cause=None):
    """
    Create generic reponse objects.

    :param str status: The SUCCESS or FAILURE of the request
    :param obj data: The data to return
    :param bool error: Set to True to add Error response
    :param str error_message: The generic error message
    :param str error_cause: The cause of the error
    :returns: A dictionary of values
    """
    ret = {
        'status': self.FAILURE if status == self.FAILURE else self.SUCCESS,
        'data': data,
    }

    if not error:
        ret['error'] = None
        return ret

    details = {}
    if error_message is not None:
        details['message'] = error_message
    if error_cause is not None:
        details['cause'] = error_cause
    ret['error'] = details
    return ret
[ "def", "_create_ret_object", "(", "self", ",", "status", "=", "SUCCESS", ",", "data", "=", "None", ",", "error", "=", "False", ",", "error_message", "=", "None", ",", "error_cause", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "status", "==", "...
34.892857
14.464286
def list_namespaced_job(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_job  # noqa: E501

    list or watch objects of kind Job  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Server-defined continuation token from a previous,
        otherwise-identical list call; invalid/expired tokens yield a 410
        ResourceExpired error. Not supported when watch is true.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses for a list call; when more
        items exist the list metadata's `continue` field is set for paging.
        Not supported if watch is true.
    :param str resource_version: Show changes that occur after this resource
        version; for list calls, controls cache/quorum-read freshness.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1JobList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('async_req'):
        (data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
        return data
    return self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
[ "def", "list_namespaced_job", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "l...
161
132.566667