text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def _configure_logger(cls, simple_name, log_dest, detail_level,
                      log_filename, connection, propagate):
    # pylint: disable=line-too-long
    """
    Configure the pywbem loggers and optionally activate WBEM connections
    for logging and setting a log detail level.

    Parameters:

      simple_name (:term:`string`):
        Simple name (ex. `'api'`) of the single pywbem logger this method
        should affect, or `'all'` to affect all pywbem loggers.
        Must be one of the strings in
        :data:`~pywbem._logging.LOGGER_SIMPLE_NAMES`.

      log_dest (:term:`string`):
        Log destination for the affected pywbem loggers, controlling the
        configuration of its Python logging parameters (log handler,
        message format, and log level).
        If it is a :term:`string`, it must be one of the strings in
        :data:`~pywbem._logging.LOG_DESTINATIONS` and the Python logging
        parameters of the loggers will be configured accordingly for their
        log handler, message format, and with a logging level of
        :attr:`py:logging.DEBUG`.
        If `None`, the Python logging parameters of the loggers will not
        be changed.

      detail_level (:term:`string` or :class:`int` or `None`):
        Detail level for the data in each log record that is generated by
        the affected pywbem loggers.
        If it is a :term:`string`, it must be one of the strings in
        :data:`~pywbem._logging.LOG_DETAIL_LEVELS` and the loggers will be
        configured for the corresponding detail level.
        If it is an :class:`int`, it defines the maximum size of the log
        records created and the loggers will be configured to output all
        available information up to that size.
        If `None`, the detail level configuration will not be changed.

      log_filename (:term:`string`):
        Path name of the log file (required if the log destination is
        `'file'`; otherwise ignored).

      connection (:class:`~pywbem.WBEMConnection` or :class:`py:bool` or `None`):
        WBEM connection(s) that should be affected for activation and for
        setting the detail level.
        If it is a :class:`py:bool`, the information for activating logging
        and for the detail level of the affected loggers will be stored for
        use by subsequently created :class:`~pywbem.WBEMConnection` objects.
        A value of `True` will store the information to activate the
        connections for logging, and will add the detail level for the
        logger(s). A value of `False` will reset the stored information for
        future connections to be deactivated with no detail levels
        specified.
        If it is a :class:`~pywbem.WBEMConnection` object, logging will be
        activated for that WBEM connection only and the specified detail
        level will be set for the affected pywbem loggers on the
        connection.
        If `None`, no WBEM connection will be activated for logging.

      propagate (:class:`py:bool`):
        Flag controlling whether the affected pywbem logger should
        propagate log events to its parent loggers.

    Raises:

      ValueError: Invalid input parameters (loggers remain unchanged).
    """  # noqa: E501
    # pylint: enable=line-too-long

    # 'all' fans out recursively to each concrete logger; the recursion
    # depth is exactly one because 'api'/'http' hit the branches below.
    if simple_name == 'all':
        for name in ['api', 'http']:
            cls._configure_logger(name, log_dest=log_dest,
                                  detail_level=detail_level,
                                  log_filename=log_filename,
                                  connection=connection,
                                  propagate=propagate)
        return

    # Map the simple name to the fully qualified Python logger name.
    if simple_name == 'api':
        logger_name = LOGGER_API_CALLS_NAME
    elif simple_name == 'http':
        logger_name = LOGGER_HTTP_NAME
    else:
        raise ValueError(
            _format("Invalid simple logger name: {0!A}; must be one of: "
                    "{1!A}", simple_name, LOGGER_SIMPLE_NAMES))

    # Build the handler and normalize the detail level, then activate.
    handler = cls._configure_logger_handler(log_dest, log_filename)
    detail_level = cls._configure_detail_level(detail_level)
    cls._activate_logger(logger_name, simple_name, detail_level,
                         handler, connection, propagate)
[ "def", "_configure_logger", "(", "cls", ",", "simple_name", ",", "log_dest", ",", "detail_level", ",", "log_filename", ",", "connection", ",", "propagate", ")", ":", "# pylint: disable=line-too-long", "# noqa: E501", "# pylint: enable=line-too-long", "if", "simple_name", ...
43.764706
26.372549
def get_success_url(self):
    """Build the ``redis_metric_aggregate_detail`` URL from
    ``self.metric_slugs``."""
    joined_slugs = '+'.join(self.metric_slugs)
    reversed_url = reverse('redis_metric_aggregate_detail', args=[joined_slugs])
    # Django 1.6 percent-encodes reversed URLs, turning "+" into "%2B".
    # Restore the literal "+" (allowed per RFC 1738).
    # https://docs.djangoproject.com/en/1.6/releases/1.6/#quoting-in-reverse
    return reversed_url.replace("%2B", "+")
[ "def", "get_success_url", "(", "self", ")", ":", "slugs", "=", "'+'", ".", "join", "(", "self", ".", "metric_slugs", ")", "url", "=", "reverse", "(", "'redis_metric_aggregate_detail'", ",", "args", "=", "[", "slugs", "]", ")", "# Django 1.6 quotes reversed URL...
57.444444
17.111111
def parse_loops_file(self, contents, ignore_whitespace = True, ignore_errors = False):
    '''Parse the lines of a Rosetta loops file into ``self.data``.

    This parser is forgiving and allows leading whitespace.

    :param contents: full text of the loops file
    :param ignore_whitespace: strip each line before tokenizing
    :param ignore_errors: skip unparseable lines instead of raising
    '''
    for l in [l for l in contents.strip().split('\n') if l]:
        try:
            if ignore_whitespace:
                l = l.strip()
            tokens = l.split()
            if len(tokens) < 3:
                raise RosettaFileParsingException('Lines in a loops file must have at least three entries.')
            # Pad to four tokens; the fourth column is optional in the format.
            if len(tokens) < 4:
                tokens.append(None)
            self.data.append(self.parse_loop_line(tokens))
        # BUG FIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt when ignore_errors was set.
        except Exception:
            if ignore_errors:
                continue
            raise
[ "def", "parse_loops_file", "(", "self", ",", "contents", ",", "ignore_whitespace", "=", "True", ",", "ignore_errors", "=", "False", ")", ":", "for", "l", "in", "[", "l", "for", "l", "in", "contents", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ...
44
18.705882
def add_user_to_group(iam_client, user, group, quiet = False):
    """
    Add an IAM user to an IAM group.

    :param iam_client: IAM API client used to perform the call
    :param user: name of the IAM user to add
    :param group: name of the IAM group to add the user to
    :param quiet: when True, suppress the informational message
    :return: None
    """
    if not quiet:
        printInfo('Adding user to group %s...' % group)
    iam_client.add_user_to_group(GroupName = group, UserName = user)
[ "def", "add_user_to_group", "(", "iam_client", ",", "user", ",", "group", ",", "quiet", "=", "False", ")", ":", "if", "not", "quiet", ":", "printInfo", "(", "'Adding user to group %s...'", "%", "group", ")", "iam_client", ".", "add_user_to_group", "(", "GroupN...
25.571429
19
def getDBusEnvEndpoints(reactor, client=True):
    """
    Creates endpoints from the DBUS_SESSION_BUS_ADDRESS environment variable

    @rtype: C{list} of L{twisted.internet.interfaces.IStreamServerEndpoint}
    @returns: A list of endpoint instances
    @raise Exception: if DBUS_SESSION_BUS_ADDRESS is unset or empty
    """
    env = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
    # BUG FIX: an empty-string value previously slipped past the "is None"
    # check and was handed to getDBusEndpoints as an unparseable address.
    if not env:
        raise Exception('DBus Session environment variable not set')
    return getDBusEndpoints(reactor, env, client)
[ "def", "getDBusEnvEndpoints", "(", "reactor", ",", "client", "=", "True", ")", ":", "env", "=", "os", ".", "environ", ".", "get", "(", "'DBUS_SESSION_BUS_ADDRESS'", ",", "None", ")", "if", "env", "is", "None", ":", "raise", "Exception", "(", "'DBus Session...
37.25
19.583333
def value(self):
    """Return the broadcasted value, lazily loading it from ``self._path``
    on first access and caching it in ``self._value``."""
    if self._path is not None and not hasattr(self, "_value"):
        self._value = self._load(self._path)
    return self._value
[ "def", "value", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_value\"", ")", "and", "self", ".", "_path", "is", "not", "None", ":", "self", ".", "_value", "=", "self", ".", "_load", "(", "self", ".", "_path", ")", "return", ...
34.5
12
def update(self, pbar):
    """Render the current NIST/SI transfer speed for the progress bar.

    Computes the average rate (currval / seconds_elapsed) and picks a
    "pretty" prefix unit for it via bitmath.
    """
    too_early = pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6
    if too_early:
        # Not enough data yet to compute a meaningful rate.
        scaled = bitmath.Byte()
    else:
        rate = pbar.currval / pbar.seconds_elapsed
        scaled = bitmath.Byte(rate).best_prefix(system=self.system)
    return scaled.format(self.format)
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "seconds_elapsed", "<", "2e-6", "or", "pbar", ".", "currval", "<", "2e-6", ":", "scaled", "=", "bitmath", ".", "Byte", "(", ")", "else", ":", "speed", "=", "pbar", ".", "currva...
36
20.615385
def _GroupActions(action, group, alias, location):
    """Applies group level actions.

    :param action: the server action url to exec against
    :param group: group name
    :param alias: short code for a particular account. If None, the
        account's default alias is used
    :param location: datacenter location. If None, the account's default
        location is used
    """
    alias = alias if alias is not None else clc.v1.Account.GetAlias()
    location = location if location is not None else clc.v1.Account.GetLocation()
    group_uuid = Group.GetGroupUUID(group, alias, location)
    payload = {'UUID': group_uuid, 'AccountAlias': alias}
    return clc.v1.API.Call('post', 'Group/%sHardwareGroup' % (action), payload)
[ "def", "_GroupActions", "(", "action", ",", "group", ",", "alias", ",", "location", ")", ":", "if", "alias", "is", "None", ":", "alias", "=", "clc", ".", "v1", ".", "Account", ".", "GetAlias", "(", ")", "if", "location", "is", "None", ":", "location"...
45.357143
25.928571
def update_exc(exc, msg, before=True, separator="\n"):
    """
    Adds additional text to an exception's error message.

    The new text will be added before the existing text by default; to
    append it after the original text, pass False to the `before`
    parameter. By default the old and new text will be separated by a
    newline. If you wish to use a different separator, pass that as the
    `separator` parameter.

    :param exc: the exception to augment (mutated in place)
    :param msg: the text to add
    :return: the same exception object, with updated message/args
    """
    # BUG FIX: Python 3 exceptions have no ``.message`` attribute; fall
    # back to the first positional argument (the conventional message slot).
    emsg = getattr(exc, "message", None)
    if emsg is None:
        emsg = exc.args[0] if exc.args else ""
    if before:
        parts = (msg, separator, emsg)
    else:
        parts = (emsg, separator, msg)
    new_msg = "%s%s%s" % parts
    # Preserve any extra positional args beyond the message.
    new_args = (new_msg, ) + exc.args[1:]
    exc.message = new_msg
    exc.args = new_args
    return exc
[ "def", "update_exc", "(", "exc", ",", "msg", ",", "before", "=", "True", ",", "separator", "=", "\"\\n\"", ")", ":", "emsg", "=", "exc", ".", "message", "if", "before", ":", "parts", "=", "(", "msg", ",", "separator", ",", "emsg", ")", "else", ":",...
33.85
19.85
def unique_email_validator(form, field):
    """ Email must be unique. This validator may NOT be customized.

    Raises ValidationError if the email in ``field.data`` is already
    registered. (Docstring fixed: previously said "Username".)
    """
    user_manager = current_app.user_manager
    if not user_manager.email_is_available(field.data):
        raise ValidationError(_('This Email is already in use. Please try another one.'))
[ "def", "unique_email_validator", "(", "form", ",", "field", ")", ":", "user_manager", "=", "current_app", ".", "user_manager", "if", "not", "user_manager", ".", "email_is_available", "(", "field", ".", "data", ")", ":", "raise", "ValidationError", "(", "_", "(...
60.2
13.6
def zmeshgrid(d):
    """
    Build a meshgrid like np.meshgrid but in z-order.

    :param d: you'll get 4**d nodes in meshgrid
    :return: xx, yy in z-order
    """
    axis = xfun(2, d)
    unit = ones(2, d)
    return zkronv(axis, unit), zkronv(unit, axis)
[ "def", "zmeshgrid", "(", "d", ")", ":", "lin", "=", "xfun", "(", "2", ",", "d", ")", "one", "=", "ones", "(", "2", ",", "d", ")", "xx", "=", "zkronv", "(", "lin", ",", "one", ")", "yy", "=", "zkronv", "(", "one", ",", "lin", ")", "return", ...
20.692308
17.461538
def add_expect_string_healthcheck(self, expect_string):
    """Inserts a new healthcheck_expect with only expect_string.

    :param expect_string: expect_string.

    :return: Dictionary with the following structure:

    ::

        {'healthcheck_expect': {'id': < id >}}

    :raise InvalidParameterError: The value of expect_string is invalid.
    :raise HealthCheckExpectJaCadastradoError: There is already a
        healthcheck_expect registered with the same data.
    :raise HealthCheckExpectNaoExisteError: Healthcheck_expect not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    url = 'healthcheckexpect/add/expect_string/'
    payload = {'expect_string': expect_string}
    code, xml = self.submit({'healthcheck': payload}, 'POST', url)
    return self.response(code, xml)
[ "def", "add_expect_string_healthcheck", "(", "self", ",", "expect_string", ")", ":", "healthcheck_map", "=", "dict", "(", ")", "healthcheck_map", "[", "'expect_string'", "]", "=", "expect_string", "url", "=", "'healthcheckexpect/add/expect_string/'", "code", ",", "xml...
36.037037
28.555556
def export(self):
    """Serialize the requirements to pretty-printed XML and return it."""
    root = self._top_element()
    props = self._properties_element(root)
    self._fill_requirements(root)
    self._fill_lookup_prop(props)
    return utils.prettify_xml(root)
[ "def", "export", "(", "self", ")", ":", "top", "=", "self", ".", "_top_element", "(", ")", "properties", "=", "self", ".", "_properties_element", "(", "top", ")", "self", ".", "_fill_requirements", "(", "top", ")", "self", ".", "_fill_lookup_prop", "(", ...
36.428571
6.857143
def setViewsFromUiAutomatorDump(self, received):
    '''
    Sets L{self.views} to the received value parsing the received XML.

    @type received: str
    @param received: the string received from the I{UI Automator}
    @raise ValueError: if C{received} is empty or None
    '''
    # NOTE(review): the == "" comparison is redundant — "not received"
    # already covers the empty string.
    if not received or received == "":
        raise ValueError("received is empty")
    self.views = []
    ''' The list of Views represented as C{str} obtained after splitting it into lines after being received from the server. Done by L{self.setViews()}. '''
    # Populate self.views from the dump (name-mangled private helper).
    self.__parseTreeFromUiAutomatorDump(received)
    if DEBUG:
        # Python 2 print statement writing to stderr.
        print >>sys.stderr, "there are %d views in this dump" % len(self.views)
[ "def", "setViewsFromUiAutomatorDump", "(", "self", ",", "received", ")", ":", "if", "not", "received", "or", "received", "==", "\"\"", ":", "raise", "ValueError", "(", "\"received is empty\"", ")", "self", ".", "views", "=", "[", "]", "''' The list of Views repr...
44.466667
29.933333
def lonlat2xyz(lon, lat):
    """
    Convert lon / lat (radians) for the spherical triangulation
    into x, y, z on the unit sphere.
    """
    lon_arr = np.array(lon)
    lat_arr = np.array(lat)
    cos_lat = np.cos(lat_arr)
    return cos_lat * np.cos(lon_arr), cos_lat * np.sin(lon_arr), np.sin(lat_arr)
[ "def", "lonlat2xyz", "(", "lon", ",", "lat", ")", ":", "lons", "=", "np", ".", "array", "(", "lon", ")", "lats", "=", "np", ".", "array", "(", "lat", ")", "xs", "=", "np", ".", "cos", "(", "lats", ")", "*", "np", ".", "cos", "(", "lons", ")...
21.214286
18.928571
def sync_resources():
    """Sync the client's resources with the Lastuser server.

    Prints per-resource and per-action status; errors are reported with
    the server-provided error text.
    """
    print("Syncing resources with Lastuser...")
    resources = manager.app.lastuser.sync_resources()['results']
    for rname, resource in six.iteritems(resources):
        if resource['status'] == 'error':
            print("Error for %s: %s" % (rname, resource['error']))
        else:
            print("Resource %s %s..." % (rname, resource['status']))
            for aname, action in six.iteritems(resource['actions']):
                if action['status'] == 'error':
                    print("\tError for %s/%s: %s" % (rname, aname, action['error']))
                else:
                    # BUG FIX: previously printed resource['status'] here,
                    # not the status of the action being reported.
                    print("\tAction %s/%s %s..." % (rname, aname, action['status']))
    print("Resources synced...")
[ "def", "sync_resources", "(", ")", ":", "print", "(", "\"Syncing resources with Lastuser...\"", ")", "resources", "=", "manager", ".", "app", ".", "lastuser", ".", "sync_resources", "(", ")", "[", "'results'", "]", "for", "rname", ",", "resource", "in", "six",...
48.25
21
def last_activity_time(self):
    """Get the time of the user's last activity.

    :return: the user's last activity time as a unix timestamp, or -1
        if no activity element is found on the profile page
    :rtype: int
    """
    self._make_soup()
    act = self.soup.find(
        'div', class_='zm-profile-section-item zm-item clearfix')
    return int(act['data-time']) if act is not None else -1
[ "def", "last_activity_time", "(", "self", ")", ":", "self", ".", "_make_soup", "(", ")", "act", "=", "self", ".", "soup", ".", "find", "(", "'div'", ",", "class_", "=", "'zm-profile-section-item zm-item clearfix'", ")", "return", "int", "(", "act", "[", "'...
31.1
15.2
def load_preseed():
    """Update JobPriority information from preseed.json

    The preseed data has these fields: buildtype, testtype, platform,
    priority, expiration_date.

    The expiration_date field defaults to 2 weeks when inserted in the
    table. It has the format "YYYY-MM-DD", however, it can have "*" to
    indicate to never expire. The default priority is 1, however, if we
    want to force coalescing we can do that.

    The fields buildtype, testtype and platform can have "*" which makes
    it match all flavors of the "*" field. For example: (linux64, pgo, *)
    matches all Linux 64 pgo tests.
    """
    # Nothing to reconcile against if the table is empty.
    if not JobPriority.objects.exists():
        return
    for job in preseed_data():
        matches = JobPriority.objects.all()
        for field in ('testtype', 'buildtype', 'platform'):
            value = job[field]
            if value != '*':
                matches = matches.filter(**{field: value})
        if not matches:
            # Deal with the case where we have a new entry in preseed.
            create_new_entry(job)
            continue
        # We can have wildcards, so loop on all returned values in data.
        for priority in matches:
            process_job_priority(priority, job)
[ "def", "load_preseed", "(", ")", ":", "if", "not", "JobPriority", ".", "objects", ".", "exists", "(", ")", ":", "return", "preseed", "=", "preseed_data", "(", ")", "for", "job", "in", "preseed", ":", "queryset", "=", "JobPriority", ".", "objects", ".", ...
43.357143
25.535714
def clear_further_steps(self):
    """Clear all further steps in order to properly calculate the prev
    step."""
    later_step_lists = (
        self.parent.step_kw_layermode.lstLayerModes,
        self.parent.step_kw_unit.lstUnits,
        self.parent.step_kw_field.lstFields,
        self.parent.step_kw_classification.lstClassifications,
    )
    for widget_list in later_step_lists:
        widget_list.clear()
[ "def", "clear_further_steps", "(", "self", ")", ":", "self", ".", "parent", ".", "step_kw_layermode", ".", "lstLayerModes", ".", "clear", "(", ")", "self", ".", "parent", ".", "step_kw_unit", ".", "lstUnits", ".", "clear", "(", ")", "self", ".", "parent", ...
49.714286
11.142857
def stepfun(n, d=None, center=1, direction=1):
    """ Create TT-vector for Heaviside step function :math:`\\chi(x - x_0)`.

    Heaviside step function is defined as

    .. math::

        \\chi(x) = \\left\\{ \\begin{array}{l}
            1 \\mbox{ when } x \\ge 0, \\\\
            0 \\mbox{ when } x < 0.
        \\end{array} \\right.

    For negative value of ``direction`` :math:`\\chi(x_0 - x)` is
    approximated.

    :param n: mode size(s); a scalar combined with ``d`` gives a uniform
        d-dimensional tensor.
    :param d: number of dimensions (when ``n`` is a scalar).
    :param center: flat index of the step position.
    :param direction: positive for a rising step, negative for falling.
    """
    # Normalize mode sizes into an int32 array n0 of length d.
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    N = _np.prod(n0)
    # Degenerate positions: the step covers everything (all-ones tensor)
    # or nothing (which cannot be represented; raise).
    if center >= N and direction < 0 or center <= 0 and direction > 0:
        return ones(n0)
    if center <= 0 and direction < 0 or center >= N and direction > 0:
        raise ValueError(
            "Heaviside function with specified center and direction gives zero tensor!")
    if direction > 0:
        center = N - center
    # Decompose the flat center index into per-mode digits (mixed radix).
    cind = []
    for i in xrange(d):
        cind.append(center % n0[i])
        center //= n0[i]

    # Core-row generators: "x" marks the digit position, "notx" fills the
    # below-threshold tail; *_rev variants mirror for negative direction.
    def gen_notx(currcind, currn):
        return [0.0] * (currn - currcind) + [1.0] * currcind

    def gen_notx_rev(currcind, currn):
        return [1.0] * currcind + [0.0] * (currn - currcind)

    def gen_x(currcind, currn):
        result = [0.0] * currn
        result[currn - currcind - 1] = 1.0
        return result

    def gen_x_rev(currcind, currn):
        result = [0.0] * currn
        result[currcind] = 1.0
        return result

    if direction > 0:
        x = gen_x
        notx = gen_notx
    else:
        x = gen_x_rev
        notx = gen_notx_rev
    # Build TT cores from the last dimension backwards; rank is 2 while
    # lower digits can still "break" the threshold, 1 otherwise.
    crs = []
    prevrank = 1
    for i in range(d)[::-1]:
        break_further = max([0] + cind[:i])
        nextrank = 2 if break_further else 1
        one = [1] * n0[i]
        # NOTE(review): _np.float is removed in NumPy >= 1.24; this needs
        # to become float (or _np.float64) on modern NumPy.
        cr = _np.zeros([nextrank, n0[i], prevrank], dtype=_np.float)
        tempx = x(cind[i], n0[i])
        tempnotx = notx(cind[i], n0[i])
        # high-conditional magic
        if not break_further:
            if cind[i]:
                if prevrank > 1:
                    cr[0, :, 0] = one
                    cr[0, :, 1] = tempnotx
                else:
                    cr[0, :, 0] = tempnotx
            else:
                cr[0, :, 0] = one
        else:
            if prevrank > 1:
                cr[0, :, 0] = one
                if cind[i]:
                    cr[0, :, 1] = tempnotx
                    cr[1, :, 1] = tempx
                else:
                    cr[1, :, 1] = tempx
            else:
                if cind[i]:
                    cr[0, :, 0] = tempnotx
                    cr[1, :, 0] = tempx
                else:
                    # Digit is zero and nothing breaks below: collapse to
                    # rank 1 and keep only the "x" row.
                    nextrank = 1
                    cr = cr[:1, :, :]
                    cr[0, :, 0] = tempx
        prevrank = nextrank
        crs.append(cr)
    # Cores were built back-to-front; reverse before assembling the vector.
    return _vector.vector.from_list(crs[::-1])
[ "def", "stepfun", "(", "n", ",", "d", "=", "None", ",", "center", "=", "1", ",", "direction", "=", "1", ")", ":", "if", "isinstance", "(", "n", ",", "six", ".", "integer_types", ")", ":", "n", "=", "[", "n", "]", "if", "d", "is", "None", ":",...
29.688172
17.548387
def send_vdp_port_event_internal(self, port_uuid, mac, net_uuid,
                                 segmentation_id, status, oui):
    """Send vNIC UP/Down event to VDP.

    :param port_uuid: a ovslib.VifPort object.
    :mac: MAC address of the VNIC
    :param net_uuid: the net_uuid this port is to be associated with.
    :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
    :param status: Type of port event. 'up' or 'down'
    :oui: OUI Parameters
    :return: dict with 'result' (bool) and, on failure, 'fail_reason';
        on success, whatever port_up/down_segment_mode returned.
    """
    lldpad_port = self.lldpad_info
    if not lldpad_port:
        fail_reason = "There is no LLDPad port available."
        LOG.error("%s", fail_reason)
        return {'result': False, 'fail_reason': fail_reason}
    if status == 'up':
        if self.vdp_mode == constants.VDP_SEGMENT_MODE:
            # Resolve the OVS port name for this UUID before signalling up.
            port_name = self.ext_br_obj.get_ofport_name(port_uuid)
            if port_name is None:
                fail_reason = "Unknown portname for uuid %s" % (port_uuid)
                LOG.error("%s", fail_reason)
                return {'result': False, 'fail_reason': fail_reason}
            LOG.info("Status up: portname for uuid %(uuid)s is %(port)s",
                     {'uuid': port_uuid, 'port': port_name})
            ret = self.port_up_segment_mode(lldpad_port, port_name,
                                            port_uuid, mac, net_uuid,
                                            segmentation_id, oui)
    else:
        if self.vdp_mode == constants.VDP_SEGMENT_MODE:
            LOG.info("Status down for portname uuid %s", port_uuid)
            ret = self.port_down_segment_mode(lldpad_port, port_uuid,
                                              mac, net_uuid,
                                              segmentation_id, oui)
    # NOTE(review): if vdp_mode is not VDP_SEGMENT_MODE, 'ret' is never
    # assigned and this raises UnboundLocalError — confirm whether other
    # modes are possible here.
    return ret
[ "def", "send_vdp_port_event_internal", "(", "self", ",", "port_uuid", ",", "mac", ",", "net_uuid", ",", "segmentation_id", ",", "status", ",", "oui", ")", ":", "lldpad_port", "=", "self", ".", "lldpad_info", "if", "not", "lldpad_port", ":", "fail_reason", "=",...
51.361111
21.888889
def single_random_manipulation_low(molecule, manipulations):
    """Return a randomized copy of the molecule, without the nonbond check.

    Picks one manipulation at random, applies it to a copy of the
    molecule's coordinates, and returns the transformed copy together
    with the transformation that was applied.
    """
    chosen = sample(manipulations, 1)[0]
    new_coordinates = molecule.coordinates.copy()
    transformation = chosen.apply(new_coordinates)
    randomized = molecule.copy_with(coordinates=new_coordinates)
    return randomized, transformation
[ "def", "single_random_manipulation_low", "(", "molecule", ",", "manipulations", ")", ":", "manipulation", "=", "sample", "(", "manipulations", ",", "1", ")", "[", "0", "]", "coordinates", "=", "molecule", ".", "coordinates", ".", "copy", "(", ")", "transformat...
50.142857
16.142857
def _query_filter(search, urlkwargs, definitions):
    """Fold the filters described by *definitions* into *search*.

    Returns the updated (search, urlkwargs) pair.
    """
    filter_list, urlkwargs = _create_filter_dsl(urlkwargs, definitions)
    for filter_clause in filter_list:
        search = search.filter(filter_clause)
    return search, urlkwargs
[ "def", "_query_filter", "(", "search", ",", "urlkwargs", ",", "definitions", ")", ":", "filters", ",", "urlkwargs", "=", "_create_filter_dsl", "(", "urlkwargs", ",", "definitions", ")", "for", "filter_", "in", "filters", ":", "search", "=", "search", ".", "f...
31.5
17.625
def snapshots_to_send(source_snaps, dest_snaps):
    """Return the (base, target) snapshot pair for an incremental send.

    base is the newest destination snapshot when it also exists on the
    source (None when the destination is empty); target is always the
    newest source snapshot.
    """
    if not source_snaps:
        raise AssertionError("No snapshots exist locally!")
    if not dest_snaps:
        # Nothing on the remote side: send everything from scratch.
        return None, source_snaps[-1]
    last_remote = dest_snaps[-1]
    # The newest remote snapshot must be a common ancestor on the source.
    if last_remote in source_snaps:
        return last_remote, source_snaps[-1]
    raise AssertionError("Latest snapshot on destination doesn't exist on source!")
[ "def", "snapshots_to_send", "(", "source_snaps", ",", "dest_snaps", ")", ":", "if", "len", "(", "source_snaps", ")", "==", "0", ":", "raise", "AssertionError", "(", "\"No snapshots exist locally!\"", ")", "if", "len", "(", "dest_snaps", ")", "==", "0", ":", ...
45.714286
12.714286
def delete(self):
    """
    Removes a container that was created earlier.

    Safe to call when the container is running (force removal), already
    gone, or already being removed; ``self.id`` is reset to None in all
    of those cases.
    """
    if not self.is_created():
        LOG.debug("Container was not created. Skipping deletion")
        return
    try:
        self.docker_client.containers\
            .get(self.id)\
            .remove(force=True)  # Remove a container, even if it is running
    except docker.errors.NotFound:
        # Container is already not there
        LOG.debug("Container with ID %s does not exist. Skipping deletion",
                  self.id)
    except docker.errors.APIError as ex:
        msg = str(ex)
        removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg)
        # When removal is already started, Docker API will throw an exception
        # Skip such exceptions.
        if not removal_in_progress:
            raise ex
    self.id = None
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "is_created", "(", ")", ":", "LOG", ".", "debug", "(", "\"Container was not created. Skipping deletion\"", ")", "return", "try", ":", "self", ".", "docker_client", ".", "containers", ".", "get...
36.76
20.44
def follow_topic(kafka_class, name, retry_interval=1, **kafka_init):
    """Dump each message from kafka topic to stdio.

    Keeps retrying the initial connection every ``retry_interval`` seconds
    (for retryable errors only), then consumes the topic forever, writing
    each successfully loaded message through ``dump``.
    """
    while True:
        try:
            client = kafka_class(**kafka_init)
            topic = client.topics[name]
            consumer = topic.get_simple_consumer(reset_offset_on_start=True)
        except Exception as e:
            # Non-retryable connection errors propagate immediately.
            if not should_try_kafka_again(e):
                raise
            with flushing(sys.stderr):
                print(
                    'Failed attempt to connect to Kafka. Will retry ...',
                    file=sys.stderr)
            sleep(retry_interval)
        else:
            # Connected: leave the retry loop and start consuming.
            with flushing(sys.stdout):
                print('Connected to Kafka.')
            break
    dump = Dump()
    for message in consumer:
        with flushing(sys.stdout, sys.stderr):
            status = load(message.value)
            # Only dump messages that parsed into something truthy.
            if status:
                dump(status)
[ "def", "follow_topic", "(", "kafka_class", ",", "name", ",", "retry_interval", "=", "1", ",", "*", "*", "kafka_init", ")", ":", "while", "True", ":", "try", ":", "client", "=", "kafka_class", "(", "*", "*", "kafka_init", ")", "topic", "=", "client", "....
34.653846
14.961538
def convertPossibleValues(val, possibleValues, invalidDefault, emptyValue=''):
    '''
    convertPossibleValues - Convert input value to one of several possible
    values (lowercased), with a default for invalid entries.

    @param val <None/str> - The input value
    @param possibleValues list<str> - A list of possible values
    @param invalidDefault <None/str/Exception> - The value to return if
        "val" is not empty string/None and "val" is not in #possibleValues.
        If an instantiated Exception (like ValueError('blah')): raise it.
        If an Exception type (like ValueError): instantiate and raise it.
        Otherwise, return this raw value.
    @param emptyValue - Default '', returned for an empty value (empty
        string or None), unless set to EMPTY_IS_INVALID.
    '''
    from .utils import tostr

    # Lowercase non-null input; None stays None so both "empty" shapes
    # can be handled by one branch below.
    if val is not None:
        val = tostr(val).lower()

    if val is None or val == '':
        if emptyValue is EMPTY_IS_INVALID:
            return _handleInvalid(invalidDefault)
        return emptyValue

    if val not in possibleValues:
        return _handleInvalid(invalidDefault)
    return val
[ "def", "convertPossibleValues", "(", "val", ",", "possibleValues", ",", "invalidDefault", ",", "emptyValue", "=", "''", ")", ":", "from", ".", "utils", "import", "tostr", "# If null, retain null", "if", "val", "is", "None", ":", "if", "emptyValue", "is", "EMPT...
32.844444
28.711111
def f_get_range(self, copy=True):
    """Returns a python iterable containing the exploration range.

    :param copy:
        If the range should be copied before handed over to avoid
        tempering with data.

    Example usage:

    >>> param = Parameter('groupA.groupB.myparam', data=22, comment='I am a neat example')
    >>> param._explore([42,43,43])
    >>> param.f_get_range()
    (42,43,44)

    :raises:
        TypeError: If parameter is not explored.
    """
    if not self.f_has_range():
        raise TypeError('Your parameter `%s` is not array, so cannot return array.' %
                        self.v_full_name)
    if copy:
        return self._explored_range[:]
    return self._explored_range
[ "def", "f_get_range", "(", "self", ",", "copy", "=", "True", ")", ":", "if", "not", "self", ".", "f_has_range", "(", ")", ":", "raise", "TypeError", "(", "'Your parameter `%s` is not array, so cannot return array.'", "%", "self", ".", "v_full_name", ")", "elif",...
31.75
22.875
def update_asset(self, asset_form=None):
    """Updates an existing asset.

    :param asset_form: the form containing the elements to be updated
    :type asset_form: ``osid.repository.AssetForm``
    :raise: ``IllegalState`` -- ``asset_form`` already used in an update
        transaction
    :raise: ``InvalidArgument`` -- the form contains an invalid value
    :raise: ``NullArgument`` -- ``asset_form`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- ``asset_form`` did not originate from
        ``get_asset_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Argument validation: must be a non-null update-mode AssetForm.
    if asset_form is None:
        raise NullArgument()
    if not isinstance(asset_form, abc_repository_objects.AssetForm):
        raise InvalidArgument('argument type is not an AssetForm')
    if not asset_form.is_for_update():
        raise InvalidArgument('form is for create only, not update')
    # The form must be known to this session (tracked in self._forms)
    # and not yet consumed by a previous update.
    try:
        if self._forms[asset_form.get_id().get_identifier()] == UPDATED:
            raise IllegalState('form already used in an update transaction')
    except KeyError:
        raise Unsupported('form did not originate from this session')
    if not asset_form.is_valid():
        raise InvalidArgument('one or more of the form elements is invalid')
    url_path = construct_url('assets', bank_id=self._catalog_idstr)
    try:
        result = self._put_request(url_path, asset_form._my_map)
    except Exception:
        raise  # OperationFailed()
    # Mark the form consumed so a second update with it raises IllegalState.
    self._forms[asset_form.get_id().get_identifier()] = UPDATED
    return objects.Asset(result)
[ "def", "update_asset", "(", "self", ",", "asset_form", "=", "None", ")", ":", "if", "asset_form", "is", "None", ":", "raise", "NullArgument", "(", ")", "if", "not", "isinstance", "(", "asset_form", ",", "abc_repository_objects", ".", "AssetForm", ")", ":", ...
48.216216
23.891892
def button_with_label(self, description, assistants=None):
    """Create a button labelled with *description*.

    When *assistants* is given, a second left-aligned label with the
    assistant text is stacked below the description inside the button.
    """
    button = self.create_button()
    desc_label = self.create_label(description)
    if assistants is None:
        button.add(desc_label)
        return button
    box = self.create_box(orientation=Gtk.Orientation.VERTICAL)
    box.pack_start(desc_label, False, False, 0)
    assistants_label = self.create_label(
        assistants, justify=Gtk.Justification.LEFT
    )
    assistants_label.set_alignment(0, 0)
    box.pack_start(assistants_label, False, False, 12)
    button.add(box)
    return button
[ "def", "button_with_label", "(", "self", ",", "description", ",", "assistants", "=", "None", ")", ":", "btn", "=", "self", ".", "create_button", "(", ")", "label", "=", "self", ".", "create_label", "(", "description", ")", "if", "assistants", "is", "not", ...
37.842105
13.105263
def retry(time_unit, multiplier, backoff_coefficient, max_delay, max_attempts,
          expiration_duration, enable_jitter):
    """Decorator factory implementing exponential-backoff retries.

    The wrapped callable is re-invoked until it returns without raising,
    until ``max_attempts`` attempts have been made, or until sleeping once
    more would push the cumulative wait past ``expiration_duration``; in
    the latter two cases a RetryTimeoutException is raised carrying every
    exception seen so far.  Exceptions raised by the wrapped callable are
    therefore not surfaced until one of those limits is hit.

    Before retrying after failed attempt ``n`` (1-based) the wrapper
    sleeps for::

        min(max_delay, multiplier * backoff_coefficient ** (n - 1)) * time_unit

    seconds; with ``enable_jitter`` the sleep is additionally scaled by a
    uniform random factor in ``[0, 1)``.

    :param time_unit: fraction of a second used to scale every delay.
    :type time_unit: float
    :param multiplier: initial wait duration for the first retry.
    :type multiplier: float
    :param backoff_coefficient: base of the exponential backoff.
    :type backoff_coefficient: float
    :param max_delay: cap on the (unscaled) per-attempt delay.
    :type max_delay: float
    :param max_attempts: maximum number of invocations.
    :type max_attempts: int
    :param expiration_duration: cap on the cumulative wait time.
    :type expiration_duration: float
    :param enable_jitter: randomize each sleep when True.
    :type enable_jitter: bool
    """
    def deco_retry(task_to_try):
        @wraps(task_to_try)
        def retry_impl(*args, **kwargs):
            waited_so_far = 0
            attempts_made = 0
            seen_errors = []
            while attempts_made < max_attempts:
                try:
                    task_to_try(*args, **kwargs)
                    return
                except Exception as exc:
                    seen_errors.append(exc)
                    raw_delay = min(
                        max_delay,
                        multiplier * (backoff_coefficient ** attempts_made))
                    if enable_jitter:
                        raw_delay = random.random() * raw_delay
                    sleep_seconds = raw_delay * time_unit
                    # Refuse to sleep past the overall expiration budget.
                    if waited_so_far + sleep_seconds > expiration_duration:
                        raise RetryTimeoutException(task_to_try.__name__,
                                                    attempts_made,
                                                    max_attempts,
                                                    waited_so_far,
                                                    multiplier,
                                                    backoff_coefficient,
                                                    enable_jitter,
                                                    seen_errors)
                    runtime_logger.warn(
                        'Retrying [{0}], going to sleep for {1} seconds, exception stacktrace:\n{2}'
                        .format(task_to_try.__name__, sleep_seconds,
                                traceback.format_exc()))
                    time.sleep(sleep_seconds)
                    waited_so_far += sleep_seconds
                    attempts_made += 1
            # All attempts exhausted without success.
            raise RetryTimeoutException(task_to_try.__name__, attempts_made,
                                        max_attempts, waited_so_far,
                                        multiplier, backoff_coefficient,
                                        enable_jitter, seen_errors)
        return retry_impl
    return deco_retry
[ "def", "retry", "(", "time_unit", ",", "multiplier", ",", "backoff_coefficient", ",", "max_delay", ",", "max_attempts", ",", "expiration_duration", ",", "enable_jitter", ")", ":", "def", "deco_retry", "(", "task_to_try", ")", ":", "@", "wraps", "(", "task_to_try...
45.928571
29.128571
def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y):
    """Returns the effective precession spin argument for the smaller mass.

    This function assumes it's given spins of the secondary mass.
    """
    mass_ratio = q_from_mass1_mass2(mass1, mass2)
    coeff_primary = 2 + 3 * mass_ratio / 2
    coeff_secondary = 2 + 3 / (2 * mass_ratio)
    # Scale the in-plane spin magnitude of the secondary by the
    # mass-ratio-dependent prefactor.
    return (coeff_primary / (mass_ratio**2 * coeff_secondary)
            * chi_perp_from_spinx_spiny(spin2x, spin2y))
[ "def", "xi2_from_mass1_mass2_spin2x_spin2y", "(", "mass1", ",", "mass2", ",", "spin2x", ",", "spin2y", ")", ":", "q", "=", "q_from_mass1_mass2", "(", "mass1", ",", "mass2", ")", "a1", "=", "2", "+", "3", "*", "q", "/", "2", "a2", "=", "2", "+", "3", ...
46.625
14.875
def list_all_free_shippings(cls, **kwargs):
    """List FreeShippings

    Return a list of FreeShippings
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_free_shippings(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[FreeShipping]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous mode: the helper returns the request thread.
        return cls._list_all_free_shippings_with_http_info(**kwargs)
    # Synchronous mode: the helper returns the data directly.
    data = cls._list_all_free_shippings_with_http_info(**kwargs)
    return data
[ "def", "list_all_free_shippings", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_free_shippings_with_http_info", ...
37.869565
15
def run_cmake(arg=""): """ Forcing to run cmake """ if ds.find_executable('cmake') is None: print "CMake is required to build zql" print "Please install cmake version >= 2.8 and re-run setup" sys.exit(-1) print "Configuring zql build with CMake.... " cmake_args = arg try: build_dir = op.join(op.split(__file__)[0], 'build') dd.mkpath(build_dir) os.chdir("build") ds.spawn(['cmake', '..'] + cmake_args.split()) ds.spawn(['make', 'clean']) ds.spawn(['make']) os.chdir("..") except ds.DistutilsExecError: print "Error while running cmake" print "run 'setup.py build --help' for build options" print "You may also try editing the settings in CMakeLists.txt file and re-running setup" sys.exit(-1)
[ "def", "run_cmake", "(", "arg", "=", "\"\"", ")", ":", "if", "ds", ".", "find_executable", "(", "'cmake'", ")", "is", "None", ":", "print", "\"CMake is required to build zql\"", "print", "\"Please install cmake version >= 2.8 and re-run setup\"", "sys", ".", "exit", ...
34
16.5
def _extract_spi_args(self, **kwargs):
    """
    Given a set of keyword arguments, splits it into those relevant to SPI
    implementations and all the rest. SPI arguments are augmented with
    defaults and converted into the pin format (from the port/device
    format) if necessary. Returns a tuple of ``(spi_args, other_args)``.
    """
    # Port/device style defaults (hardware SPI addressing).
    dev_defaults = {
        'port': 0,
        'device': 0,
        }
    default_hw = SPI_HARDWARE_PINS[dev_defaults['port']]
    # Pin style defaults, derived from the default hardware port/device.
    pin_defaults = {
        'clock_pin': default_hw['clock'],
        'mosi_pin': default_hw['mosi'],
        'miso_pin': default_hw['miso'],
        'select_pin': default_hw['select'][dev_defaults['device']],
        }
    # Partition the kwargs: anything naming a pin or a port/device key is
    # an SPI argument; everything else is handed back to the caller.
    spi_args = {
        key: value for (key, value) in kwargs.items()
        if key in pin_defaults or key in dev_defaults
        }
    kwargs = {
        key: value for (key, value) in kwargs.items()
        if key not in spi_args
        }
    if not spi_args:
        # Nothing SPI-related specified: use the default pin assignment.
        spi_args = pin_defaults
    elif set(spi_args) <= set(pin_defaults):
        # Pin style only: fill in missing pins with defaults and
        # normalize each value to a GPIO pin via the board info.
        spi_args = {
            key: self.pi_info.to_gpio(spi_args.get(key, default))
            for key, default in pin_defaults.items()
            }
    elif set(spi_args) <= set(dev_defaults):
        # Port/device style only: fill in defaults, validate the pair,
        # then translate into the equivalent pin assignment.
        spi_args = {
            key: spi_args.get(key, default)
            for key, default in dev_defaults.items()
            }
        try:
            selected_hw = SPI_HARDWARE_PINS[spi_args['port']]
        except KeyError:
            raise SPIBadArgs(
                'port %d is not a valid SPI port' % spi_args['port'])
        try:
            # Probe only: raises IndexError when the device index is
            # out of range for this port's select pins.
            selected_hw['select'][spi_args['device']]
        except IndexError:
            raise SPIBadArgs(
                'device must be in the range 0..%d' %
                len(selected_hw['select']))
        spi_args = {
            key: value if key != 'select_pin' else
            selected_hw['select'][spi_args['device']]
            for key, value in pin_defaults.items()
            }
    else:
        # Mixed pin and port/device keys are ambiguous: refuse them.
        raise SPIBadArgs(
            'you must either specify port and device, or clock_pin, '
            'mosi_pin, miso_pin, and select_pin; combinations of the two '
            'schemes (e.g. port and clock_pin) are not permitted')
    return spi_args, kwargs
[ "def", "_extract_spi_args", "(", "self", ",", "*", "*", "kwargs", ")", ":", "dev_defaults", "=", "{", "'port'", ":", "0", ",", "'device'", ":", "0", ",", "}", "default_hw", "=", "SPI_HARDWARE_PINS", "[", "dev_defaults", "[", "'port'", "]", "]", "pin_defa...
39.803279
18.590164
def get_annotation_data_between_times(self, id_tier, start, end):
    """Gives the annotations within the times.

    When the tier contains reference annotations this will be returned,
    check :func:`get_ref_annotation_data_between_times` for the format.

    :param str id_tier: Name of the tier.
    :param int start: Start time of the annotation.
    :param int end: End time of the annotation.
    :returns: List of annotations within that time.
    :raises KeyError: If the tier is non existent.
    """
    tier = self.tiers[id_tier]
    if tier[1]:
        # Reference-annotation tiers are handled by a dedicated helper.
        return self.get_ref_annotation_data_between_times(
            id_tier, start, end)
    # Resolve timeslot ids to actual times, then keep every annotation
    # that overlaps [start, end] and return them sorted.
    resolved = ((self.timeslots[ann[0]], self.timeslots[ann[1]], ann[2])
                for ann in tier[0].values())
    return sorted(span for span in resolved
                  if span[1] >= start and span[0] <= end)
[ "def", "get_annotation_data_between_times", "(", "self", ",", "id_tier", ",", "start", ",", "end", ")", ":", "if", "self", ".", "tiers", "[", "id_tier", "]", "[", "1", "]", ":", "return", "self", ".", "get_ref_annotation_data_between_times", "(", "id_tier", ...
50.705882
17.705882
def rm_rf(path):
    """
    Recursively (if needed) delete path.
    """
    # A real directory (not a symlink to one) needs a recursive delete;
    # anything else that exists -- file, symlink, broken link -- is a
    # single unlink.  A missing path is silently ignored.
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path)
    elif os.path.lexists(path):
        os.remove(path)
[ "def", "rm_rf", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "elif", "os", ".", "path", ".", ...
25.875
9.875
def build_filtered_queryset(self, query, **kwargs):
    """
    Build and return the fully-filtered queryset
    """
    # Start from the base queryset, narrow it with the query conditions,
    # then hand off for any extra keyword-driven filtering.
    base_qs = self.get_queryset()
    filtered_qs = base_qs.filter(self.get_queryset_filters(query))
    return self.build_extra_filtered_queryset(filtered_qs, **kwargs)
[ "def", "build_filtered_queryset", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "# Take the basic queryset", "qs", "=", "self", ".", "get_queryset", "(", ")", "# filter it via the query conditions", "qs", "=", "qs", ".", "filter", "(", "self", ...
39.222222
9
def send_direct_message_new(self, messageobject):
    """ :reference: https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-event.html
    """
    headers, post_data = API._buildmessageobject(messageobject)
    # Build the bound endpoint first, then invoke it with the prepared
    # payload and headers.
    endpoint = bind_api(
        api=self,
        path='/direct_messages/events/new.json',
        method='POST',
        require_auth=True
    )
    return endpoint(self, post_data=post_data, headers=headers)
[ "def", "send_direct_message_new", "(", "self", ",", "messageobject", ")", ":", "headers", ",", "post_data", "=", "API", ".", "_buildmessageobject", "(", "messageobject", ")", "return", "bind_api", "(", "api", "=", "self", ",", "path", "=", "'/direct_messages/eve...
46
12.1
def predict_dims(self, q, dims_x, dims_y, dims_out, sigma=None, k=None):
    """Provide a prediction of q in the output space

    @param xq  an array of float of length dim_x
    @param estimated_sigma  if False (default), sigma_sq=self.sigma_sq,
        else it is estimated from the neighbor distances in self._weights(.)
    """
    assert len(q) == len(dims_x) + len(dims_y)
    # Squared kernel bandwidth; falls back to the instance default.
    sigma_sq = self.sigma_sq if sigma is None else sigma*sigma
    # NOTE(review): `k or self.k` also replaces an explicit k == 0 with
    # the default -- confirm that is intended.
    k = k or self.k
    # k nearest neighbours of q, restricted to the (dims_x, dims_y) dims.
    dists, index = self.dataset.nn_dims(q[:len(dims_x)], q[len(dims_x):],
                                        dims_x, dims_y, k=k)
    # Neighbour weights from distances (see self._weights for the kernel).
    w = self._weights(dists, index, sigma_sq)
    # Design matrices with a leading 1.0 intercept column.
    Xq = np.array(np.append([1.0], q), ndmin = 2)
    X = np.array([np.append([1.0],
                            self.dataset.get_dims(i, dims_x=dims_x,
                                                  dims_y=dims_y))
                  for i in index])
    Y = np.array([self.dataset.get_dims(i, dims=dims_out) for i in index])
    W = np.diag(w)
    WX = np.dot(W, X)
    WXT = WX.T
    # NOTE(review): this computes pinv((WX)^T WX) (WX)^T (W Y), i.e. a
    # doubly-weighted least-squares fit -- verify against the intended
    # locally weighted regression normal equations.
    B = np.dot(np.linalg.pinv(np.dot(WXT, WX)),WXT)
    # Side effect: the fitted local model is stored on the instance.
    self.mat = np.dot(B, np.dot(W, Y))
    Yq = np.dot(Xq, self.mat)
    return Yq.ravel()
[ "def", "predict_dims", "(", "self", ",", "q", ",", "dims_x", ",", "dims_y", ",", "dims_out", ",", "sigma", "=", "None", ",", "k", "=", "None", ")", ":", "assert", "len", "(", "q", ")", "==", "len", "(", "dims_x", ")", "+", "len", "(", "dims_y", ...
40.107143
27.642857
def check_nearby_preprocessor(impact_function):
    """Checker for the nearby preprocessor.

    :param impact_function: Impact function to check.
    :type impact_function: ImpactFunction

    :return: If the preprocessor can run.
    :rtype: bool
    """
    # The preprocessor applies only to earthquake hazard combined with a
    # place exposure.
    is_earthquake = (
        impact_function.hazard.keywords.get(layer_purpose_hazard['key'])
        == hazard_earthquake['key'])
    is_place = (
        impact_function.exposure.keywords.get(layer_purpose_exposure['key'])
        == exposure_place['key'])
    return is_earthquake and is_place
[ "def", "check_nearby_preprocessor", "(", "impact_function", ")", ":", "hazard_key", "=", "layer_purpose_hazard", "[", "'key'", "]", "earthquake_key", "=", "hazard_earthquake", "[", "'key'", "]", "exposure_key", "=", "layer_purpose_exposure", "[", "'key'", "]", "place_...
35.882353
15.058824
def get_kwargs(**kwargs):
    """This method should be used in query functions where user can query on
    any number of fields

    >>> def get_instances(entity_id=NOTSET, my_field=NOTSET):
    >>>     kwargs = CoyoteDb.get_kwargs(entity_id=entity_id, my_field=my_field)
    """
    # Keep only the keyword arguments the caller actually supplied
    # (i.e. those not left at the NOTSET sentinel).
    return {key: value for key, value in kwargs.iteritems()
            if value is not NOTSET}
[ "def", "get_kwargs", "(", "*", "*", "kwargs", ")", ":", "d", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "v", "is", "not", "NOTSET", ":", "d", "[", "k", "]", "=", "v", "return", "d" ]
38.363636
18.545455
def db(ctx):
    """[GROUP] Database management operations"""
    from hfos import database
    # Connect using the host/name carried on the click context, then
    # expose the module to subcommands via the context object.
    host = ctx.obj['dbhost']
    name = ctx.obj['dbname']
    database.initialize(host, name)
    ctx.obj['db'] = database
[ "def", "db", "(", "ctx", ")", ":", "from", "hfos", "import", "database", "database", ".", "initialize", "(", "ctx", ".", "obj", "[", "'dbhost'", "]", ",", "ctx", ".", "obj", "[", "'dbname'", "]", ")", "ctx", ".", "obj", "[", "'db'", "]", "=", "da...
29.666667
18.666667
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'classifier_id') and self.classifier_id is not None: _dict['classifier_id'] = self.classifier_id if hasattr(self, 'url') and self.url is not None: _dict['url'] = self.url if hasattr(self, 'collection') and self.collection is not None: _dict['collection'] = [x._to_dict() for x in self.collection] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'classifier_id'", ")", "and", "self", ".", "classifier_id", "is", "not", "None", ":", "_dict", "[", "'classifier_id'", "]", "=", "self", ".", "classif...
48.8
20.1
def rename_document(self, old_path, new_path):
    """
    Renames an already opened document (this will not rename the file,
    just update the file path and tab title).

    Use that function to update a file that has been renamed externally.

    :param old_path: old path (path of the widget to rename with
        ``new_path``
    :param new_path: new path that will be used to rename the tab.
    """
    new_title = os.path.split(new_path)[1]
    # Normalize once; comparison is case/separator insensitive where the
    # platform's path rules say so.
    normalized_old = os.path.normpath(os.path.normcase(old_path))
    matches = []
    for widget in self.widgets(include_clones=True):
        widget_path = os.path.normpath(os.path.normcase(widget.file.path))
        if widget_path == normalized_old:
            matches.append(widget)
    for widget in matches:
        tab_widget = widget.parent_tab_widget
        widget.file._path = new_path
        tab_widget.setTabText(tab_widget.indexOf(widget), new_title)
[ "def", "rename_document", "(", "self", ",", "old_path", ",", "new_path", ")", ":", "to_rename", "=", "[", "]", "title", "=", "os", ".", "path", ".", "split", "(", "new_path", ")", "[", "1", "]", "for", "widget", "in", "self", ".", "widgets", "(", "...
41.727273
16.454545
def get_permissions_for_registration(self):
    """
    Utilised by Wagtail's 'register_permissions' hook to allow permissions
    for a model to be assigned to groups in settings. This is only required
    if the model isn't a Page model, and isn't registered as a Snippet
    """
    from wagtail.wagtailsnippets.models import SNIPPET_MODELS
    # Page models and snippets manage their own permissions elsewhere.
    needs_registration = (not self.is_pagemodel
                          and self.model not in SNIPPET_MODELS)
    if needs_registration:
        return self.permission_helper.get_all_model_permissions()
    return Permission.objects.none()
[ "def", "get_permissions_for_registration", "(", "self", ")", ":", "from", "wagtail", ".", "wagtailsnippets", ".", "models", "import", "SNIPPET_MODELS", "if", "not", "self", ".", "is_pagemodel", "and", "self", ".", "model", "not", "in", "SNIPPET_MODELS", ":", "re...
54
19.8
def is_closed(self) -> Optional[bool]:
    """For Magnet Sensor; True if Closed, False if Open."""
    # Only door-magnet devices report an open/closed state.
    if self._device_type is None:
        return None
    if self._device_type != DeviceType.DoorMagnet:
        return None
    # Bit 0 of the status word holds the closed flag.
    return bool(self._current_status & 0x01)
[ "def", "is_closed", "(", "self", ")", "->", "Optional", "[", "bool", "]", ":", "if", "self", ".", "_device_type", "is", "not", "None", "and", "self", ".", "_device_type", "==", "DeviceType", ".", "DoorMagnet", ":", "return", "bool", "(", "self", ".", "...
52
16.6
def release(version):
    """Tags all submodules for a new release.

    Ensures that git tags, as well as the version.py files in each
    submodule, agree and that the new version is strictly greater than the
    current version.  Will fail if the new version is not an increment
    (following PEP 440).  Creates a new git tag and commit.
    """
    # Validate first so nothing is written unless the version is
    # acceptable; the remaining steps then write, commit, and tag.
    check_new_version(version)
    set_new_version(version)
    commit_new_version(version)
    set_git_tag(version)
[ "def", "release", "(", "version", ")", ":", "check_new_version", "(", "version", ")", "set_new_version", "(", "version", ")", "commit_new_version", "(", "version", ")", "set_git_tag", "(", "version", ")" ]
40.909091
22.727273
def MaskSolve(A, b, w=5, progress=True, niter=None): ''' Finds the solution `x` to the linear problem A x = b for all contiguous `w`-sized masks applied to the rows and columns of `A` and to the entries of `b`. Returns an array `X` of shape `(N - w + 1, N - w)`, where the `nth` row is the solution to the equation A[![n,n+w)] x = b[![n,n+w)] where ![n,n+w) indicates that indices in the range [n,n+w) have been masked. ''' # Ensure we have choldate installed if cholupdate is None: log.info("Running the slow version of `MaskSolve`.") log.info("Install the `choldate` package for better performance.") log.info("https://github.com/rodluger/choldate") return MaskSolveSlow(A, b, w=w, progress=progress, niter=niter) # Number of data points N = b.shape[0] # How many iterations? Default is to go through # the entire dataset if niter is None: niter = N - w + 1 # Our result matrix X = np.empty((niter, N - w)) # Solve the first two steps explicitly. for n in range(2): mask = np.arange(n, w + n) A_ = np.delete(np.delete(A, mask, axis=0), mask, axis=1) b_ = np.delete(b, mask) U = cholesky(A_) X[n] = cho_solve((U, False), b_) # Iterate! for n in prange(1, niter - 1): # Update the data vector. b_[n] = b[n] # Remove a row. S33 = U[n + 1:, n + 1:] S23 = U[n, n + 1:] cholupdate(S33, S23) # Add a row. A12 = A[:n, n] A22 = A[n, n] A23 = A[n, n + w + 1:] S11 = U[:n, :n] S12 = solve_triangular(S11.T, A12, lower=True, check_finite=False, trans=0, overwrite_b=True) S22 = np.sqrt(A22 - np.dot(S12.T, S12)) S13 = U[:n, n + 1:] S23 = (A23 - np.dot(S12.T, S13)) / S22 choldowndate(S33, np.array(S23)) U[:n, n] = S12 U[n, n] = S22 U[n, n + 1:] = S23 U[n + 1:, n + 1:] = S33 # Now we can solve our linear equation X[n + 1] = cho_solve((U, False), b_) # Return the matrix return X
[ "def", "MaskSolve", "(", "A", ",", "b", ",", "w", "=", "5", ",", "progress", "=", "True", ",", "niter", "=", "None", ")", ":", "# Ensure we have choldate installed\r", "if", "cholupdate", "is", "None", ":", "log", ".", "info", "(", "\"Running the slow vers...
28.076923
20.076923
def iterkeys(self, match=None, count=1):
    """Return an iterator over the db's keys.

    ``match`` allows for filtering the keys by pattern.
    ``count`` allows for hint the minimum number of returns.

    >>> dc = Dictator()
    >>> dc['1'] = 'abc'
    >>> dc['2'] = 'def'
    >>> dc['3'] = 'ghi'
    >>> itr = dc.iterkeys()
    >>> type(itr)
    <type 'generator'>
    >>> list(reversed([item for item in itr]))
    ['1', '2', '3']
    >>> dc.clear()

    :param match: pattern to filter keys
    :type match: str
    :param count: minimum number of returns
    :type count: int
    :return: iterator over key.
    :rtype: generator
    """
    logger.debug('call iterkeys %s', match)
    # Default to the match-everything pattern.
    pattern = '*' if match is None else match
    for key in self._redis.scan_iter(match=pattern, count=count):
        yield key
[ "def", "iterkeys", "(", "self", ",", "match", "=", "None", ",", "count", "=", "1", ")", ":", "logger", ".", "debug", "(", "'call iterkeys %s'", ",", "match", ")", "if", "match", "is", "None", ":", "match", "=", "'*'", "for", "key", "in", "self", "....
31.642857
14.642857
def i2m(self, pkt, x):
    """Convert internal value to machine value"""
    if x is not None:
        return x
    # Undefined internal value: fall back to the internal form of zero.
    return self.h2i(pkt, 0)
[ "def", "i2m", "(", "self", ",", "pkt", ",", "x", ")", ":", "if", "x", "is", "None", ":", "# Try to return zero if undefined", "x", "=", "self", ".", "h2i", "(", "pkt", ",", "0", ")", "return", "x" ]
31.5
12.333333
def sample_batch_transitions(self, batch_size, forward_steps=1):
    """ Return indexes of next sample"""
    # Sample independently per environment, then stack so the
    # environment axis comes last.
    per_env = [
        self.sample_frame_single_env(batch_size, forward_steps=forward_steps)
        for _ in range(self.num_envs)
    ]
    return np.stack(per_env, axis=-1)
[ "def", "sample_batch_transitions", "(", "self", ",", "batch_size", ",", "forward_steps", "=", "1", ")", ":", "results", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_envs", ")", ":", "results", ".", "append", "(", "self", ".", "sampl...
38
23
def model_sizes(m:nn.Module, size:tuple=(64,64))->Tuple[Sizes,Tensor,Hooks]:
    "Pass a dummy input through the model `m` to get the various sizes of activations."
    with hook_outputs(m) as hook_handles:
        # The forward pass is run purely for its side effect of filling
        # each hook's `stored` activation.
        dummy_eval(m, size)
        return [hook.stored.shape for hook in hook_handles]
[ "def", "model_sizes", "(", "m", ":", "nn", ".", "Module", ",", "size", ":", "tuple", "=", "(", "64", ",", "64", ")", ")", "->", "Tuple", "[", "Sizes", ",", "Tensor", ",", "Hooks", "]", ":", "with", "hook_outputs", "(", "m", ")", "as", "hooks", ...
54.8
20.8
def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None,
                profile=None):
    '''
    Unsubscribe a specific SubscriptionArn of a topic.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1

    .. versionadded:: 2016.11.0
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    # Reject anything that is not an SNS ARN.
    if not subscription_arn.startswith('arn:aws:sns:'):
        return False

    try:
        conn.unsubscribe(subscription_arn)
        log.info('Unsubscribe %s to %s topic', subscription_arn, topic)
    except Exception:
        log.error('Unsubscribe Error', exc_info=True)
        return False

    # Drop the cached subscription list so the next read is fresh.
    __context__.pop(_subscriptions_cache_key(topic), None)
    return True
[ "def", "unsubscribe", "(", "topic", ",", "subscription_arn", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=...
30.038462
27.961538
def copy_session(session: requests.Session) -> requests.Session:
    """Duplicates a requests.Session."""
    duplicate = requests.Session()
    # Deep-copy the cookies via a plain dict round-trip and shallow-copy
    # the headers.
    cookie_dict = requests.utils.dict_from_cookiejar(session.cookies)
    duplicate.cookies = requests.utils.cookiejar_from_dict(cookie_dict)
    duplicate.headers = session.headers.copy()
    return duplicate
[ "def", "copy_session", "(", "session", ":", "requests", ".", "Session", ")", "->", "requests", ".", "Session", ":", "new", "=", "requests", ".", "Session", "(", ")", "new", ".", "cookies", "=", "requests", ".", "utils", ".", "cookiejar_from_dict", "(", "...
48.5
21.166667
def count_in_category(x='call_type', filter_dict=None, model=DEFAULT_MODEL,
                      app=DEFAULT_APP, sort=True, limit=1000):
    """
    Count the number of records for each discrete (categorical) value of a
    field and return a dict of two lists, the field values and the counts.

    >>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1)
    >>> len(x) == len(y) == 5
    True
    >>> y[1] >= y[0]
    True
    """
    # Normalize the sort flag into an order_by prefix ('' or '-'), and
    # resolve the model reference within the app.
    sort = sort_prefix(sort)
    model = get_model(model, app)
    filter_dict = filter_dict or {}

    # Fuzzy-match the requested field name against the model's fields so
    # approximate names still resolve.
    x = fuzzy.extractOne(str(x), model._meta.get_all_field_names())[0]

    # Group by the field and count rows per distinct value.
    objects = model.objects.filter(**filter_dict)
    objects = objects.values(x)
    objects = objects.annotate(y=djmodels.Count(x))
    if sort is not None:
        objects = objects.order_by(sort + 'y')
    objects = objects.all()
    if limit:
        objects = objects[:int(limit)]
    # Convert to a dict of columns and map raw values to their
    # human-readable choice labels.
    objects = normalize_choices(util.sod_transposed(objects), field_name=x,
                                app=app, human_readable=True)
    if not objects:
        return None
    # Labels can collide after normalization; merge their counts.
    objects = consolidated_counts(objects, field_name=x, count_name='y')
    if sort is not None:
        objects = sorted_dict_of_lists(objects, field_names=['y', x],
                                       reverse=bool(sort))
    return objects[x], objects['y']
[ "def", "count_in_category", "(", "x", "=", "'call_type'", ",", "filter_dict", "=", "None", ",", "model", "=", "DEFAULT_MODEL", ",", "app", "=", "DEFAULT_APP", ",", "sort", "=", "True", ",", "limit", "=", "1000", ")", ":", "sort", "=", "sort_prefix", "(",...
40.612903
26.548387
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """
    Lazily return the first ``items`` elements of ``iterable``, padding
    with ``default`` when the iterable is too short.

    :param iterable: source of values.
    :param items: number of values to yield (converted with ``int``).
    :param default: value yielded once the iterable is exhausted.
    :raises ValueError: if ``items`` is not usable as an int or is
        negative.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))

    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))

    # Seed with -1 so an empty iterable (the loop body never runs) still
    # pads with exactly `items` defaults; the previous `i = 0` seed made
    # the empty case yield one default too few.
    i = -1
    for i, item in zip(range(items), iterable):
        yield item

    for x in range(items - (i + 1)):
        yield default
[ "def", "firsts", "(", "iterable", ",", "items", "=", "1", ",", "default", "=", "None", ")", ":", "# type: (Iterable[T], int, T) -> Iterable[T]", "try", ":", "items", "=", "int", "(", "items", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "r...
35.818182
22.454545
def CheckBlobsExist(self, blob_ids):
    """Maps each given blob id to whether it is present in the store."""
    return {blob_id: blob_id in self.blobs for blob_id in blob_ids}
[ "def", "CheckBlobsExist", "(", "self", ",", "blob_ids", ")", ":", "result", "=", "{", "}", "for", "blob_id", "in", "blob_ids", ":", "result", "[", "blob_id", "]", "=", "blob_id", "in", "self", ".", "blobs", "return", "result" ]
22.25
18.625
def split(patterns, flags):
    """Split patterns."""
    if not flags & SPLIT:
        # Splitting disabled: hand the input back untouched.
        return patterns
    # Accept either a single pattern or an iterable of patterns.
    single = isinstance(patterns, (str, bytes))
    pattern_list = [patterns] if single else patterns
    result = []
    for pattern in pattern_list:
        result.extend(WcSplit(pattern, flags).split())
    return result
[ "def", "split", "(", "patterns", ",", "flags", ")", ":", "if", "flags", "&", "SPLIT", ":", "splitted", "=", "[", "]", "for", "pattern", "in", "(", "[", "patterns", "]", "if", "isinstance", "(", "patterns", ",", "(", "str", ",", "bytes", ")", ")", ...
29.7
22.4
def domain_delete(domain, logger):
    """libvirt domain undefinition.

    @raise: libvirt.libvirtError.
    """
    if domain is None:
        return
    # Stop the domain first if it is still running; a failure here is
    # logged but does not prevent the undefine attempt.
    try:
        if domain.isActive():
            domain.destroy()
    except libvirt.libvirtError:
        logger.exception("Unable to destroy the domain.")
    try:
        domain.undefine()
    except libvirt.libvirtError:
        try:
            # domain with snapshots
            domain.undefineFlags(
                libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
        except libvirt.libvirtError:
            logger.exception("Unable to undefine the domain.")
[ "def", "domain_delete", "(", "domain", ",", "logger", ")", ":", "if", "domain", "is", "not", "None", ":", "try", ":", "if", "domain", ".", "isActive", "(", ")", ":", "domain", ".", "destroy", "(", ")", "except", "libvirt", ".", "libvirtError", ":", "...
32.473684
17.736842
def get_correspondance_dict(self, classA, classB, restrict=None,
                            replace_numeric=True):
    """ Returns a correspondance between classification A and B as dict

    Parameters
    ----------
    classA: str
        Valid classification (column name of data)
    classB: str
        Valid classification (column name of data).
    restrict: boolean vector of size cc.data, optional
        where cc is the name of the CountryConverter instance. Used to
        restrict the data sheet if necessary. E.g. to convert to countries
        which were OECD members before 1970 use
        cc.get_correspondance_dict('ISO3', 'OECD',
                                   restrict=cc.data.OECD < 1970)
    replace_numeric: boolean, optional
        If True (default) replace numeric values with the column header.
        This can be used if get a correspondance to, for example, 'OECD'
        instead of to the OECD membership years. Set to False if the
        actual numbers are required (as for UNcode).

    Returns
    -------
    dict with
        keys: based on classA
        items: list of correspoding entries in classB or None
    """
    # Pre-seed every classA entry with None so entries filtered out (or
    # without a classB match) still appear in the result.
    result = {nn: None for nn in self.data[classA].values}
    if restrict is None:
        df = self.data.copy()
    else:
        df = self.data[restrict].copy()
    # For numeric classB columns (dtype kind in bool/int/float/complex),
    # replace actual values with the column name so the correspondance
    # becomes categorical (membership rather than raw numbers).
    if replace_numeric and df[classB].dtype.kind in 'bifc':
        df.loc[~df[classB].isnull(), classB] = classB
        df.loc[df[classB].isnull(), classB] = None
    # Group by classA and collect the unique classB entries per group,
    # overwriting the None placeholders where matches exist.
    result.update(df.groupby(classA)
                  .aggregate(lambda x: list(x.unique()))
                  .to_dict()[classB])
    return result
[ "def", "get_correspondance_dict", "(", "self", ",", "classA", ",", "classB", ",", "restrict", "=", "None", ",", "replace_numeric", "=", "True", ")", ":", "result", "=", "{", "nn", ":", "None", "for", "nn", "in", "self", ".", "data", "[", "classA", "]",...
35.24
22.88
def obj_to_csv(self, file_path=None, quote_everything=False,
               space_columns=True, quote_numbers=True):
    """
    This will return a str of a csv text that is friendly to excel

    :param file_path: str to the path
    :param quote_everything: bool if True will quote everything if it
        needs it or not, this is so it looks pretty in excel.
    :param quote_numbers: bool if True will quote numbers that are strings
    :param space_columns: bool if True it will align columns with spaces
    :return: str
    """
    rows, widths = self.get_data_and_shared_column_widths(
        data_kwargs=dict(quote_numbers=quote_numbers,
                         quote_everything=quote_everything,
                         safe_str=self._excel_cell),
        width_kwargs=dict(padding=0))
    if space_columns:
        # Pad each cell to its shared column width so columns line up.
        lines = [','.join(cell.ljust(widths[col])
                          for col, cell in enumerate(row))
                 for row in rows]
    else:
        lines = [','.join(row) for row in rows]
    # posix output uses CRLF line endings; other platforms use LF.
    newline = '\r\n' if os.name == 'posix' else '\n'
    text = newline.join(lines)
    self._save_file(file_path, text)
    return text
[ "def", "obj_to_csv", "(", "self", ",", "file_path", "=", "None", ",", "quote_everything", "=", "False", ",", "space_columns", "=", "True", ",", "quote_numbers", "=", "True", ")", ":", "list_of_list", ",", "column_widths", "=", "self", ".", "get_data_and_shared...
43.387097
19.193548
def buttonDown(self, button=mouse.LEFT):
    """ Holds down the specified mouse button.

    Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT

    :param button: mouse button constant to press (default: left).
    """
    # `with` guarantees the lock is released even if mouse.press()
    # raises; the previous explicit acquire()/release() pair would leave
    # the lock held forever on an exception.
    with self._lock:
        mouse.press(button)
[ "def", "buttonDown", "(", "self", ",", "button", "=", "mouse", ".", "LEFT", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "mouse", ".", "press", "(", "button", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
29.125
10.75
def err_write(self, msg, **kwargs):
    r"""Print `msg` as an error message.

    The message is buffered (won't display) until linefeed ("\n").
    """
    if not self._thread_invalid():
        return self.request('nvim_err_write', msg, **kwargs)
    # special case: if a non-main thread writes to stderr
    # i.e. due to an uncaught exception, pass it through
    # without raising an additional exception.
    self.async_call(self.err_write, msg, **kwargs)
[ "def", "err_write", "(", "self", ",", "msg", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_thread_invalid", "(", ")", ":", "# special case: if a non-main thread writes to stderr", "# i.e. due to an uncaught exception, pass it through", "# without raising an additi...
42.75
17
def main_hrun():
    """ API test: parse command line options and run commands.
    """
    # Imports are deferred to call time so importing this module stays cheap.
    import argparse
    from httprunner import logger
    from httprunner.__about__ import __description__, __version__
    from httprunner.api import HttpRunner
    from httprunner.compat import is_py2
    from httprunner.validator import validate_json_file
    from httprunner.utils import (create_scaffold, get_python2_retire_msg, prettify_json_file)

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testcase_paths', nargs='*',
        help="testcase file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--report-template',
        help="specify report template path.")
    parser.add_argument(
        '--report-dir',
        help="specify report save directory.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--save-tests', action='store_true', default=False,
        help="Save loaded tests and parsed tests to JSON file.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testcase format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testcase format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    # One-shot utility modes: each exits the process immediately after running.
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        create_scaffold(project_name)
        exit(0)

    # Normal mode: run every testcase path with a single configured runner.
    runner = HttpRunner(
        failfast=args.failfast,
        save_tests=args.save_tests,
        report_template=args.report_template,
        report_dir=args.report_dir
    )
    try:
        for path in args.testcase_paths:
            runner.run(path, dot_env_path=args.dot_env_path)
    except Exception:
        # Log which stage blew up, then re-raise so the caller sees the error.
        logger.log_error("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage))
        raise

    return 0
[ "def", "main_hrun", "(", ")", ":", "import", "argparse", "from", "httprunner", "import", "logger", "from", "httprunner", ".", "__about__", "import", "__description__", ",", "__version__", "from", "httprunner", ".", "api", "import", "HttpRunner", "from", "httprunne...
32.476744
17.72093
def inline_inputs(self):
    """Inline all input latex files references by this document.

    The inlining is accomplished recursively. The document is modified
    in place.
    """
    base_dir = os.path.dirname(self._filepath)
    self.text = texutils.inline(self.text, base_dir)
    # Children are now merged into this document's text, so drop them.
    self._children = {}
[ "def", "inline_inputs", "(", "self", ")", ":", "self", ".", "text", "=", "texutils", ".", "inline", "(", "self", ".", "text", ",", "os", ".", "path", ".", "dirname", "(", "self", ".", "_filepath", ")", ")", "# Remove children", "self", ".", "_children"...
40
14.555556
def length_of_associated_transcript(effect):
    """Length of spliced mRNA sequence of transcript associated with
    effect, if there is one (otherwise return 0).
    """
    def sequence_length(transcript):
        return len(transcript.sequence)

    return apply_to_transcript_if_exists(
        effect=effect,
        fn=sequence_length,
        default=0)
[ "def", "length_of_associated_transcript", "(", "effect", ")", ":", "return", "apply_to_transcript_if_exists", "(", "effect", "=", "effect", ",", "fn", "=", "lambda", "t", ":", "len", "(", "t", ".", "sequence", ")", ",", "default", "=", "0", ")" ]
32.222222
9.111111
def getCert(certHost=vos.vos.SERVER, certfile=None, certQuery="/cred/proxyCert?daysValid=",daysValid=2):
    """Access the cadc certificate server

    Fetches a proxy certificate via HTTP basic auth and writes it to
    ``certfile`` (defaults to ``$HOME/.ssl/cadcproxy.pem``).

    NOTE(review): this is Python-2-only code — it uses ``urllib2`` and the
    ``file()`` builtin, both gone in Python 3.
    """
    if certfile is None:
        certfile = os.path.join(os.getenv("HOME","/tmp"),".ssl/cadcproxy.pem")
    dirname = os.path.dirname(certfile)
    try:
        os.makedirs(dirname)
    except OSError as e:
        if os.path.isdir(dirname):
            # Directory already exists — nothing to do.
            pass
        elif e.errno == 20 or e.errno == 17:
            # presumably ENOTDIR (20) / EEXIST (17): a non-directory is in
            # the way — TODO confirm intended errno values.
            sys.stderr.write(str(e)+": %s \n" % dirname)
            sys.stderr.write("Expected %s to be a directory.\n" % ( dirname))
            sys.exit(e.errno)
        else:
            raise e
    # Example taken from voidspace.org.uk
    # create a password manager
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    (username, passwd) = getUserPassword(host=certHost)
    # Add the username and password.
    # If we knew the realm, we could use it instead of ``None``.
    top_level_url = "http://"+certHost
    password_mgr.add_password(None, top_level_url, username, passwd)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    # create "opener" (OpenerDirector instance)
    opener = urllib2.build_opener(handler)
    # Install the opener.
    urllib2.install_opener(opener)
    # Now all calls to urllib2.urlopen use our opener.
    url="http://"+certHost+certQuery+str(daysValid)
    r= urllib2.urlopen(url)
    w= file(certfile,'w')
    # Stream the certificate to disk chunk by chunk.
    while True:
        buf=r.read()
        if not buf:
            break
        w.write(buf)
    w.close()
    r.close()
    return
[ "def", "getCert", "(", "certHost", "=", "vos", ".", "vos", ".", "SERVER", ",", "certfile", "=", "None", ",", "certQuery", "=", "\"/cred/proxyCert?daysValid=\"", ",", "daysValid", "=", "2", ")", ":", "if", "certfile", "is", "None", ":", "certfile", "=", "...
30.352941
20.235294
def _appendSegment(self, type=None, points=None, smooth=False, **kwargs):
    """
    Append a segment at the end of the contour.

    Subclasses may override this method.
    """
    # Appending is just inserting at the current end position.
    index = len(self)
    self._insertSegment(index, type=type, points=points, smooth=smooth,
                        **kwargs)
[ "def", "_appendSegment", "(", "self", ",", "type", "=", "None", ",", "points", "=", "None", ",", "smooth", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_insertSegment", "(", "len", "(", "self", ")", ",", "type", "=", "type", ",", ...
42.5
12.166667
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
    """Get a list of key_ranges.KeyRanges objects, one for each shard.

    This method uses scatter index to split each namespace into pieces
    and assign those pieces to shards.

    Args:
      app: app_id in str.
      namespaces: a list of namespaces in str.
      shard_count: number of shards to split.
      query_spec: model.QuerySpec.

    Returns:
      a list of key_ranges.KeyRanges objects.
    """
    # Split each namespace into shard_count pieces; namespaces without
    # enough scatter entities pad the tail of their list with None.
    splits_per_ns = []
    for namespace in namespaces:
        splits = cls._split_ns_by_scatter(
            shard_count, namespace, query_spec.entity_kind, app)
        # Shuffle so the None padding is not always assigned to the
        # same (last) shards.
        random.shuffle(splits)
        splits_per_ns.append(splits)

    # Round-robin assignment: shard i receives at most one split (or a
    # None) from each namespace, balancing differently-sized namespaces.
    buckets = [[] for _ in range(shard_count)]
    for splits in splits_per_ns:
        for shard_index, split in enumerate(splits):
            if split:
                buckets[shard_index].append(split)

    result = []
    for bucket in buckets:
        if bucket:
            result.append(
                key_ranges.KeyRangesFactory.create_from_list(bucket))
    return result
[ "def", "_to_key_ranges_by_shard", "(", "cls", ",", "app", ",", "namespaces", ",", "shard_count", ",", "query_spec", ")", ":", "key_ranges_by_ns", "=", "[", "]", "# Split each ns into n splits. If a ns doesn't have enough scatter to", "# split into n, the last few splits are Non...
35.045455
17.977273
def list_ipsecpolicies(self, retrieve_all=True, **_params):
    """Fetches a list of all configured IPsecPolicies for a project."""
    path = self.ipsecpolicies_path
    return self.list('ipsecpolicies', path, retrieve_all, **_params)
[ "def", "list_ipsecpolicies", "(", "self", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "list", "(", "'ipsecpolicies'", ",", "self", ".", "ipsecpolicies_path", ",", "retrieve_all", ",", "*", "*", "_params", ")...
49.5
6
def interpolate_delays(augmented_stop_times, dist_threshold, delay_threshold=3600, delay_cols=None):
    """
    Given an augmented stop times DataFrame as output by the function
    :func:`build_augmented_stop_times`, a distance threshold (float) in
    the same units as the ``'shape_dist_traveled'`` column, and a delay
    threshold (integer number of seconds), alter the delay values of the
    augmented stop times as follows.

    Drop all delays with absolute value more than ``delay_threshold``
    seconds.  For each trip and each delay type (arrival or departure):
    if the trip has all null values for the delay type, leave them as is;
    otherwise:

    - If the first delay is more than ``dist_threshold`` distance units
      from the first stop, set the first stop delay to zero (charitably);
      otherwise set it to the first delay.
    - Symmetrically for the last stop delay.
    - Linearly interpolate the remaining stop delays by distance.

    Return the resulting DataFrame.

    If a list of delay column names is given in ``delay_cols``, alter
    those columns instead of ``arrival_delay`` / ``departure_delay``.

    :param augmented_stop_times: DataFrame with at least ``trip_id``,
        ``shape_dist_traveled`` and the delay columns
    :param dist_threshold: float distance threshold
    :param delay_threshold: int seconds; larger delays are nullified
    :param delay_cols: optional list of delay column names
    :return: DataFrame with interpolated, rounded delays
    """
    f = augmented_stop_times.copy()
    if delay_cols is None or not set(delay_cols) <= set(f.columns):
        delay_cols = ['arrival_delay', 'departure_delay']

    # Return f if nothing to do
    if 'shape_dist_traveled' not in f.columns or\
      not f['shape_dist_traveled'].notnull().any() or\
      all([f[col].count() == f[col].shape[0] for col in delay_cols]):
        return f

    # Nullify fishy delays (abs() leaves NaN as NaN, which compares False)
    for col in delay_cols:
        f.loc[abs(f[col]) > delay_threshold, col] = np.nan

    # Fill null delays
    def fill(group):
        # Only columns that have at least one nonnull value.
        fill_cols = [col for col in delay_cols if group[col].count() >= 1]
        for col in fill_cols:
            # Set first and last stop delays.
            for i in [0, -1]:
                j = group[col].dropna().index[i]
                # BUG FIX: the removed pandas ``.ix`` indexer is replaced
                # with ``.loc`` (j is an index label), and the chained
                # ``group[col].iat[i] = ...`` assignment — which silently
                # stops working under pandas copy-on-write — is replaced
                # with a direct ``.loc`` write.
                dist_diff = abs(group['shape_dist_traveled'].iat[i]
                                - group['shape_dist_traveled'].loc[j])
                if dist_diff > dist_threshold:
                    group.loc[group.index[i], col] = 0
                else:
                    group.loc[group.index[i], col] = group[col].loc[j]
            # Interpolate remaining delays by distance.
            ind = np.where(group[col].notnull())[0]
            group[col] = np.interp(group['shape_dist_traveled'],
                                   group.iloc[ind]['shape_dist_traveled'],
                                   group.iloc[ind][col])
        return group

    f = f.groupby('trip_id').apply(fill)

    # Round
    f[delay_cols] = f[delay_cols].round(0)
    return f
[ "def", "interpolate_delays", "(", "augmented_stop_times", ",", "dist_threshold", ",", "delay_threshold", "=", "3600", ",", "delay_cols", "=", "None", ")", ":", "f", "=", "augmented_stop_times", ".", "copy", "(", ")", "if", "delay_cols", "is", "None", "or", "no...
37.02439
20.463415
def reload_current_page(self, *args, **kwds):
    '''Reload the currently displayed page.

    Every page is expected to implement a reload() method.
    '''
    current = self.notebook.get_current_page()
    page = self.notebook.get_nth_page(current)
    page.reload()
[ "def", "reload_current_page", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "index", "=", "self", ".", "notebook", ".", "get_current_page", "(", ")", "self", ".", "notebook", ".", "get_nth_page", "(", "index", ")", ".", "reload", "(",...
30.142857
16.428571
def body(self):
    """
    Return body request parameter

    :return: Body parameter
    :rtype: Parameter or None
    """
    matches = self.get_parameters_by_location(['body'])
    if not matches:
        return None
    return self.root.schemas.get(matches[0].type)
[ "def", "body", "(", "self", ")", ":", "body", "=", "self", ".", "get_parameters_by_location", "(", "[", "'body'", "]", ")", "return", "self", ".", "root", ".", "schemas", ".", "get", "(", "body", "[", "0", "]", ".", "type", ")", "if", "body", "else...
31.875
15.625
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available."""
    # Self-replacing stub: the first call rebinds the module-level name to
    # the imported implementation, so later calls skip this function.
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        # Fall back to distutils' LooseVersion when setuptools is absent.
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
[ "def", "parse_version", "(", "version", ")", ":", "global", "parse_version", "try", ":", "from", "pkg_resources", "import", "parse_version", "except", "ImportError", ":", "from", "distutils", ".", "version", "import", "LooseVersion", "as", "parse_version", "return",...
37.75
14.875
def send_url(amount, redirect_url, url, api):
    '''
    return payment gateway url to redirect user to it for payment.

    :param amount: payment amount (rials); must be numeric and >= 1000
        per the gateway's error codes
    :param redirect_url: URL the gateway redirects the user back to
    :param url: unused here — NOTE(review): confirm whether this
        parameter can be dropped by callers
    :param api: gateway API key
    :return: the payment gateway URL on success, otherwise None
        (errors are only printed)
    '''
    values = {'api': api, 'amount': amount, 'redirect': redirect_url}
    send_request = requests.post(SEND_URL_FINAL, data=values)
    # The gateway answers with a bare integer: positive = transaction id,
    # negative = error code.
    id_get = send_request.text
    print(id_get)
    if int(id_get) > 0:
        # Valid id — build the gateway redirect URL.
        print(".معتبر است id_get")
        payment_gateway_url = '%s%s' % (GATEWAY_URL_FINAL, id_get)
        return payment_gateway_url
    elif id_get == "-1":
        # presumably: the sent api key does not match the api type
        # configured in payline — TODO confirm translation
        print(
            "‫‪ api‬ارسالی با نوع ‪ api‬تعریف شده در ‪ payline‬سازگار نیست.‬")
    elif id_get == "-2":
        # presumably: amount is not numeric or is below 1000 rials
        print(
            "‫مقدار ‪ amount‬داده عددي نمی باشد و یا کمتر از 1000 ریال است.‬")
    elif id_get == "-3":
        # presumably: redirect value is null
        print("‫مقدار ‪ redirect‬رشته ‪ null‬است.‬")
    elif id_get == "-4":
        # presumably: no gateway found for the sent data, or it is pending
        print(
            "‫درگاهی با اطلاعات ارسالی یافت نشده و یا در حالت انتظار می باشد‬")
    else:
        print("some other error(s) occurred.")
[ "def", "send_url", "(", "amount", ",", "redirect_url", ",", "url", ",", "api", ")", ":", "values", "=", "{", "'api'", ":", "api", ",", "'amount'", ":", "amount", ",", "'redirect'", ":", "redirect_url", "}", "send_request", "=", "requests", ".", "post", ...
36.230769
20.769231
def paths(self):
    """
    Sequence of closed paths, encoded by entity index.

    Returns
    --------
    paths: (n,) sequence of (*,) int referencing self.entities
    """
    return traversal.closed_paths(self.entities, self.vertices)
[ "def", "paths", "(", "self", ")", ":", "paths", "=", "traversal", ".", "closed_paths", "(", "self", ".", "entities", ",", "self", ".", "vertices", ")", "return", "paths" ]
29.090909
18.363636
def walk_from_list(cls, files):
    """A function that mimics :func:`os.walk()` by simulating a directory
    with the list of files passed as an argument.

    :param files: A list of file paths
    :return: A function that mimics :func:`os.walk()` walking a directory
             containing only the files listed in the argument
    """
    simulated_tree = cls.list_to_tree(files)

    def walk(directory, **kwargs):
        # Closure over the prebuilt tree; extra kwargs are ignored for
        # os.walk() signature compatibility.
        return cls.tree_walk(directory, simulated_tree)

    return walk
[ "def", "walk_from_list", "(", "cls", ",", "files", ")", ":", "tree", "=", "cls", ".", "list_to_tree", "(", "files", ")", "def", "walk", "(", "directory", ",", "*", "*", "kwargs", ")", ":", "return", "cls", ".", "tree_walk", "(", "directory", ",", "tr...
35.714286
16.785714
def get_component(self, colour, tolerance=0, default=None):
    """
    Get the component corresponding to a display colour. This is for
    generating a Striplog object from a colour image of a striplog.

    Args:
        colour (str): The hex colour string to look up.
        tolerance (float): The colourspace distance within which to match.
        default (component or None): The component to return in the event
            of no match.

    Returns:
        component. The component best matching the provided colour.
    """
    # 195075 = 3 * 255**2, so sqrt(195075) ~ 441.67 is the maximum
    # possible RGB Euclidean distance.
    if not (0 <= tolerance <= np.sqrt(195075)):
        raise LegendError('Tolerance must be between 0 and 441.67')

    # Exact (case-insensitive) hex match first.
    for decor in self.__list:
        if colour.lower() == decor.colour:
            return decor.component

    # If we're here, we didn't find one yet.
    r1, g1, b1 = utils.hex_to_rgb(colour)

    # Start with a best match of black.
    # NOTE(review): best_match starts as the hex STRING '#000000'; if no
    # decor is closer than that distance, best_match.summary() below will
    # fail and best_match_colour will be unbound — latent bug, confirm.
    best_match = '#000000'
    best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)

    # Now compare to each colour in the legend.
    for decor in self.__list:
        r2, g2, b2 = decor.rgb
        distance = np.sqrt((r2-r1)**2. + (g2-g1)**2. + (b2-b1)**2.)
        if distance < best_match_dist:
            best_match = decor.component
            best_match_dist = distance
            best_match_colour = decor.colour

    if best_match_dist <= tolerance:
        return best_match
    else:
        # Out of tolerance: warn with the nearest candidate, then return
        # the caller-supplied default.
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            w = "No match found for {0} ".format(colour.lower())
            w += "with tolerance of {0}. Best match is ".format(tolerance)
            w += "{0}, {1}".format(best_match.summary(), best_match_colour)
            w += ", d={0}".format(best_match_dist)
            warnings.warn(w)
        return default
[ "def", "get_component", "(", "self", ",", "colour", ",", "tolerance", "=", "0", ",", "default", "=", "None", ")", ":", "if", "not", "(", "0", "<=", "tolerance", "<=", "np", ".", "sqrt", "(", "195075", ")", ")", ":", "raise", "LegendError", "(", "'T...
38.673469
19.44898
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):
    """
    Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows
    (.dll), or using a specific dynamic library on Linux/BSD (.so).

    This can also be used to configure oscrypto to use LibreSSL dynamic
    libraries.

    This method must be called before any oscrypto submodules are imported.

    :param libcrypto_path:
        A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
        dynamic library.

    :param libssl_path:
        A unicode string of the file path to the OpenSSL/LibreSSL libssl
        dynamic library.

    :param trust_list_path:
        An optional unicode string of the path to a file containing
        OpenSSL-compatible CA certificates in PEM format. If this is not
        provided and the platform is OS X or Windows, the system trust roots
        will be exported from the OS and used for all TLS connections.

    :raises:
        ValueError - when one of the paths is not a unicode string
        OSError - when the trust_list_path does not exist on the filesystem
        oscrypto.errors.LibraryNotFoundError - when one of the path does not
        exist on the filesystem
        RuntimeError - when this function is called after another part of
        oscrypto has been imported
    """
    # Validate all arguments up front, before taking the lock or touching
    # any module state.
    if not isinstance(libcrypto_path, str_cls):
        raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path))

    if not isinstance(libssl_path, str_cls):
        raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path))

    if not os.path.exists(libcrypto_path):
        raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path)

    if not os.path.exists(libssl_path):
        raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path)

    if trust_list_path is not None:
        if not isinstance(trust_list_path, str_cls):
            raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path))
        if not os.path.exists(trust_list_path):
            raise OSError('trust_list_path does not exist at %s' % trust_list_path)

    # The backend may only be set once, before any oscrypto submodule has
    # picked a backend; guard the check-and-set with the module lock.
    with _backend_lock:
        if _module_values['backend'] is not None:
            raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL')
        _module_values['backend'] = 'openssl'
        _module_values['backend_config'] = {
            'libcrypto_path': libcrypto_path,
            'libssl_path': libssl_path,
            'trust_list_path': trust_list_path,
        }
[ "def", "use_openssl", "(", "libcrypto_path", ",", "libssl_path", ",", "trust_list_path", "=", "None", ")", ":", "if", "not", "isinstance", "(", "libcrypto_path", ",", "str_cls", ")", ":", "raise", "ValueError", "(", "'libcrypto_path must be a unicode string, not %s'",...
43.491525
28.508475
def get_image_location(image, sdir, image_list, recurred=False):
    """Take a raw image name + directory and return the location of image.

    :param: image (string): the name of the raw image from the TeX
    :param: sdir (string): the directory where everything was unzipped to
    :param: image_list ([string, string, ...]): the list of images that
        were extracted from the tarball and possibly converted
    :param: recurred (bool): internal flag; True when this call is a
        retry on a fragment of the original name (prevents deeper
        recursion)

    :return: converted_image (string): the full path to the (possibly
        converted) image file, or None when no candidate is found

    NOTE(review): ``image.encode(...)`` returns bytes on Python 3, which
    would break the str regex calls below — this appears to be Python 2
    code; confirm before reuse.
    """
    if isinstance(image, list):
        # image is a list, not good
        return None

    image = image.encode('utf-8', 'ignore')
    image = image.strip()

    # Strip leading "figure=" / "file=" key prefixes from the TeX value.
    figure_or_file = '(figure=|file=)'
    figure_or_file_in_image = re.findall(figure_or_file, image)
    if len(figure_or_file_in_image) > 0:
        image = image.replace(figure_or_file_in_image[0], '')

    # Unwrap \includegraphics{...} to get the bare file name.
    includegraphics = r'\\includegraphics{(.+)}'
    includegraphics_in_image = re.findall(includegraphics, image)
    if len(includegraphics_in_image) > 0:
        image = includegraphics_in_image[0]

    image = image.strip()
    some_kind_of_tag = '\\\\\\w+ '
    if image.startswith('./'):
        image = image[2:]
    if re.match(some_kind_of_tag, image):
        # Drop a leading "\tag " fragment.
        image = image[len(image.split(' ')[0]) + 1:]
    if image.startswith('='):
        image = image[1:]
    if len(image) == 1:
        # A single character is not a plausible file name.
        return None
    image = image.strip()

    converted_image_should_be = get_converted_image_name(image)
    if image_list is None:
        image_list = os.listdir(sdir)
    # First pass: match against the extracted/converted file list.
    for png_image in image_list:
        png_image_rel = os.path.relpath(png_image, start=sdir)
        if converted_image_should_be == png_image_rel:
            return png_image

    # maybe it's in a subfolder (TeX just understands that)
    for prefix in ['eps', 'fig', 'figs', 'figures', 'figs', 'images']:
        if os.path.isdir(os.path.join(sdir, prefix)):
            image_list = os.listdir(os.path.join(sdir, prefix))
            for png_image in image_list:
                if converted_image_should_be == png_image:
                    return os.path.join(sdir, prefix, png_image)

    # maybe it is actually just loose.
    for png_image in os.listdir(sdir):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
        if os.path.isdir(os.path.join(sdir, png_image)):
            # try that, too! we just do two levels, because that's all that's
            # reasonable..
            sub_dir = os.path.join(sdir, png_image)
            for sub_dir_file in os.listdir(sub_dir):
                if os.path.split(converted_image_should_be)[-1] == sub_dir_file:  # noqa
                    return os.path.join(sub_dir, converted_image_should_be)

    # maybe it's actually up a directory or two: this happens in nested
    # tarballs where the TeX is stored in a different directory from the
    # images
    for png_image in os.listdir(os.path.split(sdir)[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be
    for png_image in os.listdir(os.path.split(os.path.split(sdir)[0])[0]):
        if os.path.split(converted_image_should_be)[-1] == png_image:
            return converted_image_should_be

    if recurred:
        return None

    # agh, this calls for drastic measures: retry on fragments of the
    # name split on common separators (one level of recursion only).
    for piece in image.split(' '):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split(','):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res
    for piece in image.split('='):
        res = get_image_location(piece, sdir, image_list, recurred=True)
        if res is not None:
            return res

    return None
[ "def", "get_image_location", "(", "image", ",", "sdir", ",", "image_list", ",", "recurred", "=", "False", ")", ":", "if", "isinstance", "(", "image", ",", "list", ")", ":", "# image is a list, not good", "return", "None", "image", "=", "image", ".", "encode"...
37.078431
21.068627
def nested_join(array, o_str='{', c_str='}'):
    '''
    Builds a string out of a given nested list.

    Args :
        array : An array returned by pyparsing nestedExpr.
        o_str : Opening str.
        c_str : Closing str.
    '''
    parts = []
    for x in array:
        if type(x) is list:
            # BUG FIX: propagate o_str/c_str into the recursion — the
            # original recursive call omitted them, so nested levels
            # silently fell back to the '{'/'}' defaults even when the
            # caller supplied custom delimiters.
            parts.append(o_str + nested_join(x, o_str, c_str) + c_str)
        else:
            parts.append(x)
    # ''.join avoids quadratic string concatenation.
    return ''.join(parts)
[ "def", "nested_join", "(", "array", ",", "o_str", "=", "'{'", ",", "c_str", "=", "'}'", ")", ":", "result", "=", "''", "for", "x", "in", "array", ":", "if", "type", "(", "x", ")", "==", "type", "(", "[", "]", ")", ":", "result", "+=", "o_str", ...
30.133333
16.8
def _run_info_from_yaml(dirs, run_info_yaml, config, sample_names=None, is_cwl=False, integrations=None):
    """Read run information from a passed YAML file.

    Validates and loads the YAML, extracts global configuration
    (globals/resources/integrations), then normalizes each sample item
    (lane, description, upload target, algorithm defaults, read group
    names, input files, variant files, per-program resources).

    :param dirs: dict of directories (may include "flowcell" and "work")
    :param run_info_yaml: path to the run info YAML file
    :param config: run configuration dict
    :param sample_names: optional list restricting which descriptions load
    :param is_cwl: True when preparing for a CWL run (skips upload setup)
    :param integrations: dict of integration name -> retriever object
    :return: list of normalized sample item dicts
    :raises ValueError: on missing descriptions or invalid validation setup
    """
    validate_yaml(run_info_yaml, run_info_yaml)
    with open(run_info_yaml) as in_handle:
        loaded = yaml.safe_load(in_handle)
    # Flowcell name/date come from the directory name when available ...
    fc_name, fc_date = None, None
    if dirs.get("flowcell"):
        try:
            fc_name, fc_date = flowcell.parse_dirname(dirs.get("flowcell"))
        except ValueError:
            pass
    global_config = {}
    global_vars = {}
    resources = {}
    integration_config = {}
    if isinstance(loaded, dict):
        # Dict form: top-level keys are global config, samples live under
        # "details".
        global_config = copy.deepcopy(loaded)
        del global_config["details"]
        # ... but explicit fc_name/fc_date in the YAML take precedence.
        if "fc_name" in loaded:
            fc_name = loaded["fc_name"].replace(" ", "_")
        if "fc_date" in loaded:
            fc_date = str(loaded["fc_date"]).replace(" ", "_")
        global_vars = global_config.pop("globals", {})
        resources = global_config.pop("resources", {})
        for iname in ["arvados"]:
            integration_config[iname] = global_config.pop(iname, {})
        loaded = loaded["details"]
    if sample_names:
        loaded = [x for x in loaded if x["description"] in sample_names]
    if integrations:
        for iname, retriever in integrations.items():
            if iname in config:
                config[iname] = retriever.set_cache(config[iname])
                loaded = retriever.add_remotes(loaded, config[iname])
    run_details = []
    for i, item in enumerate(loaded):
        item = _normalize_files(item, dirs.get("flowcell"))
        # Default lane to the 1-based item position.
        if "lane" not in item:
            item["lane"] = str(i + 1)
        item["lane"] = _clean_characters(item["lane"])
        if "description" not in item:
            if _item_is_bam(item):
                item["description"] = get_sample_name(item["files"][0])
            else:
                raise ValueError("No `description` sample name provided for input #%s" % (i + 1))
        description = _clean_characters(item["description"])
        item["description"] = description
        # make names R safe if we are likely to use R downstream
        if item["analysis"].lower() in R_DOWNSTREAM_ANALYSIS:
            if description[0].isdigit():
                valid = "X" + description
                logger.info("%s is not a valid R name, converting to %s." % (description, valid))
                item["description"] = valid
        if "upload" not in item and not is_cwl:
            upload = global_config.get("upload", {})
            # Handle specifying a local directory directly in upload
            if isinstance(upload, six.string_types):
                upload = {"dir": upload}
            if not upload:
                upload["dir"] = "../final"
            if fc_name:
                upload["fc_name"] = fc_name
            if fc_date:
                upload["fc_date"] = fc_date
            upload["run_id"] = ""
            if upload.get("dir"):
                upload["dir"] = _file_to_abs(upload["dir"], [dirs.get("work")], makedir=True)
            item["upload"] = upload
        # Resolve algorithm configuration: globals, absolute paths, then
        # analysis-specific defaults.
        item["algorithm"] = _replace_global_vars(item["algorithm"], global_vars)
        item["algorithm"] = genome.abs_file_paths(item["algorithm"],
                                                  ignore_keys=ALGORITHM_NOPATH_KEYS,
                                                  fileonly_keys=ALGORITHM_FILEONLY_KEYS,
                                                  do_download=all(not x for x in integrations.values()))
        item["genome_build"] = str(item.get("genome_build", ""))
        item["algorithm"] = _add_algorithm_defaults(item["algorithm"], item.get("analysis", ""), is_cwl)
        item["metadata"] = add_metadata_defaults(item.get("metadata", {}))
        item["rgnames"] = prep_rg_names(item, config, fc_name, fc_date)
        if item.get("files"):
            item["files"] = [genome.abs_file_paths(f, do_download=all(not x for x in integrations.values()))
                             for f in item["files"]]
        elif "files" in item:
            # Present but empty/falsy: drop the key entirely.
            del item["files"]
        if item.get("vrn_file") and isinstance(item["vrn_file"], six.string_types):
            item["vrn_file"] = genome.abs_file_paths(item["vrn_file"],
                                                     do_download=all(not x for x in integrations.values()))
            if os.path.isfile(item["vrn_file"]):
                # Try to prepare in place (or use ready to go inputs)
                try:
                    item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config, remove_orig=False)
                # In case of permission errors, fix in inputs directory
                except IOError:
                    inputs_dir = utils.safe_makedir(os.path.join(dirs.get("work", os.getcwd()), "inputs",
                                                                 item["description"]))
                    item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config, remove_orig=False,
                                                                out_dir=inputs_dir)
        if not tz.get_in(("metadata", "batch"), item) and tz.get_in(["algorithm", "validate"], item):
            raise ValueError("%s: Please specify a metadata batch for variant file (vrn_file) input.\n" % (item["description"]) +
                             "Batching with a standard sample provides callable regions for validation.")
        item = _clean_metadata(item)
        item = _clean_algorithm(item)
        item = _organize_tools_on(item, is_cwl)
        item = _clean_background(item)
        # Add any global resource specifications
        if "resources" not in item:
            item["resources"] = {}
        for prog, pkvs in resources.items():
            if prog not in item["resources"]:
                item["resources"][prog] = {}
            if pkvs is not None:
                for key, val in pkvs.items():
                    item["resources"][prog][key] = val
        # Copy non-empty integration settings onto each item.
        for iname, ivals in integration_config.items():
            if ivals:
                if iname not in item:
                    item[iname] = {}
                for k, v in ivals.items():
                    item[iname][k] = v
        run_details.append(item)
    _check_sample_config(run_details, run_info_yaml, config)
    return run_details
[ "def", "_run_info_from_yaml", "(", "dirs", ",", "run_info_yaml", ",", "config", ",", "sample_names", "=", "None", ",", "is_cwl", "=", "False", ",", "integrations", "=", "None", ")", ":", "validate_yaml", "(", "run_info_yaml", ",", "run_info_yaml", ")", "with",...
50.023622
20.409449
def __reorganize_authors(authors):
    """
    Separate the string of authors and put it into a BibJSON compliant list

    :param str authors:
    :return list: List of dictionaries of author names.
    """
    # String SHOULD be semi-colon separated names.
    names = []
    for author in authors.split(";"):
        try:
            names.append({"name": author.strip()})
        except AttributeError:
            logger_noaa_lpd.warning("reorganize_authors: AttributeError: authors incorrectly formatted")
    return names
[ "def", "__reorganize_authors", "(", "authors", ")", ":", "# String SHOULD be semi-colon separated names.", "l", "=", "[", "]", "s", "=", "authors", ".", "split", "(", "\";\"", ")", "for", "author", "in", "s", ":", "try", ":", "l", ".", "append", "(", "{", ...
37.8
18.333333
def summarize_classes(self):
    """
    Summary of classes: names, numeric labels and sizes

    Returns
    -------
    tuple : class_set, label_set, class_sizes
        class_set : list
            List of names of all the classes
        label_set : list
            Label for each class in class_set
        class_sizes : list
            Size of each class (number of samples)
    """
    sizes = np.zeros(len(self.class_set))
    for position, class_name in enumerate(self.class_set):
        sizes[position] = self.class_sizes[class_name]

    # TODO consider returning numeric label set e.g. for use in scikit-learn
    return self.class_set, self.label_set, sizes
[ "def", "summarize_classes", "(", "self", ")", ":", "class_sizes", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "class_set", ")", ")", "for", "idx", ",", "cls", "in", "enumerate", "(", "self", ".", "class_set", ")", ":", "class_sizes", "[", ...
30.086957
19.391304
def run(self):
    """ Starts a development server for the zengine application """
    args = self.manager.args
    print("Development server started on http://%s:%s. \n\nPress Ctrl+C to stop\n" % (
        args.addr, args.port)
    )
    if args.server_type == 'falcon':
        self.run_with_falcon()
    elif args.server_type == 'tornado':
        self.run_with_tornado()
[ "def", "run", "(", "self", ")", ":", "print", "(", "\"Development server started on http://%s:%s. \\n\\nPress Ctrl+C to stop\\n\"", "%", "(", "self", ".", "manager", ".", "args", ".", "addr", ",", "self", ".", "manager", ".", "args", ".", "port", ")", ")", "if...
37.666667
14.5
def clear(self) -> None:
    """Resets all headers and content for this response."""
    default_headers = {
        "Server": "TornadoServer/%s" % tornado.version,
        "Content-Type": "text/html; charset=UTF-8",
        "Date": httputil.format_timestamp(time.time()),
    }
    self._headers = httputil.HTTPHeaders(default_headers)
    self.set_default_headers()
    self._write_buffer = []  # type: List[bytes]
    self._status_code = 200
    self._reason = httputil.responses[200]
[ "def", "clear", "(", "self", ")", "->", "None", ":", "self", ".", "_headers", "=", "httputil", ".", "HTTPHeaders", "(", "{", "\"Server\"", ":", "\"TornadoServer/%s\"", "%", "tornado", ".", "version", ",", "\"Content-Type\"", ":", "\"text/html; charset=UTF-8\"", ...
39.615385
15.692308
def wrap(text, width=70, **kwargs):
    """Wrap multiple paragraphs of text, returning a list of wrapped lines.

    Reformat the multiple paragraphs 'text' so they fit in lines of no
    more than 'width' columns, and return a list of wrapped lines.  By
    default, tabs in 'text' are expanded with string.expandtabs(), and
    all other whitespace characters (including newline) are converted to
    space.  See ParagraphWrapper class for available keyword args to
    customize wrapping behaviour.
    """
    wrapper = ParagraphWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
[ "def", "wrap", "(", "text", ",", "width", "=", "70", ",", "*", "*", "kwargs", ")", ":", "w", "=", "ParagraphWrapper", "(", "width", "=", "width", ",", "*", "*", "kwargs", ")", "return", "w", ".", "wrap", "(", "text", ")" ]
47.583333
20.583333
def _check_hyperedge_attributes_consistency(self): """Consistency Check 1: consider all hyperedge IDs listed in _hyperedge_attributes :raises: ValueError -- detected inconsistency among dictionaries """ # required_attrs are attributes that every hyperedge must have. required_attrs = ['weight', 'tail', 'head', '__frozen_tail', '__frozen_head'] # Get list of hyperedge_ids from the hyperedge attributes dict hyperedge_ids_from_attributes = set(self._hyperedge_attributes.keys()) # Perform consistency checks on each hyperedge id. for hyperedge_id in hyperedge_ids_from_attributes: # Check 1.1: make sure every hyperedge id has a weight, # tail, head, frozen tail, and frozen head hyperedge_attr_dict = self._hyperedge_attributes[hyperedge_id] for required_attr in required_attrs: if required_attr not in hyperedge_attr_dict: raise ValueError( 'Consistency Check 1.1 Failed: hyperedge ' + 'attribute dictionary for hyperedge_id ' + '%s is missing required attribute %s' % (hyperedge_id, required_attr)) # Check 1.2: make sure frozenset(tail) == __frozen_tail if frozenset(hyperedge_attr_dict['tail']) != \ hyperedge_attr_dict['__frozen_tail']: raise ValueError( 'Consistency Check 1.2 Failed: frozenset ' + 'tail is different from __frozen_tail ' + 'attribute for hyperedge id %s' % (hyperedge_id)) # Check 1.3: make sure frozenset(head) == __frozen_head if frozenset(hyperedge_attr_dict['head']) != \ hyperedge_attr_dict['__frozen_head']: raise ValueError( 'Consistency Check 1.3 Failed: frozenset ' + 'head is different from __frozen_head ' + 'attribute for hyperedge id %s' % (hyperedge_id)) # get tail and head frozenset tailset = hyperedge_attr_dict['__frozen_tail'] headset = hyperedge_attr_dict['__frozen_head'] # Check 1.4: make sure successors dictionary contains the # hyperedge id. Need to also check that tailset and # headset are entries into the dict. 
if tailset not in self._successors or \ headset not in self._successors[tailset] or \ self._successors[tailset][headset] != hyperedge_id: raise ValueError( 'Consistency Check 1.4 Failed: hyperedge ' + 'id %s not in self._successors.' % (hyperedge_id)) # Check 1.5: make sure predecessors dictionary contains # the hyperedge id. Need to also check that headset and # tailset are entries into the dict. if headset not in self._predecessors or \ tailset not in self._predecessors[headset] or \ self._predecessors[headset][tailset] != hyperedge_id: raise ValueError( 'Consistency Check 1.5 Failed: hyperedge ' + 'id %s not in self._predecessors.' % (hyperedge_id)) # Check 1.6: make sure every tail node in tailset # contains the hyperedge_id in the forward star. for tail_node in tailset: if hyperedge_id not in self._forward_star[tail_node]: raise ValueError( 'Consistency Check 1.6 Failed: hyperedge ' + 'id ' + hyperedge_id + ' is not in the ' + 'forward star of tail node ' + tail_node) # Check 1.7: make sure every head node in headset # contains the hyperedge_id in the backward star. for head_node in headset: if hyperedge_id not in self._backward_star[head_node]: raise ValueError( 'Consistency Check 1.7 Failed: hyperedge ' + 'id ' + hyperedge_id + ' is not in the ' + 'backward star of head node ' + tail_node)
[ "def", "_check_hyperedge_attributes_consistency", "(", "self", ")", ":", "# required_attrs are attributes that every hyperedge must have.", "required_attrs", "=", "[", "'weight'", ",", "'tail'", ",", "'head'", ",", "'__frozen_tail'", ",", "'__frozen_head'", "]", "# Get list o...
49.623529
22.2
def CurrentNode(self):
    """Hacking interface allowing to get the xmlNodePtr correponding to
    the current node being accessed by the xmlTextReader. This is
    dangerous because the underlying node may be destroyed on the next
    Reads.
    """
    raw_node = libxml2mod.xmlTextReaderCurrentNode(self._o)
    if raw_node is None:
        raise treeError('xmlTextReaderCurrentNode() failed')
    # Wrap the raw C pointer in the Python-level node class.
    return xmlNode(_obj=raw_node)
[ "def", "CurrentNode", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlTextReaderCurrentNode", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlTextReaderCurrentNode() failed'", ")", "__tmp", "=", "xmlNode",...
50.111111
16.333333
def maximum(attrs, inputs, proto_obj):
    """
    Elementwise maximum of arrays.
    MXNet maximum compares only two symbols at a time.
    ONNX can send more than two to compare.
    Breaking into multiple mxnet ops to compare two symbols at a time
    """
    if len(inputs) > 1:
        # Fold the inputs pairwise, left to right.
        operands = iter(inputs)
        result = next(operands)
        for operand in operands:
            result = symbol.maximum(result, operand)
    else:
        # Single input: compare it against itself so a symbol is still
        # produced.
        result = symbol.maximum(inputs[0], inputs[0])
    return result, attrs, inputs
[ "def", "maximum", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "len", "(", "inputs", ")", ">", "1", ":", "mxnet_op", "=", "symbol", ".", "maximum", "(", "inputs", "[", "0", "]", ",", "inputs", "[", "1", "]", ")", "for", "op_inpu...
37.142857
11.428571
def fast_roc(actuals, controls):
    """Approximate the area under the ROC curve for actuals vs controls.

    Uses all values appearing in actuals as thresholds and lower sum
    interpolation. Also returns arrays of the true positive rate and the
    false positive rate that can be used for plotting the roc curve.

    Parameters:
    actuals : list
        A list of numeric values for positive observations.
    controls : list
        A list of numeric values for negative observations.
    """
    assert type(actuals) is np.ndarray
    assert type(controls) is np.ndarray

    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')

    # Descending thresholds: +inf, every distinct actual value, -inf.
    thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1]

    # Fraction of positives/negatives at or above each threshold,
    # computed for all thresholds at once via broadcasting.
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    column = thresholds[:, np.newaxis]
    true_pos_rate = (actuals >= column).sum(axis=1) / num_act
    false_pos_rate = (controls >= column).sum(axis=1) / num_ctr

    # Lower-sum (left endpoint) integration of TPR over FPR.
    auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])

    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1 - true_pos_rate[-3]) * .5 * (1 - false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
[ "def", "fast_roc", "(", "actuals", ",", "controls", ")", ":", "assert", "(", "type", "(", "actuals", ")", "is", "np", ".", "ndarray", ")", "assert", "(", "type", "(", "controls", ")", "is", "np", ".", "ndarray", ")", "actuals", "=", "np", ".", "rav...
40.589744
17.25641
def delta_hv(scatterer):
    """
    Delta_hv for the current setup.

    Args:
        scatterer: a Scatterer instance.

    Returns:
        Delta_hv [rad].
    """
    phase_matrix = scatterer.get_Z()
    numerator = phase_matrix[2, 3] - phase_matrix[3, 2]
    denominator = -phase_matrix[2, 2] - phase_matrix[3, 3]
    return np.arctan2(numerator, denominator)
[ "def", "delta_hv", "(", "scatterer", ")", ":", "Z", "=", "scatterer", ".", "get_Z", "(", ")", "return", "np", ".", "arctan2", "(", "Z", "[", "2", ",", "3", "]", "-", "Z", "[", "3", ",", "2", "]", ",", "-", "Z", "[", "2", ",", "2", "]", "-...
19.833333
17.333333
def copy(self, repository=None, tag=None,
         source_transport=None, target_transport=SkopeoTransport.DOCKER,
         source_path=None, target_path=None, logs=True):
    """
    Copy this image

    :param repository to be copied to
    :param tag
    :param source_transport Transport
    :param target_transport Transport
    :param source_path needed to specify for dir, docker-archive or oci transport
    :param target_path needed to specify for dir, docker-archive or oci transport
    :param logs enable/disable logs
    :return: the new DockerImage
    """
    # Fall back to this image's own name/tag when none are given.
    if not repository:
        repository = self.name
    if not tag:
        tag = self.tag or "latest"
    # OSTree targets do not support tags.
    if target_transport == SkopeoTransport.OSTREE and tag and logs:
        logging.warning("tag was ignored")

    new_image = DockerImage(repository, tag,
                            pull_policy=DockerImagePullPolicy.NEVER)
    new_image = new_image.using_transport(target_transport, target_path)
    self.using_transport(source_transport, source_path)
    try:
        run_cmd(["skopeo", "copy",
                 transport_param(self), transport_param(new_image)])
    except subprocess.CalledProcessError:
        raise ConuException("There was an error while copying repository",
                            self.name)
    return new_image
[ "def", "copy", "(", "self", ",", "repository", "=", "None", ",", "tag", "=", "None", ",", "source_transport", "=", "None", ",", "target_transport", "=", "SkopeoTransport", ".", "DOCKER", ",", "source_path", "=", "None", ",", "target_path", "=", "None", ","...
40.558824
16.970588
def dusk(self, date=None, local=True, use_elevation=True):
    """Calculates the dusk time (the time in the evening when the sun is
    a certain number of degrees below the horizon. By default this is 6
    degrees but can be changed by setting the :attr:`solar_depression`
    property.)

    :param date: The date for which to calculate the dusk time.
                 If no date is specified then the current date will be
                 used.
    :param local: True = Time to be returned in location's time zone;
                  False = Time to be returned in UTC.
    :param use_elevation: True = Return times that allow for the
                          location's elevation; False = ignore elevation.

    :returns: The date and time at which dusk occurs.
    """
    # Guard: local results are impossible without a timezone.
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")
    # Lazily create the astral calculator on first use.
    if self.astral is None:
        self.astral = Astral()
    if date is None:
        date = datetime.date.today()

    observer_elevation = self.elevation if use_elevation else 0
    dusk_time = self.astral.dusk_utc(
        date, self.latitude, self.longitude,
        observer_elevation=observer_elevation)
    return dusk_time.astimezone(self.tz) if local else dusk_time
[ "def", "dusk", "(", "self", ",", "date", "=", "None", ",", "local", "=", "True", ",", "use_elevation", "=", "True", ")", ":", "if", "local", "and", "self", ".", "timezone", "is", "None", ":", "raise", "ValueError", "(", "\"Local time requested but Location...
41.225
24.9
def get_translations(self, domain=None, locale=None):
    """Load translations for given or configuration domain.

    :param domain: Messages domain (str)
    :param locale: Locale object
    """
    # Resolve defaults from the instance configuration.
    if locale is None:
        if self.locale is None:
            return support.NullTranslations()
        locale = self.locale
    if domain is None:
        domain = self.cfg.domain

    cache_key = (domain, locale.language)
    if cache_key not in self.translations:
        # Merge catalogs from all configured locale directories, with
        # earlier directories taking precedence (hence the reversal).
        merged = None
        for locales_dir in reversed(self.cfg.locales_dirs):
            loaded = support.Translations.load(
                locales_dir, locales=locale, domain=domain)
            if merged:
                merged._catalog.update(loaded._catalog)
            else:
                merged = loaded
        self.translations[cache_key] = merged
    return self.translations[cache_key]
[ "def", "get_translations", "(", "self", ",", "domain", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "locale", "is", "None", ":", "if", "self", ".", "locale", "is", "None", ":", "return", "support", ".", "NullTranslations", "(", ")", "locale...
33.344828
18.724138
def reversebait(self, maskmiddle='f', k=19):
    """
    Use the freshly-baited FASTQ files to bait out sequence from the original
    target files. This will reduce the number of possible targets against
    which the baited reads must be aligned

    :param maskmiddle: bbduk 'maskmiddle' setting ('t'/'f') controlling
        whether the middle base of a kmer is treated as a wildcard
    :param k: kmer size to use when no instance-level kmer size is set
    """
    logging.info('Performing reverse kmer baiting of targets with FASTQ files')
    # Honour a configured kmer size; otherwise fall back to the k argument.
    if self.kmer_size is None:
        kmer = k
    else:
        kmer = self.kmer_size
    with progressbar(self.runmetadata) as bar:
        for sample in bar:
            # Only process samples with an assembly that requested this analysis
            if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
                outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa')
                # Build the bbduk.sh reverse-baiting system call
                sample[self.analysistype].revbbdukcmd = \
                    'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' \
                    'maskmiddle={mm} outm={outm}' \
                    .format(mem=self.mem,
                            ref=sample[self.analysistype].baitedfastq,
                            in1=sample[self.analysistype].baitfile,
                            kmer=kmer,
                            cpus=str(self.cpus),
                            mcf=self.cutoff,
                            mm=maskmiddle,
                            outm=outfile)
                # Run the system call (if necessary)
                if not os.path.isfile(outfile):
                    out, err = run_subprocess(sample[self.analysistype].revbbdukcmd)
                    # BUGFIX: log the reverse-bait command that was just
                    # executed (revbbdukcmd); the original logged the
                    # unrelated forward-bait command (bbdukcmd) here.
                    write_to_logfile(sample[self.analysistype].revbbdukcmd,
                                     sample[self.analysistype].revbbdukcmd,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout,
                                     sample[self.analysistype].logerr)
                    write_to_logfile(out,
                                     err,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout,
                                     sample[self.analysistype].logerr)
                # Set the baitfile to use in the mapping steps as the newly
                # created outfile
                sample[self.analysistype].baitfile = outfile
[ "def", "reversebait", "(", "self", ",", "maskmiddle", "=", "'f'", ",", "k", "=", "19", ")", ":", "logging", ".", "info", "(", "'Performing reverse kmer baiting of targets with FASTQ files'", ")", "if", "self", ".", "kmer_size", "is", "None", ":", "kmer", "=", ...
62.842105
28.052632
def download_vault_folder(remote_path, local_path, dry_run=False, force=False):
    """Recursively downloads a folder in a vault to a local directory.

    Only downloads files, not datasets.

    :param remote_path: full vault path of the folder to download
    :param local_path: local directory to download into
    :param dry_run: if True, only print what would be done
    :param force: if True, delete and re-download files that already exist
    """
    local_path = os.path.normpath(os.path.expanduser(local_path))
    if not os.access(local_path, os.W_OK):
        raise Exception(
            'Write access to local path ({}) is required'
            .format(local_path))

    full_path, path_dict = solvebio.Object.validate_full_path(remote_path)
    vault = solvebio.Vault.get_by_full_path(path_dict['vault'])
    print(f'Downloading all files from {full_path} to {local_path}')

    # The vault root has no parent object; anything else must resolve to
    # an existing folder object.
    if path_dict['path'] == '/':
        parent_object_id = None
    else:
        parent_object = solvebio.Object.get_by_full_path(
            remote_path, assert_type='folder')
        parent_object_id = parent_object.id

    # Mirror the remote sub-folder layout locally before fetching files.
    print(f'Creating local directory structure at: {local_path}')
    if not os.path.exists(local_path) and not dry_run:
        os.makedirs(local_path)

    for folder in vault.folders(parent_object_id=parent_object_id):
        folder_target = os.path.normpath(local_path + folder.path)
        if not os.path.exists(folder_target):
            print(f'Creating folder: {folder_target}')
            if not dry_run:
                os.makedirs(folder_target)

    for file_obj in vault.files(parent_object_id=parent_object_id):
        file_target = os.path.normpath(local_path + file_obj.path)
        if os.path.exists(file_target):
            if not force:
                print(f'Skipping file (already exists): {file_target}')
                continue
            # Force: remove the stale local copy, then fall through to
            # the download below.
            print(f'Deleting local file (force download): {file_target}')
            if not dry_run:
                os.remove(file_target)
        print(f'Downloading file: {file_target}')
        if not dry_run:
            file_obj.download(file_target)
[ "def", "download_vault_folder", "(", "remote_path", ",", "local_path", ",", "dry_run", "=", "False", ",", "force", "=", "False", ")", ":", "local_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "expanduser", "(", "local_path", ...
38.058824
19
def _parametersToDefaults(self, parameters):
    """
    Extract the defaults from C{parameters}, constructing a dictionary
    mapping parameter names to default values, suitable for passing to
    L{ListChangeParameter}.

    @type parameters: C{list} of L{liveform.Parameter} or
        L{liveform.ChoiceParameter}.

    @rtype: C{dict}
    """
    defaults = {}
    for param in parameters:
        if isinstance(param, liveform.ChoiceParameter):
            # A choice parameter's default is the list of selected values.
            defaults[param.name] = [
                choice.value for choice in param.choices if choice.selected]
        else:
            defaults[param.name] = param.default
    return defaults
[ "def", "_parametersToDefaults", "(", "self", ",", "parameters", ")", ":", "defaults", "=", "{", "}", "for", "p", "in", "parameters", ":", "if", "isinstance", "(", "p", ",", "liveform", ".", "ChoiceParameter", ")", ":", "selected", "=", "[", "]", "for", ...
34.409091
14.590909
def set_fold_trigger(block, val):
    """
    Set the block fold trigger flag (True means the block is a fold trigger).

    :param block: block to set
    :param val: value to set
    """
    if block is None:
        return
    # An uninitialised user state (-1) is treated as all-zero.
    current = block.userState()
    if current == -1:
        current = 0
    # 0x7BFFFFFF clears bit 26 (the trigger flag) and bit 31; the new
    # flag value is then written into bit 26.
    cleared = current & 0x7BFFFFFF
    block.setUserState(cleared | (int(val) << 26))
[ "def", "set_fold_trigger", "(", "block", ",", "val", ")", ":", "if", "block", "is", "None", ":", "return", "state", "=", "block", ".", "userState", "(", ")", "if", "state", "==", "-", "1", ":", "state", "=", "0", "state", "&=", "0x7BFFFFFF", "state",...
26.25
14