text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def iter_ensure_instance(iterable, types):
    """Iterate over an object and type-check every item.

    :param iterable: object to iterate over; must itself be an Iterable.
    :param types: type (or types) each item must be an instance of.
    :raises TypeError: if *iterable* is not iterable or an item fails the check.

    >>> iter_ensure_instance([1,2,3], [str])
    Traceback (most recent call last):
    TypeError:
    >>> iter_ensure_instance([1,2,3], int)
    >>> iter_ensure_instance(1, int)
    Traceback (most recent call last):
    TypeError:
    """
    ensure_instance(iterable, Iterable)
    # FIX: the original used a list comprehension purely for its side effect,
    # building and discarding a list of Nones.  A plain loop states the intent
    # (raise on the first bad item) without the throwaway allocation.
    for item in iterable:
        ensure_instance(item, types)
[ "def", "iter_ensure_instance", "(", "iterable", ",", "types", ")", ":", "ensure_instance", "(", "iterable", ",", "Iterable", ")", "[", "ensure_instance", "(", "item", ",", "types", ")", "for", "item", "in", "iterable", "]" ]
30.428571
9.571429
def initialize(self, configfile=None):
    """Initialize and load the Fortran library (and model, if applicable).

    The Fortran library is loaded and ctypes is used to annotate functions
    inside the library. The Fortran library's initialization is called.
    Normally a path to an ``*.ini`` model file is passed to the
    :meth:`__init__`. If so, that model is loaded. Note that
    :meth:`_load_model` changes the working directory to that of the model.

    :param configfile: optional path to the model file; when omitted, the
        ``configfile`` given at construction time is used.
    :raises ValueError: if no configfile was supplied here or at construction.
    :raises RuntimeError: if the Fortran ``initialize`` call returns a
        non-zero exit code.
    """
    if configfile is not None:
        self.configfile = configfile

    # Fail fast when neither the constructor nor this call supplied a config.
    try:
        self.configfile
    except AttributeError:
        raise ValueError("Specify configfile during construction or during initialize")

    # Resolve the absolute path BEFORE changing directory, then run from the
    # model's directory so relative paths inside the model file resolve.
    abs_name = os.path.abspath(self.configfile)
    os.chdir(os.path.dirname(self.configfile) or '.')
    logmsg = "Loading model {} in directory {}".format(
        self.configfile,
        os.path.abspath(os.getcwd())
    )
    logger.info(logmsg)

    # Fortran init function.
    self.library.initialize.argtypes = [c_char_p]
    self.library.initialize.restype = None
    # initialize by abs_name because we already chdirred
    # if configfile is a relative path we would have a problem
    # NOTE(review): abs_name is passed as str to a c_char_p argument --
    # presumably `wrap` handles the bytes encoding on Python 3; confirm.
    ierr = wrap(self.library.initialize)(abs_name)
    if ierr:
        errormsg = "Loading model {config} failed with exit code {code}"
        raise RuntimeError(errormsg.format(config=self.configfile, code=ierr))
[ "def", "initialize", "(", "self", ",", "configfile", "=", "None", ")", ":", "if", "configfile", "is", "not", "None", ":", "self", ".", "configfile", "=", "configfile", "try", ":", "self", ".", "configfile", "except", "AttributeError", ":", "raise", "ValueE...
43.628571
20.914286
def on_data(self, data): """ This is the function called by the handler object upon receipt of incoming client data. The data is passed to the responder's parser class (via the :method:`consume` method), which digests and stores the HTTP data. Upon completion of parsing the HTTP headers, the responder creates the request and response objects, and passes them to the begin_application method, which starts the parent application's middleware chain. Parameters: data (bytes): HTTP data from the socket, expected to be passed directly from the transport/protocol objects. Raises: HTTPErrorBadRequest: If there is a problem parsing headers or body length exceeds expectation. """ # Headers have not been read in yet if len(self.headers) == 0: # forward data to the parser data = self.parser.consume(data) # Headers are finished - build the request and response if data is not None: # setup the request line attributes self.set_request_line(self.parser.method, self.parser.parsed_url, self.parser.version) # initialize "content_length" and "body_buffer" attributes self.init_body_buffer(self.method, self.headers) # builds request and response out of self.headers and protocol self.req, self.res = self.build_req_and_res() # add instruct handler to begin running the application # with the created req and res pairs self._handler.begin_application(self.req, self.res) # if truthy, 'data' now holds body data if data: self.validate_and_store_body_data(data) # if we have reached end of content - put in the request's body if len(self.body_buffer) == self.content_length: self.set_body_data(bytes(self.body_buffer))
[ "def", "on_data", "(", "self", ",", "data", ")", ":", "# Headers have not been read in yet", "if", "len", "(", "self", ".", "headers", ")", "==", "0", ":", "# forward data to the parser", "data", "=", "self", ".", "parser", ".", "consume", "(", "data", ")", ...
40.038462
23
def exponential_backoff(attempt: int, cap: int = 1200) -> timedelta:
    """Calculate a delay to retry using an exponential backoff algorithm.

    It is an exponential backoff with random jitter to prevent failures
    from being retried at the same time. It is a good fit for most
    applications. The returned delay lies in ``[temp/2, temp]`` where
    ``temp = min(3 * 2**attempt, cap)``.

    :arg attempt: the number of attempts made
    :arg cap: maximum delay in seconds, defaults to 20 minutes
    :return: the delay to wait before the next retry
    """
    base = 3
    temp = min(base * 2 ** attempt, cap)
    # BUG FIX: random.randint() requires integer bounds, but ``temp / 2`` is
    # a float under true division, so the original raised ValueError.
    # random.uniform() keeps the intended jitter over [0, temp/2].
    return timedelta(seconds=temp / 2 + random.uniform(0, temp / 2))
[ "def", "exponential_backoff", "(", "attempt", ":", "int", ",", "cap", ":", "int", "=", "1200", ")", "->", "timedelta", ":", "base", "=", "3", "temp", "=", "min", "(", "base", "*", "2", "**", "attempt", ",", "cap", ")", "return", "timedelta", "(", "...
39.692308
19.846154
def _get_vlan_body_on_trunk_int(self, nexus_host, vlanid, intf_type,
                                interface, is_native, is_delete,
                                add_mode):
    """Prepares an XML snippet for VLAN on a trunk interface.

    :param nexus_host: IP address of Nexus switch
    :param vlanid: Vlanid(s) to add to interface
    :param intf_type: String which specifies interface type.
                      example: ethernet
    :param interface: String indicating which interface.
                      example: 1/19
    :param is_native: Is native vlan config desired?
    :param is_delete: Is this a delete operation?
    :param add_mode: Add mode trunk
    :returns path_snippet, body_snippet
    """
    starttime = time.time()

    LOG.debug("NexusDriver get if body config for host %s: "
              "if_type %s port %s",
              nexus_host, intf_type, interface)
    # Physical vs port-channel interfaces use different MOs and DN paths.
    if intf_type == "ethernet":
        body_if_type = "l1PhysIf"
        path_interface = "phys-[eth" + interface + "]"
    else:
        body_if_type = "pcAggrIf"
        path_interface = "aggr-[po" + interface + "]"

    path_snip = (snipp.PATH_IF % (path_interface))

    mode = snipp.BODY_PORT_CH_MODE if add_mode else ''
    if is_delete:
        # "-" removes the vlan(s) from the allowed list.
        increment_it = "-"
        debug_desc = "delif"
        native_vlan = ""
    else:
        native_vlan = 'vlan-' + str(vlanid)
        debug_desc = "createif"
        # BUG FIX: the original tested ``vlanid is ""`` -- an identity
        # comparison against a string literal, which is implementation
        # dependent and raises SyntaxWarning on modern CPython.  Use the
        # value comparison that was intended.
        if vlanid == "":
            increment_it = ""
        else:
            # "+" appends the vlan(s) to the allowed list.
            increment_it = "+"

    if is_native:
        body_snip = (snipp.BODY_NATIVE_TRUNKVLAN % (
            body_if_type, mode,
            increment_it + str(vlanid),
            str(native_vlan)))
    else:
        body_snip = (snipp.BODY_TRUNKVLAN % (
            body_if_type, mode,
            increment_it + str(vlanid)))

    self.capture_and_print_timeshot(
        starttime, debug_desc, switch=nexus_host)

    return path_snip, body_snip
[ "def", "_get_vlan_body_on_trunk_int", "(", "self", ",", "nexus_host", ",", "vlanid", ",", "intf_type", ",", "interface", ",", "is_native", ",", "is_delete", ",", "add_mode", ")", ":", "starttime", "=", "time", ".", "time", "(", ")", "LOG", ".", "debug", "(...
36.333333
16.403509
def wash_for_xml(text, xml_version='1.0'):
    """Remove any character which isn't an allowed character for XML.

    The allowed characters depend on the version of XML.

        - XML 1.0: <http://www.w3.org/TR/REC-xml/#charsets>
        - XML 1.1: <http://www.w3.org/TR/xml11/#charsets>

    :param text: input string to wash.
    :param xml_version: version of the XML for which we wash the input.
        Value for this parameter can be '1.0' or '1.1'
    """
    # Choose the whitelist pattern for the requested version; anything other
    # than '1.0' falls through to the XML 1.1 rules.
    if xml_version == '1.0':
        allowed = RE_ALLOWED_XML_1_0_CHARS
    else:
        allowed = RE_ALLOWED_XML_1_1_CHARS
    washed = allowed.sub('', unicode(text, 'utf-8'))
    return washed.encode('utf-8')
[ "def", "wash_for_xml", "(", "text", ",", "xml_version", "=", "'1.0'", ")", ":", "if", "xml_version", "==", "'1.0'", ":", "return", "RE_ALLOWED_XML_1_0_CHARS", ".", "sub", "(", "''", ",", "unicode", "(", "text", ",", "'utf-8'", ")", ")", ".", "encode", "(...
34
16.857143
def from_ligolw_table(cls, table, columns=None, cast_to_dtypes=None):
    """Converts the given ligolw table into an FieldArray. The `tableName`
    attribute is copied to the array's `name`.

    Parameters
    ----------
    table : LIGOLw table instance
        The table to convert.
    columns : {None|list}
        Optionally specify a list of columns to retrieve. All of the columns
        must be in the table's validcolumns attribute. If None provided, all
        the columns in the table will be converted.
    cast_to_dtypes : {None | dict}
        Override the columns' dtypes using the given dictionary. The
        dictionary should be keyed by the column names, with the values a
        tuple that can be understood by numpy.dtype. For example, to cast a
        ligolw column called "foo" to a field called "bar" with type float,
        cast_to_dtypes would be: ``{"foo": ("bar", float)}``.

    Returns
    -------
    array : FieldArray
        The input table as an FieldArray.
    """
    name = table.tableName.split(':')[0]
    if columns is None:
        # get all the columns
        columns = table.validcolumns
    else:
        # note: this will raise a KeyError if one or more columns is
        # not in the table's validcolumns
        new_columns = {}
        for col in columns:
            new_columns[col] = table.validcolumns[col]
        columns = new_columns
    if cast_to_dtypes is not None:
        dtype = [cast_to_dtypes[col] for col in columns]
    else:
        # NOTE(review): columns.items() yields (name, type-string) pairs;
        # presumably numpy accepts this as a dtype spec on the Python version
        # in use -- on Python 3 a dict view may need list() -- confirm.
        dtype = columns.items()
    # get the values; ilwd:char columns are optionally cast to plain ints
    if _default_types_status['ilwd_as_int']:
        input_array = \
            [tuple(
                getattr(row, col) if dt != 'ilwd:char'
                else int(getattr(row, col))
                for col, dt in columns.items())
                for row in table]
    else:
        input_array = \
            [tuple(getattr(row, col) for col in columns)
                for row in table]
    # return the values as an instance of cls
    return cls.from_records(input_array, dtype=dtype,
                            name=name)
[ "def", "from_ligolw_table", "(", "cls", ",", "table", ",", "columns", "=", "None", ",", "cast_to_dtypes", "=", "None", ")", ":", "name", "=", "table", ".", "tableName", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "columns", "is", "None", ":",...
41.788462
17.846154
def send(self, host_message_class, *args):
    """Send a host message.

    :param type host_message_class: a subclass of
      :class:`AYABImterface.communication.host_messages.Message`
    :param args: additional arguments that shall be passed to the
      :paramref:`host_message_class` as arguments
    """
    # Build the message bound to our file and this communication object.
    message = host_message_class(self._file, self, *args)
    # Serialize the write so concurrent senders cannot interleave bytes.
    with self.lock:
        message.send()
    # Notify every registered observer about the outgoing message.
    for observer in self._on_message:
        observer(message)
[ "def", "send", "(", "self", ",", "host_message_class", ",", "*", "args", ")", ":", "message", "=", "host_message_class", "(", "self", ".", "_file", ",", "self", ",", "*", "args", ")", "with", "self", ".", "lock", ":", "message", ".", "send", "(", ")"...
39.615385
14.538462
def extract_iface_name_from_path(path, name):
    """
    Extract the 'real' interface name from the path name.

    Basically this puts the '@' back in the name in place of the
    underscore, where the name contains a '.' or contains 'macvtap' or
    'macvlan'.

    Examples:

        +------------------+-----------------+
        | real name        | path name       |
        +==================+=================+
        | bond0.104\\@bond0 | bond0.104_bond0 |
        +------------------+-----------------+
        | __tmp1111        | __tmp1111       |
        +------------------+-----------------+
        | macvtap\\@bond0   | macvlan_bond0   |
        +------------------+-----------------+
        | prod_bond        | prod_bond       |
        +------------------+-----------------+
    """
    if name not in path:
        return
    # Keep everything after (at most) the second underscore of the basename.
    ifname = os.path.basename(path).split("_", 2)[-1].strip()
    # Restore the '@' separator for VLAN / macvtap / macvlan style names.
    if any(marker in ifname for marker in (".", "macvtap", "macvlan")):
        ifname = ifname.replace("_", "@")
    return ifname
[ "def", "extract_iface_name_from_path", "(", "path", ",", "name", ")", ":", "if", "name", "in", "path", ":", "ifname", "=", "os", ".", "path", ".", "basename", "(", "path", ")", ".", "split", "(", "\"_\"", ",", "2", ")", "[", "-", "1", "]", ".", "...
38.36
12.68
def delete_webhook(self, policy, webhook):
    """
    Deletes the specified webhook from the specified policy.

    The operation is delegated to this object's manager.
    """
    mgr = self.manager
    return mgr.delete_webhook(self, policy, webhook)
[ "def", "delete_webhook", "(", "self", ",", "policy", ",", "webhook", ")", ":", "return", "self", ".", "manager", ".", "delete_webhook", "(", "self", ",", "policy", ",", "webhook", ")" ]
38.6
10.2
def init(self):
    """
    Initialize a new password db store
    """
    # A fresh store is stamped with the current time as its version.
    version = int(time.time())
    self.y = {"version": version}
    # Ask which GPG key the new store should be encrypted for.
    email = raw_input("Enter Email ID: ")
    self.import_key(emailid=email)
    self.encrypt(emailid_list=[email])
[ "def", "init", "(", "self", ")", ":", "self", ".", "y", "=", "{", "\"version\"", ":", "int", "(", "time", ".", "time", "(", ")", ")", "}", "recipient_email", "=", "raw_input", "(", "\"Enter Email ID: \"", ")", "self", ".", "import_key", "(", "emailid",...
35
8.5
def create_build_context(self, variant, build_type, build_path):
    """Create a context to build the variant within.

    :param variant: variant whose (private) build requirements are resolved.
    :param build_type: BuildType value selecting local vs nonlocal package
        paths.
    :param build_path: directory the resolved context file ("build.rxt") is
        written into.
    :return: tuple of (ResolvedContext, path to the saved .rxt file).
    :raises BuildContextResolveError: if the resolve does not reach the
        ``solved`` status.
    """
    # Build requirements include private build requires of the variant.
    request = variant.get_requires(build_requires=True,
                                   private_build_requires=True)

    req_strs = map(str, request)
    quoted_req_strs = map(quote, req_strs)
    self._print("Resolving build environment: %s",
                ' '.join(quoted_req_strs))

    # Local builds may resolve against locally-installed packages; other
    # build types deliberately exclude them.
    if build_type == BuildType.local:
        packages_path = self.package.config.packages_path
    else:
        packages_path = self.package.config.nonlocal_packages_path

    # Only honour a package filter if the package config overrides it.
    if self.package.config.is_overridden("package_filter"):
        from rez.package_filter import PackageFilterList
        data = self.package.config.package_filter
        package_filter = PackageFilterList.from_pod(data)
    else:
        package_filter = None

    context = ResolvedContext(request,
                              package_paths=packages_path,
                              package_filter=package_filter,
                              building=True)
    if self.verbose:
        context.print_info()

    # save context before possible fail, so user can debug
    rxt_filepath = os.path.join(build_path, "build.rxt")
    context.save(rxt_filepath)

    if context.status != ResolverStatus.solved:
        raise BuildContextResolveError(context)
    return context, rxt_filepath
[ "def", "create_build_context", "(", "self", ",", "variant", ",", "build_type", ",", "build_path", ")", ":", "request", "=", "variant", ".", "get_requires", "(", "build_requires", "=", "True", ",", "private_build_requires", "=", "True", ")", "req_strs", "=", "m...
40.166667
20.694444
def _normalize_histogram2d(self, counts, type): """Normalize the values of the counts for a 2D histogram. This normalizes the values of a numpy array to the range 0-255. :param counts: a NumPy array which is to be rescaled. :param type: either 'bw' or 'reverse_bw'. """ counts = (255 * (counts - np.nanmin(counts)) / (np.nanmax(counts) - np.nanmin(counts))) if type == 'reverse_bw': counts = 255 - counts return counts.astype(np.uint8)
[ "def", "_normalize_histogram2d", "(", "self", ",", "counts", ",", "type", ")", ":", "counts", "=", "(", "255", "*", "(", "counts", "-", "np", ".", "nanmin", "(", "counts", ")", ")", "/", "(", "np", ".", "nanmax", "(", "counts", ")", "-", "np", "....
32.4375
19.8125
def parsesamplesheet(self): """Parses the sample sheet (SampleSheet.csv) to determine certain values important for the creation of the assembly report""" # Open the sample sheet with open(self.samplesheet, "r") as samplesheet: # Iterate through the sample sheet samples, prev, header = False, 0, [] for count, line in enumerate(samplesheet): # Remove new lines, and split on commas # line = line.decode('utf-8') # Turn from bytes to string, since python3 is finicky. data = line.rstrip().split(",") if any(data): if "[Settings]" in line: samples = False if not line.startswith("[") and not samples and not data == ['']: # Grab an data not in the [Data] Section setattr(self.header, data[0].replace(" ", ""), "".join(data[1:])) elif "[Data]" in line or "[Reads]" in line: samples = True elif samples and "Sample_ID" in line: header.extend([x.replace("_", "").replace(' ', "") for x in data]) prev = count elif header: # Try and replicate the Illumina rules to create file names from "Sample_Name" samplename = samplenamer(data) # Create an object for storing nested static variables strainmetadata = MetadataObject() # Set the sample name in the object strainmetadata.name = samplename # Add the header object to strainmetadata # strainmetadata.__setattr__("run", GenObject(dict(self.header))) strainmetadata.run = GenObject(copy.copy(self.header.datastore)) # Create the run object, so it will be easier to populate the object (eg run.SampleName = ... # instead of strainmetadata.run.SampleName = ... 
run = strainmetadata.run # Capture Sample_ID, Sample_Name, I7_Index_ID, index1, I5_Index_ID, index2, Sample_Project for idx, item in enumerate(data): setattr(run, header[idx], item) if item else setattr(run, header[idx], "NA") # Add the sample number run.SampleNumber = count - prev # Create the 'General' category for strainmetadata strainmetadata.general = GenObject({'outputdirectory': os.path.join(self.path, samplename), 'pipelinecommit': self.commit}) strainmetadata.general.logout = os.path.join(self.path, samplename, '{}_log_out.txt'.format(samplename)) strainmetadata.general.logerr = os.path.join(self.path, samplename, '{}_log_err.txt'.format(samplename)) # Add the output directory to the general category # Append the strainmetadata object to a list self.samples.append(strainmetadata) elif samples: setattr(self.header, 'forwardlength', data[0]) \ if 'forwardlength' not in self.header.datastore else \ setattr(self.header, 'reverselength', data[0]) self.totalreads += int(data[0]) self.date = self.header.Date if "Date" in self.header.datastore else self.date for sample in self.samples: if 'InvestigatorName' not in sample.run.datastore: sample.run.InvestigatorName = 'NA'
[ "def", "parsesamplesheet", "(", "self", ")", ":", "# Open the sample sheet", "with", "open", "(", "self", ".", "samplesheet", ",", "\"r\"", ")", "as", "samplesheet", ":", "# Iterate through the sample sheet", "samples", ",", "prev", ",", "header", "=", "False", ...
67.033898
28.050847
def debug_query(self, sql: str, *args) -> None:
    """Executes SQL and writes the result to the log."""
    # Fetch all rows eagerly, then hand them to the logging helper.
    result_rows = self.fetchall(sql, *args)
    debug_query_result(result_rows)
[ "def", "debug_query", "(", "self", ",", "sql", ":", "str", ",", "*", "args", ")", "->", "None", ":", "rows", "=", "self", ".", "fetchall", "(", "sql", ",", "*", "args", ")", "debug_query_result", "(", "rows", ")" ]
44.75
3.75
def max(x, y, context=None):
    """
    Return the maximum of x and y.

    If x and y are both NaN, return NaN.  If exactly one of x and y
    is NaN, return the non-NaN value.  If x and y are zeros of
    different signs, return +0.
    """
    # Coerce both operands to BigFloat first, then delegate to MPFR's max.
    operands = (
        BigFloat._implicit_convert(x),
        BigFloat._implicit_convert(y),
    )
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_max,
        operands,
        context,
    )
[ "def", "max", "(", "x", ",", "y", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_max", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", "BigFloat", ".", "...
24.611111
20.722222
def printdata(self) -> None:
    """Print ``self.data`` to stdout in full, without truncation.

    A temporary print-options context is used so the caller's global NumPy
    print settings are restored afterwards.
    """
    # BUG FIX: np.set_printoptions(threshold=np.nan) raises ValueError in
    # modern NumPy (threshold must be a non-NaN number).  np.inf is the
    # supported way to disable summarization, and the context manager
    # restores the previous options instead of clobbering them with 1000.
    with np.printoptions(threshold=np.inf):
        print(self.data)
[ "def", "printdata", "(", "self", ")", "->", "None", ":", "np", ".", "set_printoptions", "(", "threshold", "=", "np", ".", "nan", ")", "print", "(", "self", ".", "data", ")", "np", ".", "set_printoptions", "(", "threshold", "=", "1000", ")" ]
35.4
7.2
def _fluent_params(self, fluents, ordering) -> FluentParamsList:
    '''Returns the instantiated `fluents` for the given `ordering`.

    For each fluent in `fluents`, it instantiates each parameter type
    w.r.t. the contents of the object table.

    Returns:
        Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
        and a list of instantiated fluents represented as strings.
    '''
    variables = []
    for fluent_id in ordering:
        fluent = fluents[fluent_id]
        param_types = fluent.param_types
        if param_types is None:
            # Parameter-free fluent: its only instantiation is itself.
            instantiated = [fluent.name]
        else:
            # Cartesian product of the object domain of every parameter.
            domains = tuple(self.object_table[ptype]['objects']
                            for ptype in param_types)
            instantiated = [
                '{}({})'.format(fluent.name, ','.join(combo))
                for combo in itertools.product(*domains)
            ]
        variables.append((fluent_id, instantiated))
    return tuple(variables)
[ "def", "_fluent_params", "(", "self", ",", "fluents", ",", "ordering", ")", "->", "FluentParamsList", ":", "variables", "=", "[", "]", "for", "fluent_id", "in", "ordering", ":", "fluent", "=", "fluents", "[", "fluent_id", "]", "param_types", "=", "fluent", ...
41.807692
19.653846
def create_blueprint(self):
    """ Create blueprint and register rules

    Builds the Flask blueprint for this nemo app: registers plugins, adds
    plain and "semantic" URL rules, wires static assets/filters, assembles a
    namespaced Jinja2 template loader, and memoizes cached view functions.

    :return: Blueprint of the current nemo app
    :rtype: flask.Blueprint
    """
    self.register_plugins()

    self.blueprint = Blueprint(
        self.name,
        "nemo",
        url_prefix=self.prefix,
        template_folder=self.template_folder,
        static_folder=self.static_folder,
        static_url_path=self.static_url_path
    )

    # Plain routes keep their (possibly plugin-renamed) endpoint as-is.
    for url, name, methods, instance in self._urls:
        self.blueprint.add_url_rule(
            url,
            view_func=self.view_maker(name, instance),
            endpoint=_plugin_endpoint_rename(name, instance),
            methods=methods
        )

    # Semantic routes reuse the same view under a distinct endpoint name.
    for url, name, methods, instance in self._semantic_url:
        self.blueprint.add_url_rule(
            url,
            view_func=self.view_maker(name, instance),
            endpoint=_plugin_endpoint_rename(name, instance)+"_semantic",
            methods=methods
        )

    self.register_assets()
    self.register_filters()

    # We extend the loading list by the instance value
    self.__templates_namespaces__.extend(self.__instance_templates__)
    # We generate a template loader; iterating reversed so later additions
    # take precedence inside each namespace's ChoiceLoader.
    for namespace, directory in self.__templates_namespaces__[::-1]:
        if namespace not in self.__template_loader__:
            self.__template_loader__[namespace] = []
        self.__template_loader__[namespace].append(
            jinja2.FileSystemLoader(op.abspath(directory))
        )
    # Templates are addressed as "namespace::path/to/template".
    self.blueprint.jinja_loader = jinja2.PrefixLoader(
        {namespace: jinja2.ChoiceLoader(paths)
         for namespace, paths in self.__template_loader__.items()},
        "::"
    )

    if self.cache is not None:
        # Replace each registered function with its memoized wrapper.
        for func, instance in self.cached:
            setattr(instance, func.__name__, self.cache.memoize()(func))
    return self.blueprint
[ "def", "create_blueprint", "(", "self", ")", ":", "self", ".", "register_plugins", "(", ")", "self", ".", "blueprint", "=", "Blueprint", "(", "self", ".", "name", ",", "\"nemo\"", ",", "url_prefix", "=", "self", ".", "prefix", ",", "template_folder", "=", ...
35.745455
20.218182
def svdd(self, data: ['SASdata', str] = None,
         code: str = None,
         id: str = None,
         input: [str, list, dict] = None,
         kernel: str = None,
         savestate: str = None,
         solver: str = None,
         weight: str = None,
         procopts: str = None,
         stmtpassthrough: str = None,
         **kwargs: dict) -> 'SASresults':
    """
    Python method to call the SVDD procedure

    Documentation link:
    https://go.documentation.sas.com/?docsetId=casml&docsetTarget=casml_svdd_toc.htm&docsetVersion=8.3&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param code: The code variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param kernel: The kernel variable can only be a string type.
    :param savestate: The savestate variable can only be a string type.
    :param solver: The solver variable can only be a string type.
    :param weight: The weight variable can only be a string type.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
[ "def", "svdd", "(", "self", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "code", ":", "str", "=", "None", ",", "id", ":", "str", "=", "None", ",", "input", ":", "[", "str", ",", "list", ",", "dict", "]", "=", "None",...
51.965517
24.724138
def lparse(inlist, delim, nmax):
    """
    Parse a list of items delimited by a single character.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lparse_c.html

    :param inlist: list of items delimited by delim.
    :type inlist: list
    :param delim: Single character used to delimit items.
    :type delim: str
    :param nmax: Maximum number of items to return.
    :type nmax: int
    :return: Items in the list, left justified.
    :rtype: list of str
    """
    delim = stypes.stringToCharP(delim)
    # NOTE(review): lenout is the length of the *input* string, used as the
    # per-item output buffer size -- presumably safe since no single item can
    # exceed the whole input; confirm against lparse_c's signature.
    lenout = ctypes.c_int(len(inlist))
    inlist = stypes.stringToCharP(inlist)
    nmax = ctypes.c_int(nmax)
    items = stypes.emptyCharArray(lenout, nmax)
    # n receives the number of items actually found by CSPICE.
    n = ctypes.c_int()
    libspice.lparse_c(inlist, delim, nmax, lenout, ctypes.byref(n),
                      ctypes.byref(items))
    # Only the first n entries are valid; convert them back to Python str.
    return [stypes.toPythonString(x.value) for x in items[0:n.value]]
[ "def", "lparse", "(", "inlist", ",", "delim", ",", "nmax", ")", ":", "delim", "=", "stypes", ".", "stringToCharP", "(", "delim", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "len", "(", "inlist", ")", ")", "inlist", "=", "stypes", ".", "stringToC...
35.916667
15
def earthquake_contour_preprocessor(impact_function):
    """Preprocessor to create contour from an earthquake

    :param impact_function: Impact function to run.
    :type impact_function: ImpactFunction

    :return: The contour layer, or ``None`` if contour creation failed.
    :rtype: QgsMapLayer
    """
    path = create_smooth_contour(impact_function.hazard)
    if not os.path.exists(path):
        # Contour generation produced no file; nothing to load.
        return None
    from safe.gis.tools import load_layer
    return load_layer(path, tr('Contour'), 'ogr')[0]
[ "def", "earthquake_contour_preprocessor", "(", "impact_function", ")", ":", "contour_path", "=", "create_smooth_contour", "(", "impact_function", ".", "hazard", ")", "if", "os", ".", "path", ".", "exists", "(", "contour_path", ")", ":", "from", "safe", ".", "gis...
33.642857
16.285714
def models(self):
    """Return self.application models.

    Models are discovered either by scanning the module named by
    ``PEEWEE_MODELS_MODULE`` (when a custom base model class is configured)
    or taken from ``BaseSignalModel.models``.  Models whose table name is
    listed in ``PEEWEE_MODELS_IGNORE`` are filtered out.

    :return: list of peewee model classes.
    """
    Model_ = self.app.config['PEEWEE_MODELS_CLASS']
    ignore = self.app.config['PEEWEE_MODELS_IGNORE']
    models = []
    if Model_ is not Model:
        try:
            mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
            # BUG FIX: the original loop rebound ``models`` to each module
            # attribute, type-checked the attribute *name* (a str) against
            # pw.Model (always False, so nothing was ever collected), and
            # appended the list to itself.  Check the attribute value and
            # collect model classes.
            for attr_name in dir(mod):
                attr = getattr(mod, attr_name)
                if not (isinstance(attr, type) and issubclass(attr, pw.Model)):
                    continue
                models.append(attr)
        except ImportError:
            return models
    elif isinstance(Model_, BaseSignalModel):
        models = BaseSignalModel.models
    return [m for m in models if m._meta.name not in ignore]
[ "def", "models", "(", "self", ")", ":", "Model_", "=", "self", ".", "app", ".", "config", "[", "'PEEWEE_MODELS_CLASS'", "]", "ignore", "=", "self", ".", "app", ".", "config", "[", "'PEEWEE_MODELS_IGNORE'", "]", "models", "=", "[", "]", "if", "Model_", ...
37.25
15.7
def simxGetObjectHandle(clientID, objectName, operationMode):
    '''
    Please have a look at the function description/documentation in the
    V-REP user manual
    '''
    handle = ct.c_int()
    # The remote API expects bytes; encode str object names on Python 3.
    if (sys.version_info[0] == 3) and (type(objectName) is str):
        objectName = objectName.encode('utf-8')
    ret = c_GetObjectHandle(clientID, objectName, ct.byref(handle),
                            operationMode)
    return ret, handle.value
[ "def", "simxGetObjectHandle", "(", "clientID", ",", "objectName", ",", "operationMode", ")", ":", "handle", "=", "ct", ".", "c_int", "(", ")", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "objectName", ...
49.125
29.875
def _get_token(self, regex=None):
    """
    Consumes the next token in the token stream.

    `regex`
        Validate against the specified `re.compile()` regex instance.

    Returns token string.

    * Raises a ``ParseError`` exception if stream is empty or regex
      match fails.
    """
    item = self._lexer.get_token()
    if not item:
        raise ParseError(u'Unexpected end of file')
    # The lexer yields (line number, token text) pairs.
    line_no, token = item
    # Optional syntactic validation against the caller-supplied pattern.
    if regex and not regex.match(token):
        pattern = u"Unexpected format in token '{0}' on line {1}"
        token_val = common.from_utf8(token.strip())
        raise ParseError(pattern.format(token_val, line_no))
    return token
[ "def", "_get_token", "(", "self", ",", "regex", "=", "None", ")", ":", "item", "=", "self", ".", "_lexer", ".", "get_token", "(", ")", "if", "not", "item", ":", "raise", "ParseError", "(", "u'Unexpected end of file'", ")", "else", ":", "line_no", ",", ...
31.875
22.333333
def decode_buffer(buffer: dict) -> np.ndarray:
    """
    Translate a DataBuffer into a numpy array.

    :param buffer: Dictionary with 'data' byte array, 'dtype', and 'shape' fields
    :return: NumPy array of decoded data
    """
    # Reinterpret the raw bytes with the recorded dtype, then restore the
    # original dimensionality.
    flat = np.frombuffer(buffer['data'], dtype=buffer['dtype'])
    return flat.reshape(buffer['shape'])
[ "def", "decode_buffer", "(", "buffer", ":", "dict", ")", "->", "np", ".", "ndarray", ":", "buf", "=", "np", ".", "frombuffer", "(", "buffer", "[", "'data'", "]", ",", "dtype", "=", "buffer", "[", "'dtype'", "]", ")", "return", "buf", ".", "reshape", ...
36.444444
12.888889
def opHaltStatus(symbol=None, token='', version=''):
    '''The Exchange may suspend trading of one or more securities on IEX for
    operational reasons and indicates such operational halt using the
    Operational halt status message.

    IEX disseminates a full pre-market spin of Operational halt status
    messages indicating the operational halt status of all securities. In the
    spin, IEX will send out an Operational Halt Message with "N" (Not
    operationally halted on IEX) for all securities that are eligible for
    trading at the start of the Pre-Market Session. If a security is absent
    from the dissemination, firms should assume that the security is being
    treated as operationally halted in the IEX Trading System at the start of
    the Pre-Market Session.

    After the pre-market spin, IEX will use the Operational halt status
    message to relay changes in operational halt status for an individual
    security.

    https://iexcloud.io/docs/api/#deep-operational-halt-status

    Args:
        symbol (string); Ticker to request
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    # A specific ticker narrows the query; otherwise fetch all securities.
    endpoint = 'deep/op-halt-status'
    if symbol:
        endpoint = 'deep/op-halt-status?symbols=' + symbol
    return _getJson(endpoint, token, version)
[ "def", "opHaltStatus", "(", "symbol", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "_raiseIfNotStr", "(", "symbol", ")", "if", "symbol", ":", "return", "_getJson", "(", "'deep/op-halt-status?symbols='", "+", "symbol", ",", "to...
56.869565
48.086957
def get_image(self):
    """
    Gets first image from post set.

    Returns ``None`` when no post in the set has a non-empty image.
    """
    # image__gt='' excludes posts whose image field is the empty string.
    with_images = self.post_set.filter(image__gt='')
    if not with_images:
        return None
    return with_images[0].image
[ "def", "get_image", "(", "self", ")", ":", "posts_with_images", "=", "self", ".", "post_set", ".", "filter", "(", "image__gt", "=", "''", ")", "if", "posts_with_images", ":", "return", "posts_with_images", "[", "0", "]", ".", "image" ]
31.857143
8.142857
def maximum(lhs, rhs):
    """Returns element-wise maximum of the input arrays with broadcasting.

    Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.

    .. note::

       If the corresponding dimensions of two arrays have the same size or
       one of them has size 1, then the arrays are broadcastable to a common
       shape.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise maximum of the input arrays.
    """
    # pylint: disable= no-member, protected-access
    def _scalar_maximum(x, y):
        # Pure-Python fallback used when both operands are scalars.
        return x if x > y else y

    return _ufunc_helper(
        lhs,
        rhs,
        op.broadcast_maximum,
        _scalar_maximum,
        _internal._maximum_scalar,
        None)
[ "def", "maximum", "(", "lhs", ",", "rhs", ")", ":", "# pylint: disable= no-member, protected-access", "return", "_ufunc_helper", "(", "lhs", ",", "rhs", ",", "op", ".", "broadcast_maximum", ",", "lambda", "x", ",", "y", ":", "x", "if", "x", ">", "y", "else...
28.777778
17.537037
def _scale(self, mode): """ Returns value scaling coefficient for the given mode. """ if mode in self._mode_scale: scale = self._mode_scale[mode] else: scale = 10**(-self.decimals) self._mode_scale[mode] = scale return scale
[ "def", "_scale", "(", "self", ",", "mode", ")", ":", "if", "mode", "in", "self", ".", "_mode_scale", ":", "scale", "=", "self", ".", "_mode_scale", "[", "mode", "]", "else", ":", "scale", "=", "10", "**", "(", "-", "self", ".", "decimals", ")", "...
27.181818
12.090909
def getDataHandler(self, measurementId, deviceId):
    """
    Resolve the active measurement and the data handler of a device.
    :param measurementId: the measurement id.
    :param deviceId: the device id.
    :return: (measurement, handler), or (None, None) when either the
             measurement or the device cannot be found.
    """
    for candidate in self.activeMeasurements:
        if candidate.id == measurementId:
            device = self.deviceController.getDevice(deviceId)
            if device is None:
                return None, None
            return candidate, device.dataHandler
    return None, None
[ "def", "getDataHandler", "(", "self", ",", "measurementId", ",", "deviceId", ")", ":", "am", "=", "next", "(", "(", "m", "for", "m", "in", "self", ".", "activeMeasurements", "if", "m", ".", "id", "==", "measurementId", ")", ",", "None", ")", "if", "a...
35.1875
13.0625
def fifo_async(wrst, rrst, wclk, rclk, wfull, we, wdata, rempty, re, rdata, depth=None, width=None):
    ''' Asynchronous FIFO

            Implements the design described in:
                Clifford E. Cummings, "Simulation and Synthesis Techniques for Asynchronous FIFO Design," SNUG 2002
                (Synopsys Users Group Conference, San Jose, CA, 2002) User Papers, March 2002, Section TB2, 2nd paper.
                Also available at www.sunburst-design.com/papers

            Write side interface:
                wrst - reset
                wclk - clock
                wfull - full flag, immediate set on 'write', delayed clear on 'read' due to clock domain synchronization
                we - write enable
                wdata - write data

            Read side interface
                rrst - reset
                rclk - clock
                rempty - empty flag, immediate set on 'read', delayed clear on 'write' due to clock domain synchronization
                re - read enable
                rdata - read data

            Parameters
                depth - fifo depth. If not set, default 4 is used. Must be >=4. Must be power of 2
                width - data width. If not set, data width equals len(wdata). Can be [0,1,2,3...)

            It is possible to instantiate a fifo with data width 0 (no data) if width=0 or width=None and wdata=None
    '''
    # Derive the data width from wdata when not given explicitly.
    if (width == None):
        width = 0
        if wdata is not None:
            width = len(wdata)
    if (depth == None):
        depth = 4
    assert depth >= 4, "Fifo_async parameter 'depth' must be >= 4 , detected depth={}".format(depth)
    assert (depth & (depth-1)) == 0, "Fifo_async parameter 'depth' must be 2**n, detected depth={}".format(depth)

    # Status flags and the write/read strobes gated by them.
    full_flg = Signal(bool(1))
    empty_flg = Signal(bool(1))
    full_val = Signal(bool(1))
    empty_val = Signal(bool(1))
    we_safe = Signal(bool(0))
    re_safe = Signal(bool(0))

    rd_ptr = Signal(intbv(0, min=0, max=depth))
    wr_ptr = Signal(intbv(0, min=0, max=depth))
    WIDTH = len(rd_ptr)

    # Pointers carry an extra MSB (WIDTH+1 bits) used by the full/empty
    # comparison below; the Gray-coded copies are what crosses the clock
    # domains, so only one bit changes per increment.
    rd_ptr_bin = Signal(intbv(0)[WIDTH+1:])
    rd_ptr_bin_new = Signal(intbv(0)[WIDTH+1:])
    rd_ptr_gray = Signal(intbv(0)[WIDTH+1:])
    rd_ptr_gray_new = Signal(intbv(0)[WIDTH+1:])
    rd_ptr_gray_sync1 = Signal(intbv(0)[WIDTH+1:])
    rd_ptr_gray_sync2 = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_bin = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_bin_new = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_gray = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_gray_new = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_gray_sync1 = Signal(intbv(0)[WIDTH+1:])
    wr_ptr_gray_sync2 = Signal(intbv(0)[WIDTH+1:])

    @always_comb
    def safe_read_write():
        # Expose the flags and suppress writes when full / reads when empty.
        wfull.next = full_flg
        rempty.next = empty_flg
        we_safe.next = we and not full_flg
        re_safe.next = re and not empty_flg

    @always(wclk.posedge)
    def sync_r2w():
        ''' Read-domain to write-domain synchronizer '''
        if (wrst):
            rd_ptr_gray_sync1.next = 0
            rd_ptr_gray_sync2.next = 0
        else:
            rd_ptr_gray_sync1.next = rd_ptr_gray
            rd_ptr_gray_sync2.next = rd_ptr_gray_sync1

    @always(rclk.posedge)
    def sync_w2r():
        ''' Write-domain to read-domain synchronizer '''
        if (rrst):
            wr_ptr_gray_sync1.next = 0
            wr_ptr_gray_sync2.next = 0
        else:
            wr_ptr_gray_sync1.next = wr_ptr_gray
            wr_ptr_gray_sync2.next = wr_ptr_gray_sync1

    @always_comb
    def bin_comb():
        # Next binary pointer values (wrap via modulo pointer range).
        wr_ptr_bin_new.next = wr_ptr_bin
        rd_ptr_bin_new.next = rd_ptr_bin
        if (we_safe):
            wr_ptr_bin_new.next = (wr_ptr_bin + 1) % wr_ptr_bin.max
        if (re_safe):
            rd_ptr_bin_new.next = (rd_ptr_bin + 1) % rd_ptr_bin.max

    @always_comb
    def gray_comb():
        # Binary-to-Gray conversion of the next pointer values.
        wr_ptr_gray_new.next = (wr_ptr_bin_new >> 1) ^ wr_ptr_bin_new
        rd_ptr_gray_new.next = (rd_ptr_bin_new >> 1) ^ rd_ptr_bin_new

    @always_comb
    def full_empty_comb():
        # Empty: read pointer caught up with the synchronized write pointer.
        empty_val.next = (rd_ptr_gray_new == wr_ptr_gray_sync2)
        # Full: pointers equal except for the two MSBs (Gray-code full test
        # from the Cummings paper).
        full_val.next = (wr_ptr_gray_new[WIDTH] != rd_ptr_gray_sync2[WIDTH]) and \
                        (wr_ptr_gray_new[WIDTH-1] != rd_ptr_gray_sync2[WIDTH-1]) and \
                        (wr_ptr_gray_new[WIDTH-1:] == rd_ptr_gray_sync2[WIDTH-1:])

    @always(wclk.posedge)
    def wptr_proc():
        if (wrst):
            wr_ptr_bin.next = 0
            wr_ptr_gray.next = 0
            full_flg.next = 0
        else:
            wr_ptr_bin.next = wr_ptr_bin_new
            wr_ptr_gray.next = wr_ptr_gray_new
            full_flg.next = full_val

    @always(rclk.posedge)
    def rptr_proc():
        if (rrst):
            rd_ptr_bin.next = 0
            rd_ptr_gray.next = 0
            empty_flg.next = 1
        else:
            rd_ptr_bin.next = rd_ptr_bin_new
            rd_ptr_gray.next = rd_ptr_gray_new
            empty_flg.next = empty_val

    if width>0:
        #===========================================================================
        # Memory instance
        #===========================================================================
        mem_we = Signal(bool(0))
        mem_addrw = Signal(intbv(0, min=0, max=depth))
        mem_addrr = Signal(intbv(0, min=0, max=depth))
        mem_di = Signal(intbv(0)[width:0])
        mem_do = Signal(intbv(0)[width:0])

        # RAM: Simple-Dual-Port, Asynchronous read
        mem = ram_sdp_ar( clk = wclk,
                          we = mem_we,
                          addrw = mem_addrw,
                          addrr = mem_addrr,
                          di = mem_di,
                          do = mem_do )

        @always_comb
        def mem_connect():
            # Drop the pointer MSBs when addressing the RAM.
            mem_we.next = we_safe
            mem_addrw.next = wr_ptr_bin[WIDTH:]
            mem_addrr.next = rd_ptr_bin[WIDTH:]
            mem_di.next = wdata
            rdata.next = mem_do

    return instances()
[ "def", "fifo_async", "(", "wrst", ",", "rrst", ",", "wclk", ",", "rclk", ",", "wfull", ",", "we", ",", "wdata", ",", "rempty", ",", "re", ",", "rdata", ",", "depth", "=", "None", ",", "width", "=", "None", ")", ":", "if", "(", "width", "==", "N...
37.067901
21.32716
def m_s(ms2, scale, f, alphasMZ=0.1185, loop=3):
    r"""Running MSbar s-quark mass at ``scale`` with ``f`` dynamical
    flavours, evolved from the input value $m_s(2 \,\text{GeV})$."""
    if scale == 2 and f == 3:
        # already at the reference point; no running needed
        return ms2
    _sane(scale, f)
    crd = rundec.CRunDec()
    alphas_2 = alpha_s(2, 3, alphasMZ=alphasMZ, loop=loop)
    if f == 3:
        # run within the 3-flavour theory
        return crd.mMS2mMS(ms2, alphas_2,
                           alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop),
                           f, loop)
    if f == 4:
        # cross the charm threshold on the way up
        crd.nfMmu.Mth = 1.3
        crd.nfMmu.muth = 1.3
        crd.nfMmu.nf = 4
        return crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, scale, loop)
    if f == 5:
        # cross charm, then bottom thresholds
        mc = 1.3
        crd.nfMmu.Mth = mc
        crd.nfMmu.muth = mc
        crd.nfMmu.nf = 4
        msmc = crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, mc, loop)
        crd.nfMmu.Mth = 4.8
        crd.nfMmu.muth = 4.8
        crd.nfMmu.nf = 5
        alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)
        return crd.mL2mH(msmc, alphas_mc, mc, crd.nfMmu, scale, loop)
    raise ValueError("Invalid input: f={}, scale={}".format(f, scale))
[ "def", "m_s", "(", "ms2", ",", "scale", ",", "f", ",", "alphasMZ", "=", "0.1185", ",", "loop", "=", "3", ")", ":", "if", "scale", "==", "2", "and", "f", "==", "3", ":", "return", "ms2", "# nothing to do", "_sane", "(", "scale", ",", "f", ")", "...
39.793103
17.965517
def _iter_descendants_levelorder(self, is_leaf_fn=None): """ Iterate over all desdecendant nodes.""" tovisit = deque([self]) while len(tovisit) > 0: node = tovisit.popleft() yield node if not is_leaf_fn or not is_leaf_fn(node): tovisit.extend(node.children)
[ "def", "_iter_descendants_levelorder", "(", "self", ",", "is_leaf_fn", "=", "None", ")", ":", "tovisit", "=", "deque", "(", "[", "self", "]", ")", "while", "len", "(", "tovisit", ")", ">", "0", ":", "node", "=", "tovisit", ".", "popleft", "(", ")", "...
40.75
9.375
def do_reload(self, args):
    """Reload a module in to the framework"""
    if args.module is None:
        # fall back to the module currently in use
        target = self.frmwk.current_module
        if not target:
            self.print_error('Must \'use\' module first')
            return
    else:
        if args.module not in self.frmwk.modules:
            self.print_error('Invalid Module Selected.')
            return
        target = self.frmwk.modules[args.module]
    self.reload_module(target)
[ "def", "do_reload", "(", "self", ",", "args", ")", ":", "if", "args", ".", "module", "is", "not", "None", ":", "if", "args", ".", "module", "not", "in", "self", ".", "frmwk", ".", "modules", ":", "self", ".", "print_error", "(", "'Invalid Module Select...
31.153846
12.615385
def _post_query(self, **query_dict): """Perform a POST query against Solr and return the response as a Python dict.""" param_dict = query_dict.copy() return self._send_query(do_post=True, **param_dict)
[ "def", "_post_query", "(", "self", ",", "*", "*", "query_dict", ")", ":", "param_dict", "=", "query_dict", ".", "copy", "(", ")", "return", "self", ".", "_send_query", "(", "do_post", "=", "True", ",", "*", "*", "param_dict", ")" ]
45.8
5
def array_violations(array, events, slots, beta=None):
    """Take a schedule in array form and yield the labels of violated constraints.

    Parameters
    ----------
    array : np.array
        a schedule in array form
    events : list or tuple of resources.Event instances
    slots : list or tuple of resources.Slot instances
    beta
        passed through to ``constraints.all_constraints``

    Returns
    -------
    Generator
        yields a string label for each constraint whose condition fails
    """
    checks = constraints.all_constraints(events, slots, array, beta=beta)
    return (check.label for check in checks if not check.condition)
[ "def", "array_violations", "(", "array", ",", "events", ",", "slots", ",", "beta", "=", "None", ")", ":", "return", "(", "c", ".", "label", "for", "c", "in", "constraints", ".", "all_constraints", "(", "events", ",", "slots", ",", "array", ",", "beta",...
29.192308
19
def deploy_api_gateway(self, api_id, stage_name, stage_description="",
                       description="", cache_cluster_enabled=False,
                       cache_cluster_size='0.5', variables=None,
                       cloudwatch_log_level='OFF', cloudwatch_data_trace=False,
                       cloudwatch_metrics_enabled=False, cache_cluster_ttl=300,
                       cache_cluster_encrypted=False):
    """
    Deploy the API Gateway!

    Return the deployed API URL.
    """
    print("Deploying API Gateway..")

    self.apigateway_client.create_deployment(
        restApiId=api_id,
        stageName=stage_name,
        stageDescription=stage_description,
        description=description,
        cacheClusterEnabled=cache_cluster_enabled,
        cacheClusterSize=cache_cluster_size,
        variables=variables or {}
    )

    # Unknown log levels silently fall back to logging disabled.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'

    patch_ops = [
        self.get_patch_op('logging/loglevel', cloudwatch_log_level),
        self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
        self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
        self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
        self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted),
    ]
    self.apigateway_client.update_stage(restApiId=api_id,
                                        stageName=stage_name,
                                        patchOperations=patch_ops)

    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name)
[ "def", "deploy_api_gateway", "(", "self", ",", "api_id", ",", "stage_name", ",", "stage_description", "=", "\"\"", ",", "description", "=", "\"\"", ",", "cache_cluster_enabled", "=", "False", ",", "cache_cluster_size", "=", "'0.5'", ",", "variables", "=", "None"...
40.489362
18.191489
def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):
    """Create a fresh update record from the current model state in the database.

    For read-write connected models, this will lead to the attempted update of the values of
    a corresponding object in Salesforce.

    Args:
        table_name (str): The name of the table backing the connected model (without schema)
        record_id (int): The primary id of the connected model
        update_fields (Iterable[str]): If given, the names of fields that will be included in
            the write record

    Returns:
        A list of the created TriggerLog entries (usually one)

    Raises:
        LookupError: if ``table_name`` does not belong to a connected model
    """
    if update_fields:
        model_cls = get_connected_model_for_table_name(table_name)
        include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)
    else:
        include_cols = ()
    # Identifiers are composed via psycopg2's sql module so schema/table/column
    # names are quoted safely; values travel as bound parameters.
    raw_query = sql.SQL("""
        SELECT {schema}.hc_capture_update_from_row(
          hstore({schema}.{table_name}.*),
          %(table_name)s,
          ARRAY[{include_cols}]::text[]  -- cast to type expected by stored procedure
        ) AS id
        FROM {schema}.{table_name}
        WHERE id = %(record_id)s
    """).format(
        schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA),
        table_name=sql.Identifier(table_name),
        include_cols=sql.SQL(', ').join(sql.Identifier(col) for col in include_cols),
    )
    params = {'record_id': record_id, 'table_name': table_name}
    return list(TriggerLog.objects.raw(raw_query, params))
[ "def", "capture_update_from_model", "(", "cls", ",", "table_name", ",", "record_id", ",", "*", ",", "update_fields", "=", "(", ")", ")", ":", "include_cols", "=", "(", ")", "if", "update_fields", ":", "model_cls", "=", "get_connected_model_for_table_name", "(", ...
43.5
25.325
def tree(self):
    """Lazily create and return the shared command tree.

    :rtype: cmdtree.tree.CmdTree
    """
    from cmdtree.tree import CmdTree
    cached = self._tree
    if cached is None:
        cached = CmdTree()
        self._tree = cached
    return cached
[ "def", "tree", "(", "self", ")", ":", "from", "cmdtree", ".", "tree", "import", "CmdTree", "if", "self", ".", "_tree", "is", "None", ":", "self", ".", "_tree", "=", "CmdTree", "(", ")", "return", "self", ".", "_tree" ]
25.25
7.5
def begin_span(self, name, span_type, context=None, leaf=False, tags=None):
    """Begin a new top-level span (one without a parent).

    :param name: name of the span
    :param span_type: type of the span
    :param context: a context dict
    :param leaf: True if this is a leaf span
    :param tags: a flat string/string dict of tags
    :return: the Span object
    """
    return self._begin_span(name, span_type, context=context, leaf=leaf,
                            tags=tags, parent_span_id=None)
[ "def", "begin_span", "(", "self", ",", "name", ",", "span_type", ",", "context", "=", "None", ",", "leaf", "=", "False", ",", "tags", "=", "None", ")", ":", "return", "self", ".", "_begin_span", "(", "name", ",", "span_type", ",", "context", "=", "co...
43.636364
14.181818
def ApprovalSymlinkUrnBuilder(approval_type, subject_id, user, approval_id):
    """Build an approval symlink URN."""
    urn = aff4.ROOT_URN.Add("users").Add(user).Add("approvals")
    return urn.Add(approval_type).Add(subject_id).Add(approval_id)
[ "def", "ApprovalSymlinkUrnBuilder", "(", "approval_type", ",", "subject_id", ",", "user", ",", "approval_id", ")", ":", "return", "aff4", ".", "ROOT_URN", ".", "Add", "(", "\"users\"", ")", ".", "Add", "(", "user", ")", ".", "Add", "(", "\"approvals\"", ")...
60
20
def _return_rows(self, table, cols, values, return_type): """Return fetched rows in the desired type.""" if return_type is dict: # Pack each row into a dictionary cols = self.get_columns(table) if cols is '*' else cols if len(values) > 0 and isinstance(values[0], (set, list, tuple)): return [dict(zip(cols, row)) for row in values] else: return dict(zip(cols, values)) elif return_type is tuple: return [tuple(row) for row in values] else: return values
[ "def", "_return_rows", "(", "self", ",", "table", ",", "cols", ",", "values", ",", "return_type", ")", ":", "if", "return_type", "is", "dict", ":", "# Pack each row into a dictionary", "cols", "=", "self", ".", "get_columns", "(", "table", ")", "if", "cols",...
44.461538
15.692308
def push_url(interface):
    '''
    Decorates a function returning the url of translation API.
        Creates and maintains HTTP connection state

    Returns a dict response object from the server containing the
    translated text and metadata of the request body

    :param interface: Callable Request Interface
    :type interface: Function
    '''

    @functools.wraps(interface)
    def connection(*args, **kwargs):
        """
        Extends and wraps a HTTP interface.

        :return: Response Content
        :rtype: Dictionary
        """
        session = Session()
        # retry transient failures twice on both schemes
        session.mount('http://', HTTPAdapter(max_retries=2))
        session.mount('https://', HTTPAdapter(max_retries=2))

        request = Request(**interface(*args, **kwargs))
        prepare = session.prepare_request(request)
        response = session.send(prepare, verify=True)

        if response.status_code != requests.codes.ok:
            response.raise_for_status()

        # collapse runs of consecutive commas the API emits for empty fields
        cleanup = re.subn(r',(?=,)', '', response.content.decode('utf-8'))[0]
        # Fix: json.loads() lost its `encoding` keyword in Python 3.9; the
        # input is already a decoded str, so no encoding argument is needed.
        return json.loads(cleanup.replace(r'\xA0', r' ').replace('[,', '[1,'))

    return connection
[ "def", "push_url", "(", "interface", ")", ":", "@", "functools", ".", "wraps", "(", "interface", ")", "def", "connection", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Extends and wraps a HTTP interface.\n\n :return: Response Content\n ...
31.305556
21.805556
def remove(self, steamid):
    """
    Remove a friend

    :param steamid: their steamid
    :type steamid: :class:`int`, :class:`.SteamID`, :class:`.SteamUser`
    """
    # Accept a SteamUser object in place of a raw id.
    target = steamid.steam_id if isinstance(steamid, SteamUser) else steamid
    self._steam.send(MsgProto(EMsg.ClientRemoveFriend), {'friendid': target})
[ "def", "remove", "(", "self", ",", "steamid", ")", ":", "if", "isinstance", "(", "steamid", ",", "SteamUser", ")", ":", "steamid", "=", "steamid", ".", "steam_id", "self", ".", "_steam", ".", "send", "(", "MsgProto", "(", "EMsg", ".", "ClientRemoveFriend...
31.363636
17.727273
def find_mature(x, y, win=10):
    """
    Window approach to find hills (mature peaks) in the expression profile.

    :param x: start position of the window scan
    :param y: expression values, indexable by position
    :param win: window step size
    :return: the summarized peak positions
    """
    previous = min(y)
    peaks = []
    # NOTE(review): `range(x, y, win)` only works when x and y are both ints,
    # yet `y[pos]` below indexes y — this was probably meant to be
    # range(x, len(y), win); confirm against callers before changing.
    intervals = range(x, y, win)
    for pos in intervals:
        # a position counts as a peak when it rises an order of magnitude
        # above the last accepted level
        if y[pos] > previous * 10:
            previous = y[pos]
            peaks.append(pos)  # fix: was `peaks.add(pos)` — lists have no .add()
    # fix: the summarized peaks were computed but never returned
    return _summarize_peaks(peaks)
[ "def", "find_mature", "(", "x", ",", "y", ",", "win", "=", "10", ")", ":", "previous", "=", "min", "(", "y", ")", "peaks", "=", "[", "]", "intervals", "=", "range", "(", "x", ",", "y", ",", "win", ")", "for", "pos", "in", "intervals", ":", "i...
26.5
11
def init_app(self, app):
    """
    Initialize the application and register the blueprint

    :param app: Flask Application
    :return: Blueprint of the current nemo app
    :rtype: flask.Blueprint
    """
    self.app = app
    self.__blueprint__ = Blueprint(
        self.__name__,
        self.__name__,
        url_prefix=self.__prefix__,
    )

    for url, name, methods in self.__urls__:
        self.blueprint.add_url_rule(
            url,
            view_func=getattr(self, name),
            endpoint=name.replace("r_", ""),
            methods=methods
        )

    # Fix: Flask's register_blueprint() returns None, so the original
    # `self.app = self.app.register_blueprint(...)` clobbered self.app.
    self.app.register_blueprint(self.blueprint)
    return self.blueprint
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "app", "=", "app", "self", ".", "__blueprint__", "=", "Blueprint", "(", "self", ".", "__name__", ",", "self", ".", "__name__", ",", "url_prefix", "=", "self", ".", "__prefix__", ",", ...
28.64
15.52
def simple_lesk(context_sentence: str, ambiguous_word: str,
                pos: str = None, lemma=True, stem=False, hyperhypo=True,
                stop=True, context_is_lemmatized=False,
                nbest=False, keepscore=False, normalizescore=False,
                from_cache=True) -> "wn.Synset":
    """
    Simple Lesk: uses more of the original Lesk algorithm (1986) than
    adapted Lesk (Banerjee and Pederson, 2002) does, but fewer signature
    words than the latter.

    :param context_sentence: String, sentence or document.
    :param ambiguous_word: String, a single word.
    :param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
    :return: A Synset for the estimated best sense.
    """
    # Work with the lemma of the ambiguous word.
    ambiguous_word = lemmatize(ambiguous_word, pos=pos)
    # Words unknown to WordNet cannot be disambiguated.
    if not wn.synsets(ambiguous_word):
        return None
    # Signature words per candidate synset.
    ss_sign = simple_signatures(ambiguous_word, pos, lemma, stem, hyperhypo,
                                stop, from_cache=from_cache)
    # Tokenize (and lemmatize, unless already done) the context.
    if context_is_lemmatized:
        tokens = context_sentence.split()
    else:
        tokens = lemmatize_sentence(context_sentence)
    return compare_overlaps(tokens, ss_sign, nbest=nbest,
                            keepscore=keepscore,
                            normalizescore=normalizescore)
[ "def", "simple_lesk", "(", "context_sentence", ":", "str", ",", "ambiguous_word", ":", "str", ",", "pos", ":", "str", "=", "None", ",", "lemma", "=", "True", ",", "stem", "=", "False", ",", "hyperhypo", "=", "True", ",", "stop", "=", "True", ",", "co...
51.666667
18.777778
def boundary_maximum_division(graph, xxx_todo_changeme5):
    r"""
    Boundary term processing adjacent voxels maximum value using a division relationship.

    An implementation of a boundary term, suitable to be used with the
    `~medpy.graphcut.generate.graph_from_voxels` function.

    The same as `boundary_difference_division`, but working on the gradient
    image instead of the original. See there for details.

    Parameters
    ----------
    graph : GCGraph
        The graph to add the weights to.
    xxx_todo_changeme5 : tuple
        ``(gradient_image, sigma, spacing)`` — the gradient image, the sigma
        parameter of the boundary term, and per-dimension slice spacing used
        to weight neighbourhood edges (or ``False`` to disable distance
        weighting).

    Notes
    -----
    This function requires the gradient image to be passed along, i.e.
    `~medpy.graphcut.generate.graph_from_voxels` has to be called with
    ``boundary_term_args`` set to the gradient image.
    """
    (gradient_image, sigma, spacing) = xxx_todo_changeme5
    gradient_image = scipy.asarray(gradient_image)

    def term_fn(intensities):
        """Map gradient magnitudes through 1 / (1 + x/sigma), clamped above zero."""
        intensities /= sigma
        intensities = 1. / (intensities + 1)
        intensities[intensities <= 0] = sys.float_info.min
        return intensities

    __skeleton_difference(graph, gradient_image, term_fn, spacing)
[ "def", "boundary_maximum_division", "(", "graph", ",", "xxx_todo_changeme5", ")", ":", "(", "gradient_image", ",", "sigma", ",", "spacing", ")", "=", "xxx_todo_changeme5", "gradient_image", "=", "scipy", ".", "asarray", "(", "gradient_image", ")", "def", "boundary...
38.837209
22.534884
def RechazarCTG(self, carta_porte, ctg, motivo):
    """Reject a CTG: the destination party may refuse the grain transport
    code through this operation."""
    payload = dict(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuitRepresentado': self.Cuit,
        },
        datosRechazarCTG={
            'cartaPorte': carta_porte,
            'ctg': ctg,
            'motivoRechazo': motivo,
        },
    )
    response = self.client.rechazarCTG(request=payload)['response']
    datos = response.get('datosResponse')
    self.__analizar_errores(response)
    # Mirror the confirmation data onto the instance when present.
    if datos:
        self.CartaPorte = str(datos['cartaPorte'])
        self.NumeroCTG = str(datos['CTG'])
        self.FechaHora = str(datos['fechaHora'])
        self.CodigoOperacion = str(datos['codigoOperacion'])
[ "def", "RechazarCTG", "(", "self", ",", "carta_porte", ",", "ctg", ",", "motivo", ")", ":", "response", "=", "self", ".", "client", ".", "rechazarCTG", "(", "request", "=", "dict", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'s...
50.705882
14.588235
def _connect(**kwargs):
    '''
    Initialise netscaler connection
    '''
    connargs = dict()

    # Shamelessly ripped from the mysql module
    def _gather(name, key=None, default=None):
        '''
        Resolve a connection argument into connargs, looking (in order) at
        the passed kwargs, then netscaler.<name> via config.option (opts,
        then pillar). kwargs-based names carry a 'netscaler_' prefix
        ('netscaler_host', 'netscaler_user', ...) to avoid collision with
        other functions.
        '''
        if key is None:
            key = name
        if name in kwargs:
            connargs[key] = kwargs[name]
            return
        prefix = 'netscaler_'
        if name.startswith(prefix):
            name = name[len(prefix):]
        val = __salt__['config.option']('netscaler.{0}'.format(name), None)
        if val is not None:
            connargs[key] = val
        elif default is not None:
            connargs[key] = default

    _gather('netscaler_host', 'host')
    _gather('netscaler_user', 'user')
    _gather('netscaler_pass', 'pass')
    _gather('netscaler_useSSL', 'useSSL', True)

    nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'],
                    connargs['useSSL'])
    try:
        nitro.login()
    except NSNitroError as error:
        log.debug('netscaler module error - NSNitro.login() failed: %s', error)
        return None
    return nitro
[ "def", "_connect", "(", "*", "*", "kwargs", ")", ":", "connargs", "=", "dict", "(", ")", "# Shamelessy ripped from the mysql module", "def", "__connarg", "(", "name", ",", "key", "=", "None", ",", "default", "=", "None", ")", ":", "'''\n Add key to conn...
34.772727
19
def histogram2d(x, y, bins, range, weights=None):
    """
    Compute a 2D histogram assuming equally spaced bins.

    Parameters
    ----------
    x, y : `~numpy.ndarray`
        The position of the points to bin in the 2D histogram
    bins : int or iterable
        The number of bins in each dimension. If given as an integer, the
        same number of bins is used for each dimension.
    range : iterable
        The range to use in each dimension, as an iterable of value pairs,
        i.e. [(xmin, xmax), (ymin, ymax)]
    weights : `~numpy.ndarray`
        The weights of the points in the 2D histogram

    Returns
    -------
    array : `~numpy.ndarray`
        The 2D histogram array
    """
    if isinstance(bins, numbers.Integral):
        nx = ny = bins
    else:
        nx, ny = bins
    if not np.isscalar(nx) or not np.isscalar(ny):
        raise TypeError('bins should be an iterable of two integers')

    (xmin, xmax), (ymin, ymax) = range

    # Validate the bounds before handing off to the C helpers.
    for name, value in (('xmin', xmin), ('xmax', xmax),
                        ('ymin', ymin), ('ymax', ymax)):
        if not np.isfinite(value):
            raise ValueError("{0} should be finite".format(name))

    if xmax <= xmin:
        raise ValueError("xmax should be greater than xmin")
    if ymax <= ymin:
        # Fix: the original raised "xmax should be greater than xmin" here —
        # a copy/paste slip; report the y-axis bounds correctly.
        raise ValueError("ymax should be greater than ymin")

    if nx <= 0:
        raise ValueError("nx should be strictly positive")
    if ny <= 0:
        raise ValueError("ny should be strictly positive")

    if weights is None:
        return _histogram2d(x, y, nx, xmin, xmax, ny, ymin, ymax)
    return _histogram2d_weighted(x, y, weights, nx, xmin, xmax, ny, ymin, ymax)
[ "def", "histogram2d", "(", "x", ",", "y", ",", "bins", ",", "range", ",", "weights", "=", "None", ")", ":", "if", "isinstance", "(", "bins", ",", "numbers", ".", "Integral", ")", ":", "nx", "=", "ny", "=", "bins", "else", ":", "nx", ",", "ny", ...
28.459016
22.491803
def add_line(preso, x1, y1, x2, y2, width="3pt", color="red"):
    """Append an arrow line (ODP ``draw:line``) to the last slide of *preso*.

    A graphic style is derived from *width* (a point value such as ``"3pt"``)
    and *color*, registered on the presentation, and a ``draw:line`` node
    spanning (x1, y1)-(x2, y2) is appended to the current slide's page.
    The cm-per-pt scale factors below were measured from a reference 3pt
    red arrow produced by an office suite.
    """
    pts = float(width[0:width.index("pt")])
    stroke_cm = pts * (.106 / 3)        # .106cm/3pt
    marker_start_cm = pts * (.359 / 3)  # .359cm/3pt
    marker_end_cm = pts * (.459 / 3)    # .459cm/3pt

    style = LineStyle(**{
        "svg:stroke-width": "{}cm".format(stroke_cm),
        "svg:stroke-color": color,  # "#ed1c24",
        "draw:marker-start-width": "{}cm".format(marker_start_cm),
        "draw:marker-end": "Arrow",
        "draw:marker-end-width": "{}cm".format(marker_end_cm),
        "draw:fill": "none",
        "draw:textarea-vertical-align": "middle",
    })
    preso.add_style(style)

    line_node = el("draw:line", attrib={
        "draw:style-name": style.name,
        "draw:layer": "layout",
        "svg:x1": x1,
        "svg:y1": y1,
        "svg:x2": x2,
        "svg:y2": y2,
    })
    preso.slides[-1]._page.append(line_node)
[ "def", "add_line", "(", "preso", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "width", "=", "\"3pt\"", ",", "color", "=", "\"red\"", ")", ":", "marker_end_ratio", "=", ".459", "/", "3", "# .459cm/3pt", "marker_start_ratio", "=", ".359", "/", "3", ...
30.257143
19.028571
def _groups_or_na_fun(regex): """Used in both extract_noexpand and extract_frame""" if regex.groups == 0: raise ValueError("pattern contains no capture groups") empty_row = [np.nan] * regex.groups def f(x): if not isinstance(x, str): return empty_row m = regex.search(x) if m: return [np.nan if item is None else item for item in m.groups()] else: return empty_row return f
[ "def", "_groups_or_na_fun", "(", "regex", ")", ":", "if", "regex", ".", "groups", "==", "0", ":", "raise", "ValueError", "(", "\"pattern contains no capture groups\"", ")", "empty_row", "=", "[", "np", ".", "nan", "]", "*", "regex", ".", "groups", "def", "...
30.4
18.466667
async def CharmInfo(self, url):
    '''
    url : str
    Returns -> typing.Union[_ForwardRef('CharmActions'), typing.Mapping[str, ~CharmOption], _ForwardRef('CharmMeta'), _ForwardRef('CharmMetrics'), int, str]
    '''
    # Build the rpc message for the Charms facade (version 2).
    msg = dict(type='Charms',
               request='CharmInfo',
               version=2,
               params={'url': url})
    return await self.rpc(msg)
[ "async", "def", "CharmInfo", "(", "self", ",", "url", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Charms'", ",", "request", "=", "'CharmInfo'", ",", "version", "=", "2", ",", "params...
36.285714
20.857143
def pass_multipart(with_completed=False):
    """Decorator that resolves a MultipartObject and passes it to the view.

    Aborts the request with 404 when no multipart upload matches the given
    bucket, key and upload id.
    """
    def wrapper(view):
        @wraps(view)
        def decorated(self, bucket, key, upload_id, *args, **kwargs):
            multipart = MultipartObject.get(
                bucket, key, upload_id, with_completed=with_completed)
            if multipart is None:
                abort(404, 'uploadId does not exists.')
            return view(self, multipart, *args, **kwargs)
        return decorated
    return wrapper
[ "def", "pass_multipart", "(", "with_completed", "=", "False", ")", ":", "def", "decorate", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "inner", "(", "self", ",", "bucket", ",", "key", ",", "upload_id", ",", "*", "args", ",", "*", "*", ...
38.416667
14.833333
def render_image(self, rgbobj, dst_x, dst_y):
    """Render the image represented by (rgbobj) at dst_x, dst_y
    in the pixel space.

    *** internal method-- do not use ***
    """
    if self.surface is None:
        # nothing to draw onto yet
        return
    self.logger.debug("redraw surface")

    # Pull the window contents as a flat byte buffer and load it into AGG.
    buf = self.viewer.getwin_buffer(order=self.rgb_order, dtype=np.uint8)
    self.surface.frombytes(buf)
[ "def", "render_image", "(", "self", ",", "rgbobj", ",", "dst_x", ",", "dst_y", ")", ":", "if", "self", ".", "surface", "is", "None", ":", "return", "self", ".", "logger", ".", "debug", "(", "\"redraw surface\"", ")", "# get window contents as a buffer and load...
40.307692
13.384615
def num_discarded(self):
    """Get the number of values discarded due to exceeding both limits.

    Counts the leading run of ``_TensorValueDiscarded`` entries in
    ``self._data``.
    """
    if not self._data:
        return 0
    count = 0
    for entry in self._data:
        if not isinstance(entry, _TensorValueDiscarded):
            break
        count += 1
    return count
[ "def", "num_discarded", "(", "self", ")", ":", "if", "not", "self", ".", "_data", ":", "return", "0", "n", "=", "0", "while", "n", "<", "len", "(", "self", ".", "_data", ")", ":", "if", "not", "isinstance", "(", "self", ".", "_data", "[", "n", ...
27.2
20.6
def migrate(self, expression, name_migration_map=None): """ Migrate an expression created for a different constraint set to self. Returns an expression that can be used with this constraintSet All the foreign variables used in the expression are replaced by variables of this constraint set. If the variable was replaced before the replacement is taken from the provided migration map. The migration mapping is updated with new replacements. :param expression: the potentially foreign expression :param name_migration_map: mapping of already migrated variables. maps from string name of foreign variable to its currently existing migrated string name. this is updated during this migration. :return: a migrated expression where all the variables are local. name_migration_map is updated """ if name_migration_map is None: name_migration_map = {} # name_migration_map -> object_migration_map # Based on the name mapping in name_migration_map build an object to # object mapping to be used in the replacing of variables # inv: object_migration_map's keys should ALWAYS be external/foreign # expressions, and its values should ALWAYS be internal/local expressions object_migration_map = {} #List of foreign vars used in expression foreign_vars = itertools.filterfalse(self.is_declared, get_variables(expression)) for foreign_var in foreign_vars: # If a variable with the same name was previously migrated if foreign_var.name in name_migration_map: migrated_name = name_migration_map[foreign_var.name] native_var = self.get_variable(migrated_name) assert native_var is not None, "name_migration_map contains a variable that does not exist in this ConstraintSet" object_migration_map[foreign_var] = native_var else: # foreign_var was not found in the local declared variables nor # any variable with the same name was previously migrated # let's make a new unique internal name for it migrated_name = foreign_var.name if migrated_name in self._declarations: migrated_name = 
self._make_unique_name(f'{foreign_var.name}_migrated') # Create and declare a new variable of given type if isinstance(foreign_var, Bool): new_var = self.new_bool(name=migrated_name) elif isinstance(foreign_var, BitVec): new_var = self.new_bitvec(foreign_var.size, name=migrated_name) elif isinstance(foreign_var, Array): # Note that we are discarding the ArrayProxy encapsulation new_var = self.new_array(index_max=foreign_var.index_max, index_bits=foreign_var.index_bits, value_bits=foreign_var.value_bits, name=migrated_name).array else: raise NotImplemented(f"Unknown expression type {type(var)} encountered during expression migration") # Update the var to var mapping object_migration_map[foreign_var] = new_var # Update the name to name mapping name_migration_map[foreign_var.name] = new_var.name # Actually replace each appearance of migrated variables by the new ones migrated_expression = replace(expression, object_migration_map) return migrated_expression
[ "def", "migrate", "(", "self", ",", "expression", ",", "name_migration_map", "=", "None", ")", ":", "if", "name_migration_map", "is", "None", ":", "name_migration_map", "=", "{", "}", "# name_migration_map -> object_migration_map", "# Based on the name mapping in name_m...
60.389831
31.779661
def get_rating_for_user(self, user, ip_address=None, cookies={}): """get_rating_for_user(user, ip_address=None, cookie=None) Returns the rating for a user or anonymous IP.""" kwargs = dict( content_type = self.get_content_type(), object_id = self.instance.pk, key = self.field.key, ) if not (user and user.is_authenticated()): if not ip_address: raise ValueError('``user`` or ``ip_address`` must be present.') kwargs['user__isnull'] = True kwargs['ip_address'] = ip_address else: kwargs['user'] = user use_cookies = (self.field.allow_anonymous and self.field.use_cookies) if use_cookies: # TODO: move 'vote-%d.%d.%s' to settings or something cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest? cookie = cookies.get(cookie_name) if cookie: kwargs['cookie'] = cookie else: kwargs['cookie__isnull'] = True try: rating = Vote.objects.get(**kwargs) return rating.score except Vote.MultipleObjectsReturned: pass except Vote.DoesNotExist: pass return
[ "def", "get_rating_for_user", "(", "self", ",", "user", ",", "ip_address", "=", "None", ",", "cookies", "=", "{", "}", ")", ":", "kwargs", "=", "dict", "(", "content_type", "=", "self", ".", "get_content_type", "(", ")", ",", "object_id", "=", "self", ...
38.055556
18.916667
def vprjp(vin, plane): """ Project a vector onto a specified plane, orthogonally. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vprjp_c.html :param vin: The projected vector. :type vin: 3-Element Array of floats :param plane: Plane containing vin. :type plane: spiceypy.utils.support_types.Plane :return: Vector resulting from projection. :rtype: 3-Element Array of floats """ vin = stypes.toDoubleVector(vin) vout = stypes.emptyDoubleVector(3) libspice.vprjp_c(vin, ctypes.byref(plane), vout) return stypes.cVectorToPython(vout)
[ "def", "vprjp", "(", "vin", ",", "plane", ")", ":", "vin", "=", "stypes", ".", "toDoubleVector", "(", "vin", ")", "vout", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "vprjp_c", "(", "vin", ",", "ctypes", ".", "byref", "("...
34.294118
11.117647
def to_normalized_batch(self): """Convert this unnormalized batch to an instance of Batch. As this method is intended to be called before augmentation, it assumes that none of the ``*_aug`` attributes is yet set. It will produce an AssertionError otherwise. The newly created Batch's ``*_unaug`` attributes will match the ones in this batch, just in normalized form. Returns ------- imgaug.augmentables.batches.Batch The batch, with ``*_unaug`` attributes being normalized. """ assert all([ attr is None for attr_name, attr in self.__dict__.items() if attr_name.endswith("_aug")]), \ "Expected UnnormalizedBatch to not contain any augmented data " \ "before normalization, but at least one '*_aug' attribute was " \ "already set." images_unaug = nlib.normalize_images(self.images_unaug) shapes = None if images_unaug is not None: shapes = [image.shape for image in images_unaug] return Batch( images=images_unaug, heatmaps=nlib.normalize_heatmaps( self.heatmaps_unaug, shapes), segmentation_maps=nlib.normalize_segmentation_maps( self.segmentation_maps_unaug, shapes), keypoints=nlib.normalize_keypoints( self.keypoints_unaug, shapes), bounding_boxes=nlib.normalize_bounding_boxes( self.bounding_boxes_unaug, shapes), polygons=nlib.normalize_polygons( self.polygons_unaug, shapes), line_strings=nlib.normalize_line_strings( self.line_strings_unaug, shapes), data=self.data )
[ "def", "to_normalized_batch", "(", "self", ")", ":", "assert", "all", "(", "[", "attr", "is", "None", "for", "attr_name", ",", "attr", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "attr_name", ".", "endswith", "(", "\"_aug\"", ")", "]",...
39.568182
19.113636
def _find_combo_data(widget, value): """ Returns the index in a combo box where itemData == value Raises a ValueError if data is not found """ # Here we check that the result is True, because some classes may overload # == and return other kinds of objects whether true or false. for idx in range(widget.count()): if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True: return idx else: raise ValueError("%s not found in combo box" % (value,))
[ "def", "_find_combo_data", "(", "widget", ",", "value", ")", ":", "# Here we check that the result is True, because some classes may overload", "# == and return other kinds of objects whether true or false.", "for", "idx", "in", "range", "(", "widget", ".", "count", "(", ")", ...
39.461538
19.307692
def SecondsToZuluTS(secs=None): """Returns Zulu TS from unix time seconds. If secs is not provided will convert the current time. """ if not secs: secs = int(time.time()) return(datetime.utcfromtimestamp(secs).strftime("%Y-%m-%dT%H:%M:%SZ"))
[ "def", "SecondsToZuluTS", "(", "secs", "=", "None", ")", ":", "if", "not", "secs", ":", "secs", "=", "int", "(", "time", ".", "time", "(", ")", ")", "return", "(", "datetime", ".", "utcfromtimestamp", "(", "secs", ")", ".", "strftime", "(", "\"%Y-%m-...
26.888889
19.666667
def seek(self, position, modifier=0): """move the cursor on the file descriptor to a different location :param position: an integer offset from the location indicated by the modifier :type position: int :param modifier: an indicator of how to find the seek location. - ``os.SEEK_SET`` means start from the beginning of the file - ``os.SEEK_CUR`` means start wherever the cursor already is - ``os.SEEK_END`` means start from the end of the file the default is ``os.SEEK_SET`` """ os.lseek(self._fileno, position, modifier) # clear out the buffer buf = self._rbuf buf.seek(0) buf.truncate()
[ "def", "seek", "(", "self", ",", "position", ",", "modifier", "=", "0", ")", ":", "os", ".", "lseek", "(", "self", ".", "_fileno", ",", "position", ",", "modifier", ")", "# clear out the buffer", "buf", "=", "self", ".", "_rbuf", "buf", ".", "seek", ...
34.47619
20.285714
def handle_inform(self, msg): """Dispatch an inform message to the appropriate method. Parameters ---------- msg : Message object The inform message to dispatch. """ method = self._inform_handlers.get( msg.name, self.__class__.unhandled_inform) try: return method(self, msg) except Exception: e_type, e_value, trace = sys.exc_info() reason = "\n".join(traceback.format_exception( e_type, e_value, trace, self._tb_limit)) self._logger.error("Inform %s FAIL: %s" % (msg.name, reason))
[ "def", "handle_inform", "(", "self", ",", "msg", ")", ":", "method", "=", "self", ".", "_inform_handlers", ".", "get", "(", "msg", ".", "name", ",", "self", ".", "__class__", ".", "unhandled_inform", ")", "try", ":", "return", "method", "(", "self", ",...
32.578947
17.473684
def subvolume_created(name, device, qgroupids=None, set_default=False, copy_on_write=True, force_set_default=True, __dest=None): ''' Makes sure that a btrfs subvolume is present. name Name of the subvolume to add device Device where to create the subvolume qgroupids Add the newly created subcolume to a qgroup. This parameter is a list set_default If True, this new subvolume will be set as default when mounted, unless subvol option in mount is used copy_on_write If false, set the subvolume with chattr +C force_set_default If false and the subvolume is already present, it will not force it as default if ``set_default`` is True ''' ret = { 'name': name, 'result': False, 'changes': {}, 'comment': [], } path = os.path.join(__dest, name) exists = __salt__['btrfs.subvolume_exists'](path) if exists: ret['comment'].append('Subvolume {} already present'.format(name)) # Resolve first the test case. The check is not complete, but at # least we will report if a subvolume needs to be created. Can # happend that the subvolume is there, but we also need to set it # as default, or persist in fstab. if __opts__['test']: ret['result'] = None if not exists: ret['comment'].append('Subvolume {} will be created'.format(name)) return ret if not exists: # Create the directories where the subvolume lives _path = os.path.dirname(path) res = __states__['file.directory'](_path, makedirs=True) if not res['result']: ret['comment'].append('Error creating {} directory'.format(_path)) return ret try: __salt__['btrfs.subvolume_create'](name, dest=__dest, qgroupids=qgroupids) except CommandExecutionError: ret['comment'].append('Error creating subvolume {}'.format(name)) return ret ret['changes'][name] = 'Created subvolume {}'.format(name) # If the volume was already present, we can opt-out the check for # default subvolume. 
if (not exists or (exists and force_set_default)) and \ set_default and not _is_default(path, __dest, name): ret['changes'][name + '_default'] = _set_default(path, __dest, name) if not copy_on_write and _is_cow(path): ret['changes'][name + '_no_cow'] = _unset_cow(path) ret['result'] = True return ret
[ "def", "subvolume_created", "(", "name", ",", "device", ",", "qgroupids", "=", "None", ",", "set_default", "=", "False", ",", "copy_on_write", "=", "True", ",", "force_set_default", "=", "True", ",", "__dest", "=", "None", ")", ":", "ret", "=", "{", "'na...
32.602564
24.602564
def avhrr(scans_nb, scan_points, scan_angle=55.37, frequency=1 / 6.0, apply_offset=True): """Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ # build the avhrr instrument (scan angles) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(scan_points * 0.000025, [np.int(scans_nb), 1]) if apply_offset: offset = np.arange(np.int(scans_nb)) * frequency times += np.expand_dims(offset, 1) return ScanGeometry(avhrr_inst, times)
[ "def", "avhrr", "(", "scans_nb", ",", "scan_points", ",", "scan_angle", "=", "55.37", ",", "frequency", "=", "1", "/", "6.0", ",", "apply_offset", "=", "True", ")", ":", "# build the avhrr instrument (scan angles)", "avhrr_inst", "=", "np", ".", "vstack", "(",...
38
18.68
def _heapqmergesorted(key=None, *iterables): """Return a single iterator over the given iterables, sorted by the given `key` function, assuming the input iterables are already sorted by the same function. (I.e., the merge part of a general merge sort.) Uses :func:`heapq.merge` for the underlying implementation.""" if key is None: keyed_iterables = iterables for element in heapq.merge(*keyed_iterables): yield element else: keyed_iterables = [(_Keyed(key(obj), obj) for obj in iterable) for iterable in iterables] for element in heapq.merge(*keyed_iterables): yield element.obj
[ "def", "_heapqmergesorted", "(", "key", "=", "None", ",", "*", "iterables", ")", ":", "if", "key", "is", "None", ":", "keyed_iterables", "=", "iterables", "for", "element", "in", "heapq", ".", "merge", "(", "*", "keyed_iterables", ")", ":", "yield", "ele...
44.866667
17.8
def lstm_cell(x, h, c, state_size, w_init=None, b_init=None, fix_parameters=False): """Long Short-Term Memory. Long Short-Term Memory, or LSTM, is a building block for recurrent neural networks (RNN) layers. LSTM unit consists of a cell and input, output, forget gates whose functions are defined as following: .. math:: f_t&&=\\sigma(W_fx_t+U_fh_{t-1}+b_f) \\\\ i_t&&=\\sigma(W_ix_t+U_ih_{t-1}+b_i) \\\\ o_t&&=\\sigma(W_ox_t+U_oh_{t-1}+b_o) \\\\ c_t&&=f_t\\odot c_{t-1}+i_t\\odot\\tanh(W_cx_t+U_ch_{t-1}+b_c) \\\\ h_t&&=o_t\\odot\\tanh(c_t). References: S. Hochreiter, and J. Schmidhuber. "Long Short-Term Memory." Neural Computation. 1997. Args: x (~nnabla.Variable): Input N-D array with shape (batch_size, input_size). h (~nnabla.Variable): Input N-D array with shape (batch_size, state_size). c (~nnabla.Variable): Input N-D array with shape (batch_size, state_size). state_size (int): Internal state size is set to `state_size`. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. Returns: :class:`~nnabla.Variable` """ xh = F.concatenate(*(x, h), axis=1) iofc = affine(xh, (4, state_size), w_init=w_init, b_init=b_init, fix_parameters=fix_parameters) i_t, o_t, f_t, gate = F.split(iofc, axis=1) c_t = F.sigmoid(f_t) * c + F.sigmoid(i_t) * F.tanh(gate) h_t = F.sigmoid(o_t) * F.tanh(c_t) return h_t, c_t
[ "def", "lstm_cell", "(", "x", ",", "h", ",", "c", ",", "state_size", ",", "w_init", "=", "None", ",", "b_init", "=", "None", ",", "fix_parameters", "=", "False", ")", ":", "xh", "=", "F", ".", "concatenate", "(", "*", "(", "x", ",", "h", ")", "...
50.179487
35.589744
def _getContextFactory(path, workbench): """Get a context factory. If the client already has a credentials at path, use them. Otherwise, generate them at path. Notifications are reported to the given workbench. """ try: return succeed(getContextFactory(path)) except IOError: d = prompt(workbench, u"E-mail entry", u"Enter e-mail:") d.addCallback(_makeCredentials, path, workbench) d.addCallback(lambda _result: getContextFactory(path)) return d
[ "def", "_getContextFactory", "(", "path", ",", "workbench", ")", ":", "try", ":", "return", "succeed", "(", "getContextFactory", "(", "path", ")", ")", "except", "IOError", ":", "d", "=", "prompt", "(", "workbench", ",", "u\"E-mail entry\"", ",", "u\"Enter e...
33.4
19.4
def _prop0(self, rho, T): """Ideal gas properties""" rhoc = self._constants.get("rhoref", self.rhoc) Tc = self._constants.get("Tref", self.Tc) delta = rho/rhoc tau = Tc/T ideal = self._phi0(tau, delta) fio = ideal["fio"] fiot = ideal["fiot"] fiott = ideal["fiott"] propiedades = _fase() propiedades.h = self.R*T*(1+tau*fiot) propiedades.s = self.R*(tau*fiot-fio) propiedades.cv = -self.R*tau**2*fiott propiedades.cp = self.R*(-tau**2*fiott+1) propiedades.alfap = 1/T propiedades.betap = rho return propiedades
[ "def", "_prop0", "(", "self", ",", "rho", ",", "T", ")", ":", "rhoc", "=", "self", ".", "_constants", ".", "get", "(", "\"rhoref\"", ",", "self", ".", "rhoc", ")", "Tc", "=", "self", ".", "_constants", ".", "get", "(", "\"Tref\"", ",", "self", "....
33.052632
11.684211
def write_file(self, file): """ Writes the editor file content into given file. :param file: File to write. :type file: unicode :return: Method success. :rtype: bool """ LOGGER.debug("> Writing '{0}' file.".format(file)) writer = foundations.io.File(file) writer.content = [self.toPlainText().toUtf8()] if writer.write(): self.set_file(file) self.file_saved.emit() return True
[ "def", "write_file", "(", "self", ",", "file", ")", ":", "LOGGER", ".", "debug", "(", "\"> Writing '{0}' file.\"", ".", "format", "(", "file", ")", ")", "writer", "=", "foundations", ".", "io", ".", "File", "(", "file", ")", "writer", ".", "content", "...
27
15.222222
def parse_argv(tokens, options, options_first=False): """Parse command-line argument vector. If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; """ parsed = [] while tokens.current() is not None: if tokens.current() == '--': return parsed + [Argument(None, v) for v in tokens] elif tokens.current().startswith('--'): parsed += parse_long(tokens, options) elif tokens.current().startswith('-') and tokens.current() != '-': parsed += parse_shorts(tokens, options) elif options_first: return parsed + [Argument(None, v) for v in tokens] else: parsed.append(Argument(None, tokens.move())) return parsed
[ "def", "parse_argv", "(", "tokens", ",", "options", ",", "options_first", "=", "False", ")", ":", "parsed", "=", "[", "]", "while", "tokens", ".", "current", "(", ")", "is", "not", "None", ":", "if", "tokens", ".", "current", "(", ")", "==", "'--'", ...
37.590909
19.272727
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] if isinstance(imt, PGA): imt_per = 0.0 else: imt_per = imt.period # Fix site parameters for consistent dS2S application. sites.vs30 = np.array([250]) sites.z1pt0 = np.array([330]) # intensity on a reference soil is used for both mean # and stddev calculations. ln_y_ref = self._get_ln_y_ref(rup, dists, C) # exp1 and exp2 are parts of eq. 7 exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360)) exp2 = np.exp(C['phi3'] * (1130 - 360)) # v1 is the period dependent site term. The Vs30 above which, the # amplification is constant v1 = self._get_v1(imt) # Get log-mean from regular unadjusted model b13a_mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2, v1) # Adjust mean and standard deviation mean = b13a_mean + self._get_dL2L(imt_per) + self._get_dS2S(imt_per) mean += convert_to_LHC(imt) stddevs = self._get_adjusted_stddevs(sites, rup, C, stddev_types, ln_y_ref, exp1, exp2, imt_per) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extracting dictionary of coefficients specific to required", "# intensity measure type.", "C", "=", "self", ".", "COEFFS", "[", "imt", "...
44.176471
15.647059
def metadata_wrapper(fn): """Save metadata of last api call.""" @functools.wraps(fn) def wrapped_f(self, *args, **kwargs): self.last_metadata = {} self.last_metadata["url"] = self.configuration.host + args[0] self.last_metadata["method"] = args[1] self.last_metadata["timestamp"] = time.time() try: return fn(self, *args, **kwargs) except Exception as e: self.last_metadata["exception"] = e raise return wrapped_f
[ "def", "metadata_wrapper", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrapped_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "last_metadata", "=", "{", "}", "self", ".", "last_me...
39.428571
12.928571
def update(self, claim, ttl=None, grace=None): """ Updates the specified claim with either a new TTL or grace period, or both. """ body = {} if ttl is not None: body["ttl"] = ttl if grace is not None: body["grace"] = grace if not body: raise exc.MissingClaimParameters("You must supply a value for " "'ttl' or 'grace' when calling 'update()'") uri = "/%s/%s" % (self.uri_base, utils.get_id(claim)) resp, resp_body = self.api.method_patch(uri, body=body)
[ "def", "update", "(", "self", ",", "claim", ",", "ttl", "=", "None", ",", "grace", "=", "None", ")", ":", "body", "=", "{", "}", "if", "ttl", "is", "not", "None", ":", "body", "[", "\"ttl\"", "]", "=", "ttl", "if", "grace", "is", "not", "None",...
38.333333
17.133333
def move(self, dst): "Closes then moves the file to dst." self.close() shutil.move(self.path, dst)
[ "def", "move", "(", "self", ",", "dst", ")", ":", "self", ".", "close", "(", ")", "shutil", ".", "move", "(", "self", ".", "path", ",", "dst", ")" ]
29.75
12.25
def create_service_module(service_name, apis): """ Dynamically creates a module named defined by the PEP-8 version of the string contained in service_name (from the YAML config). This module will contain a Client class, a Call Factory, and list of API definition objects. """ service_module = imp.new_module(service_name.lower()) for api in apis: setattr(service_module, api.__class__.__name__, api) ClientClass = create_rest_client_class(service_name, apis) setattr(service_module, 'resources', tuple(apis)) setattr(service_module, 'Client', ClientClass) sys.modules[service_name.lower()] = service_module return service_module
[ "def", "create_service_module", "(", "service_name", ",", "apis", ")", ":", "service_module", "=", "imp", ".", "new_module", "(", "service_name", ".", "lower", "(", ")", ")", "for", "api", "in", "apis", ":", "setattr", "(", "service_module", ",", "api", "....
34.684211
20.894737
def node_assign(self, node, val): """Assign a value (not the node.value object) to a node. This is used by on_assign, but also by for, list comprehension, etc. """ if node.__class__ == ast.Name: if not valid_symbol_name(node.id) or node.id in self.readonly_symbols: errmsg = "invalid symbol name (reserved word?) %s" % node.id self.raise_exception(node, exc=NameError, msg=errmsg) self.symtable[node.id] = val if node.id in self.no_deepcopy: self.no_deepcopy.remove(node.id) elif node.__class__ == ast.Attribute: if node.ctx.__class__ == ast.Load: msg = "cannot assign to attribute %s" % node.attr self.raise_exception(node, exc=AttributeError, msg=msg) setattr(self.run(node.value), node.attr, val) elif node.__class__ == ast.Subscript: sym = self.run(node.value) xslice = self.run(node.slice) if isinstance(node.slice, ast.Index): sym[xslice] = val elif isinstance(node.slice, ast.Slice): sym[slice(xslice.start, xslice.stop)] = val elif isinstance(node.slice, ast.ExtSlice): sym[xslice] = val elif node.__class__ in (ast.Tuple, ast.List): if len(val) == len(node.elts): for telem, tval in zip(node.elts, val): self.node_assign(telem, tval) else: raise ValueError('too many values to unpack')
[ "def", "node_assign", "(", "self", ",", "node", ",", "val", ")", ":", "if", "node", ".", "__class__", "==", "ast", ".", "Name", ":", "if", "not", "valid_symbol_name", "(", "node", ".", "id", ")", "or", "node", ".", "id", "in", "self", ".", "readonl...
41.918919
16.972973
def insert_weave_option_group(parser): """ Adds the options used to specify weave options. Parameters ---------- parser : object OptionParser instance """ optimization_group = parser.add_argument_group("Options for controlling " "weave") optimization_group.add_argument("--per-process-weave-cache", action="store_true", default=False, help="""If given, each process will use a separate directory for weave compilation. This is slower, but safer if several instances may be starting on the same machine at the same time.""") optimization_group.add_argument("--clear-weave-cache-at-start", action="store_true", default=False, help="If given, delete the contents of the weave cache " "when the process starts") optimization_group.add_argument("--clear-weave-cache-at-end", action="store_true", default=False, help="If given, delete the contents of the weave cache " "when the process exits") optimization_group.add_argument("--fixed-weave-cache", action="store_true", default=False, help="If given, use fixed directory PWD/pycbc_inspiral for " " the weave cache")
[ "def", "insert_weave_option_group", "(", "parser", ")", ":", "optimization_group", "=", "parser", ".", "add_argument_group", "(", "\"Options for controlling \"", "\"weave\"", ")", "optimization_group", ".", "add_argument", "(", "\"--per-process-weave-cache\"", ",", "action"...
40.72973
18.108108
def encode(self, *args): """encode(value1[, ...]) -> bytes Encodes the given values to a sequence of bytes according to this Array's underlying element type """ if len(args) != self.nelems: msg = 'ArrayType %s encode() requires %d values, but received %d.' raise ValueError(msg % (self.name, self.nelems, len(args))) return bytearray().join(self.type.encode(arg) for arg in args)
[ "def", "encode", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "self", ".", "nelems", ":", "msg", "=", "'ArrayType %s encode() requires %d values, but received %d.'", "raise", "ValueError", "(", "msg", "%", "(", "self", ".", ...
40.272727
21.181818
def get_hash_for(self, value): """Get hash for a given value. :param value: The value to be indexed :type value: object :return: Hashed value :rtype: str """ if isinstance(value,dict) and '__ref__' in value: return self.get_hash_for(value['__ref__']) serialized_value = self._serializer(value) if isinstance(serialized_value, dict): # Hash each item and return the hash of all the hashes return hash(frozenset([ self.get_hash_for(x) for x in serialized_value.items() ])) elif isinstance(serialized_value, (list,tuple)): # Hash each element and return the hash of all the hashes return hash(tuple([ self.get_hash_for(x) for x in serialized_value ])) return value
[ "def", "get_hash_for", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", "and", "'__ref__'", "in", "value", ":", "return", "self", ".", "get_hash_for", "(", "value", "[", "'__ref__'", "]", ")", "serialized_value", "...
35.833333
15.708333
def setitem(self, key, value): """Maps dictionary keys to values for assignment. Called for dictionary style access with assignment. """ with self.lock: self.tbl[key] = value
[ "def", "setitem", "(", "self", ",", "key", ",", "value", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "tbl", "[", "key", "]", "=", "value" ]
35.666667
7
def add_load(self, lv_load): """Adds a LV load to _loads and grid graph if not already existing Parameters ---------- lv_load : Description #TODO """ if lv_load not in self._loads and isinstance(lv_load, LVLoadDing0): self._loads.append(lv_load) self.graph_add_node(lv_load)
[ "def", "add_load", "(", "self", ",", "lv_load", ")", ":", "if", "lv_load", "not", "in", "self", ".", "_loads", "and", "isinstance", "(", "lv_load", ",", "LVLoadDing0", ")", ":", "self", ".", "_loads", ".", "append", "(", "lv_load", ")", "self", ".", ...
34.166667
14.083333
def start_heartbeat(self): """ Reset hearbeat timer """ self.stop_heartbeat() self._heartbeat_timer = task.LoopingCall(self._heartbeat) self._heartbeat_timer.start(self._heartbeat_interval, False)
[ "def", "start_heartbeat", "(", "self", ")", ":", "self", ".", "stop_heartbeat", "(", ")", "self", ".", "_heartbeat_timer", "=", "task", ".", "LoopingCall", "(", "self", ".", "_heartbeat", ")", "self", ".", "_heartbeat_timer", ".", "start", "(", "self", "."...
37.333333
19.666667
def ra_dec_to_cartesian( self, ra, dec): """*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates* **Key Arguments:** - ``ra`` -- right ascension in sexegesimal or decimal degress. - ``dec`` -- declination in sexegesimal or decimal degress. **Return:** - ``cartesians`` -- tuple of (x, y, z) coordinates .. todo:: - replace calculate_cartesians in all code **Usage:** .. code-block:: python from astrocalc.coords import unit_conversion converter = unit_conversion( log=log ) x, y, z = converter.ra_dec_to_cartesian( ra="23 45 21.23232", dec="+01:58:5.45341" ) print x, y, z # OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606 """ self.log.info('starting the ``ra_dec_to_cartesian`` method') ra = self.ra_sexegesimal_to_decimal( ra=ra ) dec = self.dec_sexegesimal_to_decimal( dec=dec ) ra = math.radians(ra) dec = math.radians(dec) cos_dec = math.cos(dec) cx = math.cos(ra) * cos_dec cy = math.sin(ra) * cos_dec cz = math.sin(dec) cartesians = (cx, cy, cz) self.log.info('completed the ``ra_dec_to_cartesian`` method') return cartesians
[ "def", "ra_dec_to_cartesian", "(", "self", ",", "ra", ",", "dec", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``ra_dec_to_cartesian`` method'", ")", "ra", "=", "self", ".", "ra_sexegesimal_to_decimal", "(", "ra", "=", "ra", ")", "dec", "="...
28
21.849057
def delete_company( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes specified company. Prerequisite: The company has no jobs associated with it. Example: >>> from google.cloud import talent_v4beta1 >>> >>> client = talent_v4beta1.CompanyServiceClient() >>> >>> name = client.company_path('[PROJECT]', '[COMPANY]') >>> >>> client.delete_company(name) Args: name (str): Required. The resource name of the company to be deleted. The format is "projects/{project\_id}/companies/{company\_id}", for example, "projects/api-test-project/companies/foo". retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_company" not in self._inner_api_calls: self._inner_api_calls[ "delete_company" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_company, default_retry=self._method_configs["DeleteCompany"].retry, default_timeout=self._method_configs["DeleteCompany"].timeout, client_info=self._client_info, ) request = company_service_pb2.DeleteCompanyRequest(name=name) self._inner_api_calls["delete_company"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "delete_company", "(", "self", ",", "name", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadat...
41.586207
23.827586
def run(self): """ This method is the actual implementation of the job. By default, it calls the target function specified in the #Job constructor. """ if self.__target is not None: return self.__target(self, *self.__args, **self.__kwargs) raise NotImplementedError
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "__target", "is", "not", "None", ":", "return", "self", ".", "__target", "(", "self", ",", "*", "self", ".", "__args", ",", "*", "*", "self", ".", "__kwargs", ")", "raise", "NotImplementedError"...
32
18
def start(self): """Start the app""" if self.args.debug: self.app.run(port=self.args.port, debug=self.args.debug, host=self.args.interface) else: root = "http://%s:%s" % (self.args.interface, self.args.port) print("tornado web server running on " + root) self.shutdown_requested = False http_server = HTTPServer(WSGIContainer(self.app)) http_server.listen(port=self.args.port, address=self.args.interface) signal.signal(signal.SIGINT, self.signal_handler) tornado.ioloop.PeriodicCallback(self.check_shutdown_flag, 500).start() tornado.ioloop.IOLoop.instance().start()
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "args", ".", "debug", ":", "self", ".", "app", ".", "run", "(", "port", "=", "self", ".", "args", ".", "port", ",", "debug", "=", "self", ".", "args", ".", "debug", ",", "host", "=", "...
49.071429
24.857143
def find_contamination(pair, output_folder, databases_folder, forward_id='_R1', threads=1, keep_files=False, quality_cutoff=20, base_cutoff=2, base_fraction_cutoff=0.05, cgmlst_db=None, Xmx=None, tmpdir=None, data_type='Illumina', use_rmlst=False): """ This needs some documentation fairly badly, so here we go. :param pair: This has become a misnomer. If the input reads are actually paired, needs to be a list with the full filepath to forward reads at index 0 and full path to reverse reads at index 1. If reads are unpaired, should be a list of length 1 with the only entry being the full filepath to read set. :param output_folder: Folder where outputs (confindr log and report, and other stuff) will be stored. This will be created if it does not exist. (I think - should write a test that double checks this). :param databases_folder: Full path to folder where ConFindr's databases live. These files can be downloaded from figshare in .tar.gz format (https://ndownloader.figshare.com/files/11864267), and will be automatically downloaded if the script is run from the command line. :param forward_id: Identifier that marks reads as being in the forward direction for paired reads. Defaults to _R1 :param threads: Number of threads to run analyses with. All parts of this pipeline scale pretty well, so more is better. :param keep_files: Boolean that says whether or not to keep temporary files. :param quality_cutoff: Integer of the phred score required to have a base count towards a multiallelic site. :param base_cutoff: Integer of number of bases needed to have a base be part of a multiallelic site. :param base_fraction_cutoff: Float of fraction of bases needed to have a base be part of a multiallelic site. If specified will be used in parallel with base_cutoff :param cgmlst_db: if None, we're using rMLST, if True, using some sort of custom cgMLST database. This requires some custom parameters. :param Xmx: if None, BBTools will use auto memory detection. 
If string, BBTools will use what's specified as their memory request. :param tmpdir: if None, any genus-specifc databases that need to be created will be written to ConFindr DB location. Otherwise, genus-specific databases will be written here. """ if os.path.isfile(os.path.join(databases_folder, 'download_date.txt')): with open(os.path.join(databases_folder, 'download_date.txt')) as f: database_download_date = f.readline().rstrip() else: database_download_date = 'NA' log = os.path.join(output_folder, 'confindr_log.txt') if len(pair) == 2: sample_name = os.path.split(pair[0])[-1].split(forward_id)[0] paired = True logging.debug('Sample is paired. Sample name is {}'.format(sample_name)) else: sample_name = os.path.split(pair[0])[-1].split('.')[0] paired = False logging.debug('Sample is unpaired. Sample name is {}'.format(sample_name)) sample_tmp_dir = os.path.join(output_folder, sample_name) if not os.path.isdir(sample_tmp_dir): os.makedirs(sample_tmp_dir) logging.info('Checking for cross-species contamination...') if paired: genus = find_cross_contamination(databases_folder, pair, tmpdir=sample_tmp_dir, log=log, threads=threads) else: genus = find_cross_contamination_unpaired(databases_folder, reads=pair[0], tmpdir=sample_tmp_dir, log=log, threads=threads) if len(genus.split(':')) > 1: write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'), sample_name=sample_name, multi_positions=0, genus=genus, percent_contam='NA', contam_stddev='NA', total_gene_length=0, database_download_date=database_download_date) logging.info('Found cross-contamination! Skipping rest of analysis...\n') if keep_files is False: shutil.rmtree(sample_tmp_dir) return # Setup genus-specific databases, if necessary. if cgmlst_db is not None: # Sanity check that the DB specified is actually a file, otherwise, quit with appropriate error message. if not os.path.isfile(cgmlst_db): logging.error('ERROR: Specified cgMLST file ({}) does not exist. 
Please check the path and try again.'.format(cgmlst_db)) quit(code=1) sample_database = cgmlst_db else: db_folder = databases_folder if tmpdir is None else tmpdir if not os.path.isdir(db_folder): os.makedirs(db_folder) if genus != 'NA': # Logic here is as follows: users can either have both rMLST databases, which cover all of bacteria, # cgmlst-derived databases, which cover only Escherichia, Salmonella, and Listeria (may add more at some # point), or they can have both. They can also set priority to either always use rMLST, or to use my # core-genome derived stuff and fall back on rMLST if they're trying to look at a genus I haven't created # a scheme for. # In the event rmlst databases have priority, always use them. if use_rmlst is True: sample_database = os.path.join(db_folder, '{}_db.fasta'.format(genus)) if not os.path.isfile(sample_database): if os.path.isfile(os.path.join(db_folder, 'gene_allele.txt')) and os.path.isfile(os.path.join(db_folder, 'rMLST_combined.fasta')): logging.info('Setting up genus-specific database for genus {}...'.format(genus)) allele_list = find_genusspecific_allele_list(os.path.join(db_folder, 'gene_allele.txt'), genus) setup_allelespecific_database(fasta_file=sample_database, database_folder=db_folder, allele_list=allele_list) else: # Check if a cgderived database is available. If not, try to use rMLST database. sample_database = os.path.join(db_folder, '{}_db_cgderived.fasta'.format(genus)) if not os.path.isfile(sample_database): sample_database = os.path.join(db_folder, '{}_db.fasta'.format(genus)) # Create genus specific database if it doesn't already exist and we have the necessary rMLST files. 
if os.path.isfile(os.path.join(db_folder, 'rMLST_combined.fasta')) and os.path.isfile(os.path.join(db_folder, 'gene_allele.txt')) and not os.path.isfile(sample_database): logging.info('Setting up genus-specific database for genus {}...'.format(genus)) allele_list = find_genusspecific_allele_list(os.path.join(db_folder, 'gene_allele.txt'), genus) setup_allelespecific_database(fasta_file=sample_database, database_folder=db_folder, allele_list=allele_list) else: sample_database = os.path.join(db_folder, 'rMLST_combined.fasta') # If a user has gotten to this point and they don't have any database available to do analysis because # they don't have rMLST downloaded and we don't have a cg-derived database available, boot them with a helpful # message. if not os.path.isfile(sample_database): write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'), sample_name=sample_name, multi_positions=0, genus=genus, percent_contam='NA', contam_stddev='NA', total_gene_length=0, database_download_date=database_download_date) logging.info('Did not find databases for genus {genus}. You can download the rMLST database to get access to all ' 'genera (see https://olc-bioinformatics.github.io/ConFindr/install/). Alternatively, if you have a ' 'high-quality core-genome derived database for your genome of interest, we would be happy to ' 'add it - open an issue at https://github.com/OLC-Bioinformatics/ConFindr/issues with the ' 'title "Add genus-specific database: {genus}"\n'.format(genus=genus)) if keep_files is False: shutil.rmtree(sample_tmp_dir) return # Extract rMLST reads and quality trim. 
logging.info('Extracting conserved core genes...') if paired: if Xmx is None: out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0], reverse_in=pair[1], forward_out=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'), reverse_out=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'), threads=threads, returncmd=True) else: out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0], reverse_in=pair[1], forward_out=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'), reverse_out=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'), threads=threads, Xmx=Xmx, returncmd=True) else: if data_type == 'Nanopore': forward_out = os.path.join(sample_tmp_dir, 'trimmed.fastq.gz') else: forward_out = os.path.join(sample_tmp_dir, 'rmlst.fastq.gz') if Xmx is None: out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0], forward_out=forward_out, returncmd=True, threads=threads) else: out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0], forward_out=forward_out, Xmx=Xmx, returncmd=True, threads=threads) write_to_logfile(log, out, err, cmd) logging.info('Quality trimming...') if data_type == 'Illumina': if paired: if Xmx is None: out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'), reverse_in=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'), forward_out=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'), reverse_out=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'), threads=str(threads), returncmd=True) else: out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'), reverse_in=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'), forward_out=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'), reverse_out=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'), Xmx=Xmx, threads=str(threads), returncmd=True) else: if Xmx is None: out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 
'rmlst.fastq.gz'), forward_out=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), returncmd=True, threads=threads) else: out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst.fastq.gz'), forward_out=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), returncmd=True, threads=threads, Xmx=Xmx) write_to_logfile(log, out, err, cmd) logging.info('Detecting contamination...') # Now do mapping in two steps - first, map reads back to database with ambiguous reads matching all - this # will be used to get a count of number of reads aligned to each gene/allele so we can create a custom rmlst file # with only the most likely allele for each gene. if not os.path.isfile(sample_database + '.fai'): # Don't bother re-indexing, this only needs to happen once. pysam.faidx(sample_database) kma_database = sample_database.replace('.fasta', '') + '_kma' kma_report = os.path.join(sample_tmp_dir, 'kma_rmlst') if not os.path.isfile(kma_database + '.name'): # The .name is one of the files KMA creates when making a database. cmd = 'kma index -i {} -o {}'.format(sample_database, kma_database) # NOTE: Need KMA >=1.2.0 for this to work. out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) # Run KMA. 
if paired: cmd = 'kma -ipe {forward_in} {reverse_in} -t_db {kma_database} -o {kma_report} ' \ '-t {threads}'.format(forward_in=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'), reverse_in=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'), kma_database=kma_database, kma_report=kma_report, threads=threads) out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) else: if data_type == 'Illumina': cmd = 'kma -i {input_reads} -t_db {kma_database} -o {kma_report} ' \ '-t {threads}'.format(input_reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), kma_database=kma_database, kma_report=kma_report, threads=threads) else: # Recommended Nanopore settings from KMA repo: https://bitbucket.org/genomicepidemiology/kma cmd = 'kma -i {input_reads} -t_db {kma_database} -o {kma_report} -mem_mode -mp 20 -mrs 0.0 -bcNano ' \ '-t {threads}'.format(input_reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), kma_database=kma_database, kma_report=kma_report, threads=threads) out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) rmlst_report = os.path.join(output_folder, sample_name + '_rmlst.csv') gene_alleles = find_rmlst_type(kma_report=kma_report + '.res', rmlst_report=rmlst_report) with open(os.path.join(sample_tmp_dir, 'rmlst.fasta'), 'w') as f: for contig in SeqIO.parse(sample_database, 'fasta'): if contig.id in gene_alleles: f.write('>{}\n'.format(contig.id)) f.write(str(contig.seq) + '\n') rmlst_gene_length = find_total_sequence_length(os.path.join(sample_tmp_dir, 'rmlst.fasta')) logging.debug('Total gene length is {}'.format(rmlst_gene_length)) # Second step of mapping - Do a mapping of our baited reads against a fasta file that has only one allele per # rMLST gene. 
pysam.faidx(os.path.join(sample_tmp_dir, 'rmlst.fasta')) if paired: cmd = 'bbmap.sh ref={ref} in={forward_in} in2={reverse_in} out={outbam} threads={threads} mdtag ' \ 'nodisk'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'), forward_in=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'), reverse_in=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'), outbam=os.path.join(sample_tmp_dir, 'out_2.bam'), threads=threads) if cgmlst_db is not None: # Lots of core genes seem to have relatives within a genome that are at ~70 percent identity - this means # that reads that shouldn't really map do, and cause false positives. Adding in this subfilter means that # reads can only have one mismatch, so they actually have to be from the right gene for this to work. cmd += ' subfilter=1' if Xmx: cmd += ' -Xmx{}'.format(Xmx) out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) else: if data_type == 'Illumina': cmd = 'bbmap.sh ref={ref} in={forward_in} out={outbam} threads={threads} mdtag ' \ 'nodisk'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'), forward_in=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), outbam=os.path.join(sample_tmp_dir, 'out_2.bam'), threads=threads) if cgmlst_db is not None: # Lots of core genes seem to have relatives within a genome that are at ~70 percent identity - this means # that reads that shouldn't really map do, and cause false positives. Adding in this subfilter means that # reads can only have one mismatch, so they actually have to be from the right gene for this to work. 
cmd += ' subfilter=1' if Xmx: cmd += ' -Xmx{}'.format(Xmx) out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) else: cmd = 'minimap2 --MD -t {threads} -ax map-ont {ref} {reads} ' \ '> {outsam}'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'), reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'), outsam=os.path.join(sample_tmp_dir, 'out_2.sam'), threads=threads) out, err = run_cmd(cmd) write_to_logfile(log, out, err, cmd) outbam = os.path.join(sample_tmp_dir, 'out_2.bam') # Apparently have to perform equivalent of a touch on this file for this to work. fh = open(outbam, 'w') fh.close() pysam.view('-b', '-o', outbam, os.path.join(sample_tmp_dir, 'out_2.sam'), save_stdout=outbam) pysam.sort('-o', os.path.join(sample_tmp_dir, 'contamination.bam'), os.path.join(sample_tmp_dir, 'out_2.bam')) pysam.index(os.path.join(sample_tmp_dir, 'contamination.bam')) # Now find number of multi-positions for each rMLST gene/allele combination multi_positions = 0 # Run the BAM parsing in parallel! Some refactoring of the code would likely be a good idea so this # isn't quite so ugly, but it works. p = multiprocessing.Pool(processes=threads) bamfile_list = [os.path.join(sample_tmp_dir, 'contamination.bam')] * len(gene_alleles) # bamfile_list = [os.path.join(sample_tmp_dir, 'rmlst.bam')] * len(gene_alleles) reference_fasta_list = [os.path.join(sample_tmp_dir, 'rmlst.fasta')] * len(gene_alleles) quality_cutoff_list = [quality_cutoff] * len(gene_alleles) base_cutoff_list = [base_cutoff] * len(gene_alleles) base_fraction_list = [base_fraction_cutoff] * len(gene_alleles) multibase_dict_list = list() report_write_list = list() for multibase_dict, report_write in p.starmap(read_contig, zip(gene_alleles, bamfile_list, reference_fasta_list, quality_cutoff_list, base_cutoff_list, base_fraction_list), chunksize=1): multibase_dict_list.append(multibase_dict) report_write_list.append(report_write) p.close() p.join() # Write out report info. 
report_file = os.path.join(output_folder, sample_name + '_contamination.csv') with open(report_file, 'w') as r: r.write('{reference},{position},{bases},{coverage}\n'.format(reference='Gene', position='Position', bases='Bases', coverage='Coverage')) for item in report_write_list: for contamination_info in item: r.write(contamination_info) # Total up the number of multibase positions. for multibase_position_dict in multibase_dict_list: multi_positions += sum([len(snp_positions) for snp_positions in multibase_position_dict.values()]) if cgmlst_db is None: snp_cutoff = int(rmlst_gene_length/10000) + 1 else: snp_cutoff = 10 if multi_positions >= snp_cutoff: percent_contam, contam_stddev = estimate_percent_contamination(contamination_report_file=report_file) else: percent_contam = 0 contam_stddev = 0 logging.info('Done! Number of contaminating SNVs found: {}\n'.format(multi_positions)) write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'), sample_name=sample_name, multi_positions=multi_positions, genus=genus, percent_contam=percent_contam, contam_stddev=contam_stddev, total_gene_length=rmlst_gene_length, snp_cutoff=snp_cutoff, cgmlst=cgmlst_db, database_download_date=database_download_date) if keep_files is False: shutil.rmtree(sample_tmp_dir)
[ "def", "find_contamination", "(", "pair", ",", "output_folder", ",", "databases_folder", ",", "forward_id", "=", "'_R1'", ",", "threads", "=", "1", ",", "keep_files", "=", "False", ",", "quality_cutoff", "=", "20", ",", "base_cutoff", "=", "2", ",", "base_fr...
62.368571
34.414286
def _matrix_add_column(matrix, column, default=0): """Given a matrix as a list of lists, add a column to the right, filling in with a default value if necessary. """ height_difference = len(column) - len(matrix) # The width of the matrix is the length of its longest row. width = max(len(row) for row in matrix) if matrix else 0 # For now our offset is 0. We may need to shift our column down later. offset = 0 # If we need extra rows, add them to the top of the matrix. if height_difference > 0: for _ in range(height_difference): matrix.insert(0, [default] * width) # If the column is shorter, we'll need to shift it down. if height_difference < 0: offset = -height_difference #column = ([default] * offset) + column for index, value in enumerate(column): # The row index is the index in the column plus our offset. row_index = index + offset row = matrix[row_index] # If this row is short, pad it with default values. width_difference = width - len(row) row.extend([default] * width_difference) row.append(value)
[ "def", "_matrix_add_column", "(", "matrix", ",", "column", ",", "default", "=", "0", ")", ":", "height_difference", "=", "len", "(", "column", ")", "-", "len", "(", "matrix", ")", "# The width of the matrix is the length of its longest row.", "width", "=", "max", ...
35.46875
18.09375
def check(self, url_data): """Parse PDF data.""" # XXX user authentication from url_data password = '' data = url_data.get_content() # PDFParser needs a seekable file object fp = StringIO(data) try: parser = PDFParser(fp) doc = PDFDocument(parser, password=password) for (pageno, page) in enumerate(PDFPage.create_pages(doc), start=1): if "Contents" in page.attrs: search_url(page.attrs["Contents"], url_data, pageno, set()) if "Annots" in page.attrs: search_url(page.attrs["Annots"], url_data, pageno, set()) except PSException as msg: if not msg.args: # at least show the class name msg = repr(msg) log.warn(LOG_PLUGIN, "Error parsing PDF file: %s", msg)
[ "def", "check", "(", "self", ",", "url_data", ")", ":", "# XXX user authentication from url_data", "password", "=", "''", "data", "=", "url_data", ".", "get_content", "(", ")", "# PDFParser needs a seekable file object", "fp", "=", "StringIO", "(", "data", ")", "t...
43.25
14.8
def echo_html_fenye_str(rec_num, fenye_num): ''' 生成分页的导航 ''' pagination_num = int(math.ceil(rec_num * 1.0 / 10)) if pagination_num == 1 or pagination_num == 0: fenye_str = '' elif pagination_num > 1: pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', '' fenye_str = '<ul class="pagination">' if fenye_num > 1: pager_home = '''<li class="{0}" name='fenye' onclick='change(this);' value='{1}'><a>First Page</a></li>'''.format('', 1) pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);' value='{1}'><a>Previous Page</a></li>'''.format('', fenye_num - 1) if fenye_num > 5: cur_num = fenye_num - 4 else: cur_num = 1 if pagination_num > 10 and cur_num < pagination_num - 10: show_num = cur_num + 10 else: show_num = pagination_num + 1 for num in range(cur_num, show_num): if num == fenye_num: checkstr = 'active' else: checkstr = '' tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);' value='{1}'><a>{1}</a></li>'''.format(checkstr, num) pager_mid += tmp_str_df if fenye_num < pagination_num: pager_next = '''<li class="{0}" name='fenye' onclick='change(this);' value='{1}'><a>Next Page</a></li>'''.format('', fenye_num + 1) pager_last = '''<li class="{0}" name='fenye' onclick='change(this);' value='{1}'><a>End Page</a></li>'''.format('', pagination_num) fenye_str += pager_home + pager_pre + pager_mid + pager_next + pager_last fenye_str += '</ul>' else: return '' return fenye_str
[ "def", "echo_html_fenye_str", "(", "rec_num", ",", "fenye_num", ")", ":", "pagination_num", "=", "int", "(", "math", ".", "ceil", "(", "rec_num", "*", "1.0", "/", "10", ")", ")", "if", "pagination_num", "==", "1", "or", "pagination_num", "==", "0", ":", ...
32.907407
26.092593
def _keep_cursor_in_buffer(self): """ Ensures that the cursor is inside the editing region. Returns whether the cursor was moved. """ moved = not self._in_buffer() if moved: cursor = self._control.textCursor() cursor.movePosition(QtGui.QTextCursor.End) self._control.setTextCursor(cursor) return moved
[ "def", "_keep_cursor_in_buffer", "(", "self", ")", ":", "moved", "=", "not", "self", ".", "_in_buffer", "(", ")", "if", "moved", ":", "cursor", "=", "self", ".", "_control", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QtGui", ".", ...
38
8.2
def mark_seen(self): """ Mark the selected message or comment as seen. """ data = self.get_selected_item() if data['is_new']: with self.term.loader('Marking as read'): data['object'].mark_as_read() if not self.term.loader.exception: data['is_new'] = False else: with self.term.loader('Marking as unread'): data['object'].mark_as_unread() if not self.term.loader.exception: data['is_new'] = True
[ "def", "mark_seen", "(", "self", ")", ":", "data", "=", "self", ".", "get_selected_item", "(", ")", "if", "data", "[", "'is_new'", "]", ":", "with", "self", ".", "term", ".", "loader", "(", "'Marking as read'", ")", ":", "data", "[", "'object'", "]", ...
36
8.8
def __get_settings(self): """ Returns the current search and replace settings. :return: Settings. :rtype: dict """ return {"case_sensitive": self.Case_Sensitive_checkBox.isChecked(), "whole_word": self.Whole_Word_checkBox.isChecked(), "regular_expressions": self.Regular_Expressions_checkBox.isChecked()}
[ "def", "__get_settings", "(", "self", ")", ":", "return", "{", "\"case_sensitive\"", ":", "self", ".", "Case_Sensitive_checkBox", ".", "isChecked", "(", ")", ",", "\"whole_word\"", ":", "self", ".", "Whole_Word_checkBox", ".", "isChecked", "(", ")", ",", "\"re...
34.181818
22.909091
def knx_to_datetime(knxdata): """Convert a an 8 byte KNX time and date object to its components""" if len(knxdata) != 8: raise KNXException("Can only convert an 8 Byte object to datetime") year = knxdata[0] + 1900 month = knxdata[1] day = knxdata[2] hour = knxdata[3] & 0x1f minute = knxdata[4] second = knxdata[5] return datetime(year, month, day, hour, minute, second)
[ "def", "knx_to_datetime", "(", "knxdata", ")", ":", "if", "len", "(", "knxdata", ")", "!=", "8", ":", "raise", "KNXException", "(", "\"Can only convert an 8 Byte object to datetime\"", ")", "year", "=", "knxdata", "[", "0", "]", "+", "1900", "month", "=", "k...
28.857143
21.142857
def from_xso(self, xso): """ Construct and return an instance from the given `xso`. .. note:: This is a static method (classmethod), even though sphinx does not document it as such. :param xso: A :xep:`4` data form :type xso: :class:`~.Data` :raises ValueError: if the ``FORM_TYPE`` mismatches :raises ValueError: if field types mismatch :return: newly created instance of this class The fields from the given `xso` are matched against the fields on the form. Any matching field loads its data from the `xso` field. Fields which occur on the form template but not in the `xso` are skipped. Fields which occur in the `xso` but not on the form template are also skipped (but are re-emitted when the form is rendered as reply, see :meth:`~.Form.render_reply`). If the form template has a ``FORM_TYPE`` attribute and the incoming `xso` also has a ``FORM_TYPE`` field, a mismatch between the two values leads to a :class:`ValueError`. The field types of matching fields are checked. If the field type on the incoming XSO may not be upcast to the field type declared on the form (see :meth:`~.FieldType.allow_upcast`), a :class:`ValueError` is raised. If the :attr:`~.Data.type_` does not indicate an actual form (but rather a cancellation request or tabular result), :class:`ValueError` is raised. 
""" my_form_type = getattr(self, "FORM_TYPE", None) f = self() for field in xso.fields: if field.var == "FORM_TYPE": if (my_form_type is not None and field.type_ == forms_xso.FieldType.HIDDEN and field.values): if my_form_type != field.values[0]: raise ValueError( "mismatching FORM_TYPE ({!r} != {!r})".format( field.values[0], my_form_type, ) ) continue if field.var is None: continue key = fields.descriptor_ns, field.var try: descriptor = self.DESCRIPTOR_MAP[key] except KeyError: continue if (field.type_ is not None and not field.type_.allow_upcast(descriptor.FIELD_TYPE)): raise ValueError( "mismatching type ({!r} != {!r}) on field var={!r}".format( field.type_, descriptor.FIELD_TYPE, field.var, ) ) data = descriptor.__get__(f, self) data.load(field) f._recv_xso = xso return f
[ "def", "from_xso", "(", "self", ",", "xso", ")", ":", "my_form_type", "=", "getattr", "(", "self", ",", "\"FORM_TYPE\"", ",", "None", ")", "f", "=", "self", "(", ")", "for", "field", "in", "xso", ".", "fields", ":", "if", "field", ".", "var", "==",...
37.090909
21.844156
def post_chat(self, msg, is_me=False, is_a=False): """Posts a msg to this room's chat. Set me=True if you want to /me""" if len(msg) > self.config.max_message: raise ValueError( f"Chat message must be at most {self.config.max_message} characters." ) while not self.user.nick: with ARBITRATOR.condition: ARBITRATOR.condition.wait() if is_a: if not self.admin and not self.staff: raise RuntimeError("Can't modchat if you're not a mod or trusted") self.conn.make_call("command", self.user.nick, "a", msg) return if is_me: self.conn.make_call("command", self.user.nick, "me", msg) return self.conn.make_call("chat", self.user.nick, msg)
[ "def", "post_chat", "(", "self", ",", "msg", ",", "is_me", "=", "False", ",", "is_a", "=", "False", ")", ":", "if", "len", "(", "msg", ")", ">", "self", ".", "config", ".", "max_message", ":", "raise", "ValueError", "(", "f\"Chat message must be at most ...
40.35
20.3