text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array', **kwargs): """Count the number of non-zero elements.""" return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
[ "def", "count_nonzero", "(", "data", ",", "mapper", "=", "None", ",", "blen", "=", "None", ",", "storage", "=", "None", ",", "create", "=", "'array'", ",", "*", "*", "kwargs", ")", ":", "return", "reduce_axis", "(", "data", ",", "reducer", "=", "np",...
56.833333
15.5
def _remove_strings(code) : """ Remove strings in code """ removed_string = "" is_string_now = None for i in range(0, len(code)-1) : append_this_turn = False if code[i] == "'" and (i == 0 or code[i-1] != '\\') : if is_string_now == "'" : is_string_now = None elif is_string_now == None : is_string_now = "'" append_this_turn = True elif code[i] == '"' and (i == 0 or code[i-1] != '\\') : if is_string_now == '"' : is_string_now = None elif is_string_now == None : is_string_now = '"' append_this_turn = True if is_string_now == None or append_this_turn == True : removed_string += code[i] return removed_string
[ "def", "_remove_strings", "(", "code", ")", ":", "removed_string", "=", "\"\"", "is_string_now", "=", "None", "for", "i", "in", "range", "(", "0", ",", "len", "(", "code", ")", "-", "1", ")", ":", "append_this_turn", "=", "False", "if", "code", "[", ...
22.166667
19.666667
def best_fit_likelihood(self): """ returns the log likelihood of the best fit model of the current state of this class :return: log likelihood, float """ kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.best_fit(bijective=False) param_class = self._param_class likelihoodModule = self.likelihoodModule logL, _ = likelihoodModule.logL(param_class.kwargs2args(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo)) return logL
[ "def", "best_fit_likelihood", "(", "self", ")", ":", "kwargs_lens", ",", "kwargs_source", ",", "kwargs_lens_light", ",", "kwargs_ps", ",", "kwargs_cosmo", "=", "self", ".", "best_fit", "(", "bijective", "=", "False", ")", "param_class", "=", "self", ".", "_par...
49.5
26.666667
def send_message(self, message, mention_id=None, mentions=[]): """ Send the specified message to twitter, with appropriate mentions, tokenized as necessary :param message: Message to be sent :param mention_id: In-reply-to mention_id (to link messages to a previous message) :param mentions: List of usernames to mention in reply :return: """ messages = self.tokenize(message, self.MESSAGE_LENGTH, mentions) code = 0 for message in messages: if self.dry_run: mention_message = '' if mention_id: mention_message = " to mention_id '{0}'".format(mention_id) logging.info("Not posting to Twitter because DRY_RUN is set. Would have posted " "the following message{0}:\n{1}".format(mention_message, message)) else: try: self.twitter.statuses.update(status=message, in_reply_to_status_id=mention_id) except TwitterHTTPError as e: logging.error('Unable to post to twitter: {0}'.format(e)) code = e.response_data['errors'][0]['code'] return code
[ "def", "send_message", "(", "self", ",", "message", ",", "mention_id", "=", "None", ",", "mentions", "=", "[", "]", ")", ":", "messages", "=", "self", ".", "tokenize", "(", "message", ",", "self", ".", "MESSAGE_LENGTH", ",", "mentions", ")", "code", "=...
50.44
24.44
def dropAllCollections(self): """drops all public collections (graphs included) from the database""" for graph_name in self.graphs: self.graphs[graph_name].delete() for collection_name in self.collections: # Collections whose name starts with '_' are system collections if not collection_name.startswith('_'): self[collection_name].delete() return
[ "def", "dropAllCollections", "(", "self", ")", ":", "for", "graph_name", "in", "self", ".", "graphs", ":", "self", ".", "graphs", "[", "graph_name", "]", ".", "delete", "(", ")", "for", "collection_name", "in", "self", ".", "collections", ":", "# Collectio...
47
11.444444
def _register_engine(self, uid): """New engine with ident `uid` became available.""" # head of the line: self.targets.insert(0,uid) self.loads.insert(0,0) # initialize sets self.completed[uid] = set() self.failed[uid] = set() self.pending[uid] = {} # rescan the graph: self.update_graph(None)
[ "def", "_register_engine", "(", "self", ",", "uid", ")", ":", "# head of the line:", "self", ".", "targets", ".", "insert", "(", "0", ",", "uid", ")", "self", ".", "loads", ".", "insert", "(", "0", ",", "0", ")", "# initialize sets", "self", ".", "comp...
27.846154
13.615385
def get_parent_obj(obj): """ Gets the name of the object containing @obj and returns as a string @obj: any python object -> #str parent object name or None .. from redis_structures.debug import get_parent_obj get_parent_obj(get_parent_obj) # -> <module 'redis_structures.debug' from> .. """ try: cls = get_class_that_defined_method(obj) if cls and cls != obj: return cls except AttributeError: pass if hasattr(obj, '__module__') and obj.__module__: try: module = importlib.import_module(obj.__module__) objname = get_obj_name(obj).split(".") owner = getattr(module, objname[-2]) return getattr(owner, objname[-1]) except Exception: try: return module except Exception: pass try: assert hasattr(obj, '__qualname__') or hasattr(obj, '__name__') objname = obj.__qualname__ if hasattr(obj, '__qualname__') \ else obj.__name__ objname = objname.split(".") assert len(objname) > 1 return locate(".".join(objname[:-1])) except Exception: try: module = importlib.import_module(".".join(objname[:-1])) return module except Exception: pass return None
[ "def", "get_parent_obj", "(", "obj", ")", ":", "try", ":", "cls", "=", "get_class_that_defined_method", "(", "obj", ")", "if", "cls", "and", "cls", "!=", "obj", ":", "return", "cls", "except", "AttributeError", ":", "pass", "if", "hasattr", "(", "obj", "...
30.931818
18.068182
def unpack(cls, msg): """Construct an _OpReply from raw bytes.""" # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) # Convert Python 3 memoryview to bytes. Note we should call # memoryview.tobytes() if we start using memoryview in Python 2.7. documents = bytes(msg[20:]) return cls(flags, cursor_id, number_returned, documents)
[ "def", "unpack", "(", "cls", ",", "msg", ")", ":", "# PYTHON-945: ignore starting_from field.", "flags", ",", "cursor_id", ",", "_", ",", "number_returned", "=", "cls", ".", "UNPACK_FROM", "(", "msg", ")", "# Convert Python 3 memoryview to bytes. Note we should call", ...
47.555556
20.555556
def get_logger(name, file_name=None, stream=None, template=None, propagate=False, level=None): """Get a logger by name. """ logger = logging.getLogger(name) running_tests = ( 'test' in sys.argv # running with setup.py or sys.argv[0].endswith('py.test')) # running with py.test if running_tests and not level: # testing without level, this means tester does not want to see any log messages. level = logging.CRITICAL if not level: level = logging.INFO logger.setLevel(level) logger.propagate = propagate formatter = logging.Formatter(template) if not stream: stream = sys.stdout logger.handlers = [] handler = logging.StreamHandler(stream=stream) handler.setFormatter(formatter) logger.addHandler(handler) if file_name: handler = logging.FileHandler(file_name) handler.setFormatter(logging.Formatter('%(asctime)s '+template)) logger.addHandler(handler) return logger
[ "def", "get_logger", "(", "name", ",", "file_name", "=", "None", ",", "stream", "=", "None", ",", "template", "=", "None", ",", "propagate", "=", "False", ",", "level", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", "...
28.764706
21.323529
async def listArtifacts(self, *args, **kwargs): """ Get Artifacts from Run Returns a list of artifacts and associated meta-data for a given run. As a task may have many artifacts paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option: `continuationToken`. By default this end-point will list up-to 1000 artifacts in a single page you may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-artifacts-response.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
[ "async", "def", "listArtifacts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"listArtifacts\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ...
38.5
27.7
def next_question(self): """Returns the next `Question` in the questionnaire, or `None` if there are no questions left. Returns first question for whose key there is no answer and for which condition is satisfied, or for which there is no condition. """ for key, questions in self.questions.items(): if key in self.answers: continue for question in questions: if self.check_condition(question._condition): return question return None
[ "def", "next_question", "(", "self", ")", ":", "for", "key", ",", "questions", "in", "self", ".", "questions", ".", "items", "(", ")", ":", "if", "key", "in", "self", ".", "answers", ":", "continue", "for", "question", "in", "questions", ":", "if", "...
42.538462
15.153846
def open_file_from_iso(self, **kwargs): # type: (str) -> PyCdlibIO ''' Open a file for reading in a context manager. This allows the user to operate on the file in user-defined chunks (utilizing the read() method of the returned context manager). Parameters: iso_path - The absolute ISO path to the file on the ISO. rr_path - The absolute Rock Ridge path to the file on the ISO. joliet_path - The absolute Joliet path to the file on the ISO. udf_path - The absolute UDF path to the file on the ISO. Returns: A PyCdlibIO object allowing access to the file. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') num_paths = 0 rec = None # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]] for key in kwargs: if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']: if kwargs[key] is not None: num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if 'joliet_path' in kwargs: joliet_path = self._normalize_joliet_path(kwargs['joliet_path']) rec = self._find_joliet_record(joliet_path) elif 'udf_path' in kwargs: if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Can only specify a UDF path for a UDF ISO') (ident_unused, rec) = self._find_udf_record(utils.normpath(kwargs['udf_path'])) if rec is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry') elif 'rr_path' in kwargs: if not self.rock_ridge: raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a rr_path from a non-Rock Ridge ISO') rec = self._find_rr_record(utils.normpath(kwargs['rr_path'])) else: rec = self._find_iso_record(utils.normpath(kwargs['iso_path'])) if not rec.is_file(): raise 
pycdlibexception.PyCdlibInvalidInput('Path to open must be a file') if rec.inode is None: raise pycdlibexception.PyCdlibInvalidInput('File has no data') return PyCdlibIO(rec.inode, self.pvd.logical_block_size())
[ "def", "open_file_from_iso", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# type: (str) -> PyCdlibIO", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initialized; call either open()...
49.471698
30.641509
def disconnect_sync(self, conn_id): """Synchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection Returns: dict: A dictionary with two elements 'success': a bool with the result of the connection attempt 'failure_reason': a string with the reason for the failure if we failed """ done = threading.Event() result = {} def disconnect_done(conn_id, adapter_id, status, reason): result['success'] = status result['failure_reason'] = reason done.set() self.disconnect_async(conn_id, disconnect_done) done.wait() return result
[ "def", "disconnect_sync", "(", "self", ",", "conn_id", ")", ":", "done", "=", "threading", ".", "Event", "(", ")", "result", "=", "{", "}", "def", "disconnect_done", "(", "conn_id", ",", "adapter_id", ",", "status", ",", "reason", ")", ":", "result", "...
30.875
23.375
def parametric_function(fx='sin(t)', fy='cos(t)', tmin=-1, tmax=1, steps=200, p='t', g=None, erange=False, **kwargs): """ Plots the parametric function over the specified range Parameters ---------- fx='sin(t)', fy='cos(t)' Functions or (matching) lists of functions to plot; can be string functions or python functions taking one argument tmin=-1, tmax=1, steps=200 Range over which to plot, and how many points to plot p='t' If using strings for functions, p is the parameter name g=None Optional dictionary of extra globals. Try g=globals()! erange=False Use exponential spacing of the t data? See spinmob.plot.xy.data() for additional optional keyword arguments. """ if not g: g = {} for k in list(globals().keys()): if k not in g: g[k] = globals()[k] # if the x-axis is a log scale, use erange if erange: r = _fun.erange(tmin, tmax, steps) else: r = _n.linspace(tmin, tmax, steps) # make sure it's a list so we can loop over it if not type(fy) in [type([]), type(())]: fy = [fy] if not type(fx) in [type([]), type(())]: fx = [fx] # loop over the list of functions xdatas = [] ydatas = [] labels = [] for fs in fx: if type(fs) == str: a = eval('lambda ' + p + ': ' + fs, g) a.__name__ = fs else: a = fs x = [] for z in r: x.append(a(z)) xdatas.append(x) labels.append(a.__name__) for n in range(len(fy)): fs = fy[n] if type(fs) == str: a = eval('lambda ' + p + ': ' + fs, g) a.__name__ = fs else: a = fs y = [] for z in r: y.append(a(z)) ydatas.append(y) labels[n] = labels[n]+', '+a.__name__ # plot! xy_data(xdatas, ydatas, label=labels, **kwargs)
[ "def", "parametric_function", "(", "fx", "=", "'sin(t)'", ",", "fy", "=", "'cos(t)'", ",", "tmin", "=", "-", "1", ",", "tmax", "=", "1", ",", "steps", "=", "200", ",", "p", "=", "'t'", ",", "g", "=", "None", ",", "erange", "=", "False", ",", "*...
27.791045
21.761194
def before_sample(analysis_request): """Method triggered before "sample" transition for the Analysis Request passed in is performed """ if not analysis_request.getDateSampled(): analysis_request.setDateSampled(DateTime()) if not analysis_request.getSampler(): analysis_request.setSampler(api.get_current_user().id)
[ "def", "before_sample", "(", "analysis_request", ")", ":", "if", "not", "analysis_request", ".", "getDateSampled", "(", ")", ":", "analysis_request", ".", "setDateSampled", "(", "DateTime", "(", ")", ")", "if", "not", "analysis_request", ".", "getSampler", "(", ...
42.875
7.125
def get_record(self): """Override the base get_record.""" self.update_system_numbers() self.add_systemnumber("CDS") self.fields_list = [ "024", "041", "035", "037", "088", "100", "110", "111", "242", "245", "246", "260", "269", "300", "502", "650", "653", "693", "700", "710", "773", "856", "520", "500", "980" ] self.keep_only_fields() self.determine_collections() self.add_cms_link() self.update_languages() self.update_reportnumbers() self.update_date() self.update_pagenumber() self.update_authors() self.update_subject_categories("SzGeCERN", "INSPIRE", "categories_inspire") self.update_keywords() self.update_experiments() self.update_collaboration() self.update_journals() self.update_links_and_ffts() if 'THESIS' in self.collections: self.update_thesis_supervisors() self.update_thesis_information() if 'NOTE' in self.collections: self.add_notes() for collection in self.collections: record_add_field(self.record, tag='980', subfields=[('a', collection)]) self.remove_controlfields() return self.record
[ "def", "get_record", "(", "self", ")", ":", "self", ".", "update_system_numbers", "(", ")", "self", ".", "add_systemnumber", "(", "\"CDS\"", ")", "self", ".", "fields_list", "=", "[", "\"024\"", ",", "\"041\"", ",", "\"035\"", ",", "\"037\"", ",", "\"088\"...
33.325
13.05
def status(self): """Get the status of the daemon.""" if self.pidfile is None: raise DaemonError('Cannot get status of daemon without PID file') pid = self._read_pidfile() if pid is None: self._emit_message( '{prog} -- not running\n'.format(prog=self.prog)) sys.exit(1) proc = psutil.Process(pid) # Default data data = { 'prog': self.prog, 'pid': pid, 'status': proc.status(), 'uptime': '0m', 'cpu': 0.0, 'memory': 0.0, } # Add up all the CPU and memory usage of all the # processes in the process group pgid = os.getpgid(pid) for gproc in psutil.process_iter(): try: if os.getpgid(gproc.pid) == pgid and gproc.pid != 0: data['cpu'] += gproc.cpu_percent(interval=0.1) data['memory'] += gproc.memory_percent() except (psutil.Error, OSError): continue # Calculate the uptime and format it in a human-readable but # also machine-parsable format try: uptime_mins = int(round((time.time() - proc.create_time()) / 60)) uptime_hours, uptime_mins = divmod(uptime_mins, 60) data['uptime'] = str(uptime_mins) + 'm' if uptime_hours: uptime_days, uptime_hours = divmod(uptime_hours, 24) data['uptime'] = str(uptime_hours) + 'h ' + data['uptime'] if uptime_days: data['uptime'] = str(uptime_days) + 'd ' + data['uptime'] except psutil.Error: pass template = ('{prog} -- pid: {pid}, status: {status}, ' 'uptime: {uptime}, %cpu: {cpu:.1f}, %mem: {memory:.1f}\n') self._emit_message(template.format(**data))
[ "def", "status", "(", "self", ")", ":", "if", "self", ".", "pidfile", "is", "None", ":", "raise", "DaemonError", "(", "'Cannot get status of daemon without PID file'", ")", "pid", "=", "self", ".", "_read_pidfile", "(", ")", "if", "pid", "is", "None", ":", ...
37.18
19.96
def _occursOn(self, myDate): """ Returns true iff an occurence of this event starts on this date (given in the event's own timezone). (Does not include postponements, but does exclude cancellations.) """ # TODO analyse which is faster (rrule or db) and test that first if myDate not in self.repeat: return False if CancellationPage.events.child_of(self) \ .filter(except_date=myDate).exists(): return False return True
[ "def", "_occursOn", "(", "self", ",", "myDate", ")", ":", "# TODO analyse which is faster (rrule or db) and test that first", "if", "myDate", "not", "in", "self", ".", "repeat", ":", "return", "False", "if", "CancellationPage", ".", "events", ".", "child_of", "(", ...
39.714286
19.285714
def _do_write(fname, variable, version, date, table): """Write combining tables to filesystem as python code.""" # pylint: disable=R0914 # Too many local variables (19/15) (col 4) print("writing {} ..".format(fname)) import unicodedata import datetime import string utc_now = datetime.datetime.utcnow() indent = 4 with open(fname, 'w') as fout: fout.write( '"""{variable_proper} table. Created by setup.py."""\n' "# Generated: {iso_utc}\n" "# Source: {version}\n" "# Date: {date}\n" "{variable} = (".format(iso_utc=utc_now.isoformat(), version=version, date=date, variable=variable, variable_proper=variable.title())) for start, end in table: ucs_start, ucs_end = unichr(start), unichr(end) hex_start, hex_end = ('0x{0:04x}'.format(start), '0x{0:04x}'.format(end)) try: name_start = string.capwords(unicodedata.name(ucs_start)) except ValueError: name_start = u'' try: name_end = string.capwords(unicodedata.name(ucs_end)) except ValueError: name_end = u'' fout.write('\n' + (' ' * indent)) fout.write('({0}, {1},),'.format(hex_start, hex_end)) fout.write(' # {0:24s}..{1}'.format( name_start[:24].rstrip() or '(nil)', name_end[:24].rstrip())) fout.write('\n)\n') print("complete.")
[ "def", "_do_write", "(", "fname", ",", "variable", ",", "version", ",", "date", ",", "table", ")", ":", "# pylint: disable=R0914", "# Too many local variables (19/15) (col 4)", "print", "(", "\"writing {} ..\"", ".", "format", "(", "fname", ")", ")", "import...
45.45
13.825
def set_result(self, result): """Complete all tasks. """ for future in self.traverse(): # All cancelled futures should have callbacks to removed itself # from this linked list. However, these callbacks are scheduled in # an event loop, so we could still find them in our list. future.set_result(result) if not self.done(): super().set_result(result)
[ "def", "set_result", "(", "self", ",", "result", ")", ":", "for", "future", "in", "self", ".", "traverse", "(", ")", ":", "# All cancelled futures should have callbacks to removed itself", "# from this linked list. However, these callbacks are scheduled in", "# an event loop, s...
47.222222
14.777778
def factory(self, classname, *args, **kwargs): """ Creates an instance of class looking for it in each module registered. You can add needed params to instance the class. :param classname: Class name you want to create an instance. :type classname: str :return: An instance of classname :rtype: object """ klass = self.load_class(classname) return self.get_factory_by_class(klass)(*args, **kwargs)
[ "def", "factory", "(", "self", ",", "classname", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "klass", "=", "self", ".", "load_class", "(", "classname", ")", "return", "self", ".", "get_factory_by_class", "(", "klass", ")", "(", "*", "args", ...
33.357143
18.928571
def udot(op1, op2): """Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. Examples -------- >>> from unyt import km, s >>> a = np.eye(2)*km >>> b = (np.ones((2, 2)) * 2)*s >>> print(udot(a, b)) [[2. 2.] [2. 2.]] km*s """ dot = np.dot(op1.d, op2.d) units = op1.units * op2.units if dot.shape == (): return unyt_quantity(dot, units) return unyt_array(dot, units)
[ "def", "udot", "(", "op1", ",", "op2", ")", ":", "dot", "=", "np", ".", "dot", "(", "op1", ".", "d", ",", "op2", ".", "d", ")", "units", "=", "op1", ".", "units", "*", "op2", ".", "units", "if", "dot", ".", "shape", "==", "(", ")", ":", "...
24.526316
16.368421
async def _send_report(self, status): """ Call all subscribed coroutines in _notify whenever a status update occurs. This method is a coroutine """ if len(self._notify) > 0: # Each client gets its own copy of the dict. asyncio.gather(*[coro(dict(status)) for coro in self._notify], loop=self.loop)
[ "async", "def", "_send_report", "(", "self", ",", "status", ")", ":", "if", "len", "(", "self", ".", "_notify", ")", ">", "0", ":", "# Each client gets its own copy of the dict.", "asyncio", ".", "gather", "(", "*", "[", "coro", "(", "dict", "(", "status",...
35.181818
13.909091
def neutralize_variables(self, tax_benefit_system): """ Neutralizing input variables not in input dataframe and keep some crucial variables """ for variable_name, variable in tax_benefit_system.variables.items(): if variable.formulas: continue if self.used_as_input_variables and (variable_name in self.used_as_input_variables): continue if self.non_neutralizable_variables and (variable_name in self.non_neutralizable_variables): continue if self.weight_column_name_by_entity and (variable_name in self.weight_column_name_by_entity.values()): continue tax_benefit_system.neutralize_variable(variable_name)
[ "def", "neutralize_variables", "(", "self", ",", "tax_benefit_system", ")", ":", "for", "variable_name", ",", "variable", "in", "tax_benefit_system", ".", "variables", ".", "items", "(", ")", ":", "if", "variable", ".", "formulas", ":", "continue", "if", "self...
49.933333
28.6
def collect_analysis(self): ''' :return: a dictionary which is used to get the serialized analyzer definition from the analyzer class. ''' analysis = {} for field in self.fields.values(): for analyzer_name in ('analyzer', 'index_analyzer', 'search_analyzer'): if not hasattr(field, analyzer_name): continue analyzer = getattr(field, analyzer_name) if not isinstance(analyzer, Analyzer): continue definition = analyzer.get_analysis_definition() if definition is None: continue for key in definition: analysis.setdefault(key, {}).update(definition[key]) return analysis
[ "def", "collect_analysis", "(", "self", ")", ":", "analysis", "=", "{", "}", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", ":", "for", "analyzer_name", "in", "(", "'analyzer'", ",", "'index_analyzer'", ",", "'search_analyzer'", ")",...
34.173913
24.347826
def padded_cross_entropy(logits, labels, label_smoothing, weights_fn=weights_nonzero, reduce_sum=True, cutoff=0.0, gaussian=False): """Compute cross-entropy assuming 0s are padding. Computes a loss numerator (the sum of losses), and loss denominator (the number of non-padding tokens). Args: logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`. optionally a FactoredTensor. labels: an integer `Tensor` with shape `[batch, timesteps]`. label_smoothing: a floating point `Scalar`. weights_fn: A function from labels to weights. reduce_sum: a Boolean, whether to sum at the end or not. cutoff: a float, at which point to have no loss. gaussian: If true, use a Gaussian distribution for label smoothing Returns: loss_numerator: a `Scalar`. Sum of losses. loss_denominator: a `Scalar. The number of non-padding target tokens. Raises: ValueError: in case of unsupported argument types. """ if isinstance(logits, FactoredTensor): if gaussian: raise ValueError("Factored padded cross entropy with Gaussian smoothing " "is not implemented yet.") return padded_cross_entropy_factored( logits, labels, label_smoothing, weights_fn=weights_fn, reduce_sum=reduce_sum) confidence = 1.0 - label_smoothing logits_shape = shape_list(logits) vocab_size = logits_shape[-1] with tf.name_scope("padded_cross_entropy", values=[logits, labels]): if len(logits_shape) == 2: # Deal with the case where we did not insert extra dimensions due to # TPU issues. No pad-to-same-length happens in this case. # TODO(noam): remove this logic once TPU can handle extra dimensions. 
labels = tf.reshape(labels, [-1]) else: logits, labels = pad_with_zeros(logits, labels) logits = tf.reshape( logits, shape_list(labels) + [vocab_size], name="padded_cross_entropy_size_check") logits = tf.cast(logits, tf.float32) xent = smoothing_cross_entropy( logits, labels, vocab_size, confidence, gaussian=gaussian) weights = weights_fn(labels) if cutoff > 0.0: xent = tf.nn.relu(xent - cutoff) if not reduce_sum: return xent * weights, weights return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
[ "def", "padded_cross_entropy", "(", "logits", ",", "labels", ",", "label_smoothing", ",", "weights_fn", "=", "weights_nonzero", ",", "reduce_sum", "=", "True", ",", "cutoff", "=", "0.0", ",", "gaussian", "=", "False", ")", ":", "if", "isinstance", "(", "logi...
38.380952
16.253968
def retrieve_assignment_overridden_dates_for_quizzes(self, course_id, quiz_assignment_overrides_0_quiz_ids=None): """ Retrieve assignment-overridden dates for quizzes. Retrieve the actual due-at, unlock-at, and available-at dates for quizzes based on the assignment overrides active for the current API user. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - quiz_assignment_overrides[0][quiz_ids] """An array of quiz IDs. If omitted, overrides for all quizzes available to the operating user will be returned.""" if quiz_assignment_overrides_0_quiz_ids is not None: params["quiz_assignment_overrides[0][quiz_ids]"] = quiz_assignment_overrides_0_quiz_ids self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/assignment_overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/assignment_overrides".format(**path), data=data, params=params, single_item=True)
[ "def", "retrieve_assignment_overridden_dates_for_quizzes", "(", "self", ",", "course_id", ",", "quiz_assignment_overrides_0_quiz_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\...
52.217391
32.869565
def fit(self, X, y=None, **fit_params): """Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self """ # quic-specific outputs self.opt_ = None self.cputime_ = None self.iters_ = None self.duality_gap_ = None # these must be updated upon self.fit() self.path_ = None self.sample_covariance_ = None self.lam_scale_ = None self.lam_ = None self.is_fitted_ = False X = check_array(X, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) self.init_coefs(X) # either use passed in path, or make our own path lam_1 = self.lam_scale_ lam_0 = 1e-2 * lam_1 if self.path is None: self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), 100)[::-1] elif isinstance(self.path, int): self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), self.path)[::-1] else: self.path_ = self.path self.path_ = _validate_path(self.path_) # fit along the path, temporarily populate # self.precision_, self.covariance_ with path values so we can use our # inherited selection function if self.method == "quic": (self.precision_, self.covariance_, _, _, _, _) = quic( self.sample_covariance_, self.lam * self.lam_scale_, mode="path", tol=self.tol, max_iter=self.max_iter, Theta0=self.Theta0, Sigma0=self.Sigma0, path=self.path_, msg=self.verbose, ) self.is_fitted_ = True else: raise NotImplementedError("Only method='quic' has been implemented.") # apply EBIC criteria best_lam_idx = self.ebic_select(gamma=self.gamma) self.lam_ = self.lam * self.lam_scale_ * self.path_[best_lam_idx] self.precision_ = self.precision_[best_lam_idx] self.covariance_ = self.covariance_[best_lam_idx] self.is_fitted_ = True return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "fit_params", ")", ":", "# quic-specific outputs", "self", ".", "opt_", "=", "None", "self", ".", "cputime_", "=", "None", "self", ".", "iters_", "=", "None", "self", ".", "...
33.188406
18.043478
def _cached_results(self, start_time, end_time): """ Retrieves cached results for any bucket that has a single cache entry. If a bucket has two cache entries, there is a chance that two different writers previously computed and cached a result since Kronos has no transaction semantics. While it might be safe to return one of the cached results if there are multiple, we currently do the safe thing and pretend we have no previously computed data for this bucket. """ cached_buckets = self._bucket_events( self._client.get(self._scratch_stream, start_time, end_time, namespace=self._scratch_namespace)) for bucket_events in cached_buckets: # If we have multiple cache entries for the same bucket, pretend # we have no results for that bucket. if len(bucket_events) == 1: first_result = bucket_events[0] yield (kronos_time_to_epoch_time(first_result[TIMESTAMP_FIELD]), first_result[QueryCache.CACHE_KEY])
[ "def", "_cached_results", "(", "self", ",", "start_time", ",", "end_time", ")", ":", "cached_buckets", "=", "self", ".", "_bucket_events", "(", "self", ".", "_client", ".", "get", "(", "self", ".", "_scratch_stream", ",", "start_time", ",", "end_time", ",", ...
48
16.285714
def jsontype(self, name, path=Path.rootPath()):
    """Return the JSON type of the value stored under ``path`` in key ``name``."""
    resolved = str_path(path)
    return self.execute_command('JSON.TYPE', name, resolved)
[ "def", "jsontype", "(", "self", ",", "name", ",", "path", "=", "Path", ".", "rootPath", "(", ")", ")", ":", "return", "self", ".", "execute_command", "(", "'JSON.TYPE'", ",", "name", ",", "str_path", "(", "path", ")", ")" ]
42.2
13.8
def tmpl_first(text, count=1, skip=0, sep=u'; ', join_str=u'; '):
    """
    * synopsis: ``%first{text}`` or ``%first{text,count,skip}`` or \
        ``%first{text,count,skip,sep,join}``
    * description: Returns the first item, separated by ; . You can use \
        %first{text,count,skip}, where count is the number of items \
        (default 1) and skip is number to skip (default 0). You can also \
        use %first{text,count,skip,sep,join} where sep is the separator, \
        like ; or / and join is the text to concatenate the items.

    :param text: the string
    :param count: The number of items included
    :param skip: The number of items skipped
    :param sep: the separator. Usually is '; ' (default) or '/ '
    :param join_str: the string which will join the items, default '; '.
    """
    start = int(skip)
    stop = start + int(count)
    selected_items = text.split(sep)[start:stop]
    return join_str.join(selected_items)
[ "def", "tmpl_first", "(", "text", ",", "count", "=", "1", ",", "skip", "=", "0", ",", "sep", "=", "u'; '", ",", "join_str", "=", "u'; '", ")", ":", "skip", "=", "int", "(", "skip", ")", "count", "=", "skip", "+", "int", "(", "count", ")", "retu...
51.052632
21.684211
def get_object(cls, api_token, domain_name):
    """
    Class method that will return a Domain object by ID.
    """
    # Build the domain shell, then populate it from the API.
    instance = cls(token=api_token, name=domain_name)
    instance.load()
    return instance
[ "def", "get_object", "(", "cls", ",", "api_token", ",", "domain_name", ")", ":", "domain", "=", "cls", "(", "token", "=", "api_token", ",", "name", "=", "domain_name", ")", "domain", ".", "load", "(", ")", "return", "domain" ]
32.428571
11.571429
def update(self, id, body): """Modifies a connection. Args: id: Id of the connection. body (dict): Specifies which fields are to be modified, and to what values. See: https://auth0.com/docs/api/management/v2#!/Connections/patch_connections_by_id Returns: The modified connection object. """ return self.client.patch(self._url(id), data=body)
[ "def", "update", "(", "self", ",", "id", ",", "body", ")", ":", "return", "self", ".", "client", ".", "patch", "(", "self", ".", "_url", "(", "id", ")", ",", "data", "=", "body", ")" ]
28.8
24.133333
def create_window(width, height, title, monitor, share):
    """
    Creates a window and its associated context.

    Wrapper for:
        GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share);
    """
    # Convert the Python string to the C char pointer glfw expects.
    title_ptr = _to_char_p(title)
    return _glfw.glfwCreateWindow(width, height, title_ptr,
                                  monitor, share)
[ "def", "create_window", "(", "width", ",", "height", ",", "title", ",", "monitor", ",", "share", ")", ":", "return", "_glfw", ".", "glfwCreateWindow", "(", "width", ",", "height", ",", "_to_char_p", "(", "title", ")", ",", "monitor", ",", "share", ")" ]
41.111111
22.666667
def _iter_info(self, niter, level=logging.INFO):
    """Log the iteration number and maximum mismatch.

    Parameters
    ----------
    niter : int
        1-based iteration number; the mismatch is read from
        ``self.iter_mis[niter - 1]``.
    level : int
        logging level (e.g. ``logging.INFO``).

    Returns
    -------
    None
    """
    max_mis = self.iter_mis[niter - 1]
    msg = ' Iter {:<d}. max mismatch = {:8.7f}'.format(niter, max_mis)
    # Bug fix: ``level`` was accepted and documented but never used — the
    # message was always emitted at INFO. Honor the requested level.
    logger.log(level, msg)
[ "def", "_iter_info", "(", "self", ",", "niter", ",", "level", "=", "logging", ".", "INFO", ")", ":", "max_mis", "=", "self", ".", "iter_mis", "[", "niter", "-", "1", "]", "msg", "=", "' Iter {:<d}. max mismatch = {:8.7f}'", ".", "format", "(", "niter", ...
24.533333
17.733333
def get_local_variable_or_declare_one(self, raw_name, type=None): ''' This function will first check if raw_name has been used to create some variables. If yes, the latest one named in self.variable_name_mapping[raw_name] will be returned. Otherwise, a new variable will be created and then returned. ''' onnx_name = self.get_onnx_variable_name(raw_name) if onnx_name in self.variables: return self.variables[onnx_name] else: variable = Variable(raw_name, onnx_name, self.name, type) self.variables[onnx_name] = variable if raw_name in self.variable_name_mapping: self.variable_name_mapping[raw_name].append(onnx_name) else: self.variable_name_mapping[raw_name] = [onnx_name] return variable
[ "def", "get_local_variable_or_declare_one", "(", "self", ",", "raw_name", ",", "type", "=", "None", ")", ":", "onnx_name", "=", "self", ".", "get_onnx_variable_name", "(", "raw_name", ")", "if", "onnx_name", "in", "self", ".", "variables", ":", "return", "self...
49.588235
26.058824
def get_group_members(self, group_id, max_results=None, paging_token=None):
    """GetGroupMembers.

    [Preview API] Get direct members of a Group.

    :param str group_id: Id of the Group.
    :param int max_results: Maximum number of results to retrieve.
    :param str paging_token: Paging Token from the previous page fetched. If the 'pagingToken' is null, the results would be fetched from the begining of the Members List.
    :rtype: :class:`<PagedGraphMemberList> <azure.devops.v5_0.member_entitlement_management.models.PagedGraphMemberList>`
    """
    # Serialize the URL route segment.
    route_values = {}
    if group_id is not None:
        route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
    # Serialize optional query parameters, skipping omitted ones.
    query_parameters = {}
    if max_results is not None:
        query_parameters['maxResults'] = self._serialize.query(
            'max_results', max_results, 'int')
    if paging_token is not None:
        query_parameters['pagingToken'] = self._serialize.query(
            'paging_token', paging_token, 'str')
    response = self._send(http_method='GET',
                          location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('PagedGraphMemberList', response)
[ "def", "get_group_members", "(", "self", ",", "group_id", ",", "max_results", "=", "None", ",", "paging_token", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "group_id", "is", "not", "None", ":", "route_values", "[", "'groupId'", "]", "=", ...
63.5
29.318182
def normalize(W, copy=True):
    '''
    Normalizes an input weighted connection matrix so the largest absolute
    weight becomes 1.  If copy is not set, this function will *modify W in
    place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        normalized connectivity matrix
    '''
    result = W.copy() if copy else W
    result /= np.max(np.abs(result))
    return result
[ "def", "normalize", "(", "W", ",", "copy", "=", "True", ")", ":", "if", "copy", ":", "W", "=", "W", ".", "copy", "(", ")", "W", "/=", "np", ".", "max", "(", "np", ".", "abs", "(", "W", ")", ")", "return", "W" ]
23.454545
23.272727
def preprocess(self, data):
    """
    Processes popcorn JSON and builds a sane data model out of it.

    Downloads every clip referenced in the project, partitions track events
    into edits (skip/loop), overlay items (text/image) and sequenced videos,
    then renders a silent background canvas with ffmpeg.

    NOTE: this is Python 2 code (print statements, dict.iteritems,
    urllib.urlretrieve).

    @param data : The popcorn editor project json blob
    """
    print 'Beginning pre-process...'
    # Download every referenced clip to a local .webm file.
    for url, video in data['media'][0]['clipData'].iteritems():
        print 'Downloading {0} from {1}.'.format(video['title'], url)
        # Removes all white spaces and non alphanumeric chars from title
        video['title'] = re.sub(
            '[^\w\.]',
            '',
            video['title']
        ) + '.webm'
        urllib.urlretrieve(url, video['title'])
        self.base_videos.append(video['title'])
        print 'video downloaded as %s!' % video['title']
    print 'All videos downloaded.'
    # Flatten all track events across tracks into one list.
    events = [event for track in data['media'][0]['tracks']
              for event in track['trackEvents']]
    for event in events:
        if event['type'] == 'skip' or event['type'] == 'loopPlugin':
            edit = TrackEdit(event['type'], event['popcornOptions'])
            self.track_edits.append(edit)
        if event['type'] == 'text' or event['type'] == 'image':
            item = TrackItem(event['type'], event['popcornOptions'])
            item.options['start_stamp'] = \
                item.options['start']
            item.options['end_stamp'] = \
                item.options['end']
            # Convert percentage-based overlay positions into pixels for
            # the configured output size.
            item.options['x_px'] = percent_to_px(
                item.options['left'], self.size[0]
            )
            item.options['y_px'] = percent_to_px(
                item.options['top'], self.size[1]
            )
            self.track_items.append(item)
        if event['type'] == 'sequencer':
            video = TrackVideo(
                event['type'],
                event['popcornOptions']['source'][0],
                event['popcornOptions']
            )
            self.track_videos.append(video)
    self.parse_duration()
    # Render a solid-color, silent canvas the full project duration long;
    # later passes composite the clips/overlays onto it.
    cfilter = r'color=c={0}:s={1}x{2}:d={3};aevalsrc=0:d={4}'.format(
        self.background_color,
        self.size[0], self.size[1],
        self.duration,
        self.duration,
    )
    call(['ffmpeg', '-filter_complex', cfilter,
          '-y', self.current_video.name])
[ "def", "preprocess", "(", "self", ",", "data", ")", ":", "print", "'Beginning pre-process...'", "for", "url", ",", "video", "in", "data", "[", "'media'", "]", "[", "0", "]", "[", "'clipData'", "]", ".", "iteritems", "(", ")", ":", "print", "'Downloading ...
34.691176
18.397059
def filter_set(self, name):
    """
    Adds filters from a particular global :class:`FilterSet`.

    Args:
        name (str): The name of the set whose filters should be added.
    """
    source_set = filter_sets[name]
    # Copy each named filter into this instance's registry.
    for filter_name, filter_obj in iter(source_set.filters.items()):
        self.filters[filter_name] = filter_obj
    self.descriptions += source_set.descriptions
[ "def", "filter_set", "(", "self", ",", "name", ")", ":", "filter_set", "=", "filter_sets", "[", "name", "]", "for", "name", ",", "filter", "in", "iter", "(", "filter_set", ".", "filters", ".", "items", "(", ")", ")", ":", "self", ".", "filters", "[",...
32.583333
17.916667
def check_dependencies():
    """Check external dependecies

    Return a tuple with the available generators.
    """
    available = []
    # Probe each known generator command; a missing binary raises OSError.
    for probe_command, generator in (('ebook-convert', 'calibre'),
                                     ('pandoc --help', 'pandoc')):
        try:
            shell(probe_command)
        except OSError:
            continue
        available.append(generator)
    if not available:
        sys.exit(error('No generator found, you cannot use md2ebook.'))
    check_dependency_epubcheck()
    return available
[ "def", "check_dependencies", "(", ")", ":", "available", "=", "[", "]", "try", ":", "shell", "(", "'ebook-convert'", ")", "available", ".", "append", "(", "'calibre'", ")", "except", "OSError", ":", "pass", "try", ":", "shell", "(", "'pandoc --help'", ")",...
25.421053
16.684211
def alternative_parser(self, family_file):
    """
    Parse alternative formatted family info.

    This parses information with more than six columns.
    For alternative information a header column must exist and each row must
    have the same amount of columns as the header.
    First six columns must be the same as in the ped format.

    Arguments:
        family_file (iterator): An iterator with family info lines
            (tab-separated; lines starting with '#' form the header).

    Side effects: populates self.families and self.individuals; may raise
    WrongLineFormat, SyntaxError, WrongAffectionStatus, WrongPhenotype or
    WrongGender on malformed input.
    """
    alternative_header = None
    for line in family_file:
        if line.startswith('#'):
            # Header line: strip the '#' and split into column names.
            alternative_header = line[1:].rstrip().split('\t')
            self.logger.info("Alternative header found: {0}".format(line))
        elif line.strip():
            if not alternative_header:
                raise WrongLineFormat(message="Alternative ped files must have "\
                "headers! Please add a header line.")
            splitted_line = line.rstrip().split('\t')
            if len(splitted_line) < 6:
                # Try to split the line on another symbol:
                splitted_line = line.rstrip().split()
            try:
                self.check_line_length(splitted_line, len(alternative_header))
            except SyntaxError as e:
                self.logger.error('Number of entrys differ from header.')
                self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
                self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
                self.logger.error("Length of Header: {0}. Length of "\
                    "Ped line: {1}".format(
                        len(alternative_header),
                        len(splitted_line))
                    )
                raise e
            if len(line) > 1:
                # First six columns are standard ped columns.
                sample_dict = dict(zip(self.header, splitted_line[:6]))
                family_id = sample_dict['family_id']
                # All columns keyed by the alternative header names.
                all_info = dict(zip(alternative_header, splitted_line))
                if sample_dict['family_id'] not in self.families:
                    self.families[family_id] = Family(family_id, {})
                sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
                # Try other header naming:
                if not sample_dict['genetic_models']:
                    sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
                sample_dict['proband'] = all_info.get('Proband', '.')
                sample_dict['consultand'] = all_info.get('Consultand', '.')
                sample_dict['alive'] = all_info.get('Alive', '.')
                ind_object = self.get_individual(**sample_dict)
                self.individuals[ind_object.individual_id] = ind_object
                self.families[ind_object.family].add_individual(ind_object)
                if sample_dict['genetic_models']:
                    for model in self.get_models(sample_dict['genetic_models']):
                        self.families[ind_object.family].models_of_inheritance.add(model)
                # If requested, we try is it is an id in the CMMS format:
                sample_id_parts = ind_object.individual_id.split('-')
                if self.cmms_check and (len(sample_id_parts) == 3):
                    # If the id follow the CMMS convention we can
                    # do a sanity check
                    if self.check_cmms_id(ind_object.individual_id):
                        self.logger.debug("Id follows CMMS convention: {0}".format(
                            ind_object.individual_id
                        ))
                        self.logger.debug("Checking CMMS id affections status")
                        try:
                            self.check_cmms_affection_status(ind_object)
                        except WrongAffectionStatus as e:
                            self.logger.error("Wrong affection status for"\
                            " {0}. Affection status can be in"\
                            " {1}".format(e.cmms_id, e.valid_statuses))
                            raise e
                        except WrongPhenotype as e:
                            self.logger.error("Affection status for {0} "\
                            "({1}) disagrees with phenotype ({2})".format(
                                e.cmms_id, e.phenotype, e.affection_status
                            ))
                            raise e
                        try:
                            self.check_cmms_gender(ind_object)
                        except WrongGender as e:
                            self.logger.error("Gender code for id {0}"\
                            "({1}) disagrees with sex:{2}".format(
                                e.cmms_id, e.sex_code, e.sex
                            ))
                            raise e
                # Columns beyond the standard six are kept as extra info,
                # keyed by their header names.
                for i in range(6, len(splitted_line)):
                    ind_object.extra_info[alternative_header[i]] = splitted_line[i]
[ "def", "alternative_parser", "(", "self", ",", "family_file", ")", ":", "alternative_header", "=", "None", "for", "line", "in", "family_file", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "alternative_header", "=", "line", "[", "1", ":", "]"...
53.07619
23.666667
def save(self, path, binary=False):
    """Save a set of constructs into the CLIPS data base.

    If binary is True, the constructs will be saved in binary format.

    The Python equivalent of the CLIPS load command.
    """
    encoded_path = path.encode()
    # Binary save uses Bsave; textual save uses Save.
    saver = lib.EnvBsave if binary else lib.EnvSave
    result = saver(self._env, encoded_path)
    if result == 0:
        raise CLIPSError(self._env)
[ "def", "save", "(", "self", ",", "path", ",", "binary", "=", "False", ")", ":", "if", "binary", ":", "ret", "=", "lib", ".", "EnvBsave", "(", "self", ".", "_env", ",", "path", ".", "encode", "(", ")", ")", "else", ":", "ret", "=", "lib", ".", ...
31.214286
19.642857
def polite_string(a_string):
    """Returns a "proper" string that should work in both Py3/Py2"""
    # Only bytes-like objects on Python 3 need decoding.
    if not (is_py3() and hasattr(a_string, 'decode')):
        return a_string
    try:
        return a_string.decode('utf-8')
    except UnicodeDecodeError:
        # Undecodable bytes: hand back the original object unchanged.
        return a_string
[ "def", "polite_string", "(", "a_string", ")", ":", "if", "is_py3", "(", ")", "and", "hasattr", "(", "a_string", ",", "'decode'", ")", ":", "try", ":", "return", "a_string", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "return", ...
31
14.555556
def _set_queues_interface(self, v, load=False):
    """
    Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_queues_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_queues_interface() directly.

    NOTE: pyangbind-generated code; the YANGDynClass arguments mirror the
    YANG model and should not be edited by hand.
    """
    # Normalize values that carry their own YANG type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with the generated type description so callers see the
        # expected YANG container signature.
        raise ValueError({
            'error-string': """queues_interface must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
        })

    self.__queues_interface = t
    # Notify listeners when the backend supports change hooks.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_queues_interface", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
81.590909
38.045455
def parse_value(self, tup_tree): """ Parse a VALUE element and return its text content as a unicode string. Whitespace is preserved. The conversion of the text representation of the value to a CIM data type object requires CIM type information which is not available on the VALUE element and therefore will be done when parsing higher level elements that have that information. :: <!ELEMENT VALUE (#PCDATA)> """ self.check_node(tup_tree, 'VALUE', (), (), (), allow_pcdata=True) return self.pcdata(tup_tree)
[ "def", "parse_value", "(", "self", ",", "tup_tree", ")", ":", "self", ".", "check_node", "(", "tup_tree", ",", "'VALUE'", ",", "(", ")", ",", "(", ")", ",", "(", ")", ",", "allow_pcdata", "=", "True", ")", "return", "self", ".", "pcdata", "(", "tup...
33.111111
24.111111
async def begin_twophase(self, xid=None):
    """Begin a two-phase or XA transaction and return a transaction handle.

    The returned object is an instance of TwoPhaseTransaction, which in
    addition to the methods provided by Transaction, also provides a
    TwoPhaseTransaction.prepare() method.

    xid - the two phase transaction id. If not supplied, a random id will
    be generated.
    """
    # Nested two-phase transactions are not supported.
    if self._transaction is not None:
        raise exc.InvalidRequestError(
            "Cannot start a two phase transaction when a transaction "
            "is already in progress.")
    if xid is None:
        xid = self._dialect.create_xid()
    transaction = TwoPhaseTransaction(self, xid)
    self._transaction = transaction
    await self.execute("XA START %s", xid)
    return transaction
[ "async", "def", "begin_twophase", "(", "self", ",", "xid", "=", "None", ")", ":", "if", "self", ".", "_transaction", "is", "not", "None", ":", "raise", "exc", ".", "InvalidRequestError", "(", "\"Cannot start a two phase transaction when a transaction \"", "\"is alre...
38.090909
13.818182
def t_bin_ZERO(t):
    r'[^01]'
    # NOTE: the raw docstring above IS the token's regular expression --
    # PLY lexer rules take their pattern from the function docstring, so it
    # must not be altered.
    # Any character that is not a binary digit terminates the binary
    # literal: return to the INITIAL lexer state, emit the literal seen so
    # far as a NUMBER with value 0, and rewind one character so the
    # terminator is re-scanned in the INITIAL state.
    t.lexer.begin('INITIAL')
    t.type = 'NUMBER'
    t.value = 0
    t.lexer.lexpos -= 1
    return t
[ "def", "t_bin_ZERO", "(", "t", ")", ":", "t", ".", "lexer", ".", "begin", "(", "'INITIAL'", ")", "t", ".", "type", "=", "'NUMBER'", "t", ".", "value", "=", "0", "t", ".", "lexer", ".", "lexpos", "-=", "1", "return", "t" ]
18.428571
21.571429
def make_pre_authed_request(self, env, method=None, path=None, body=None,
                            headers=None):
    """Nearly the same as swift.common.wsgi.make_pre_authed_request except
    that this also always sets the 'swift.source' and user agent.

    Newer Swift code will support swift_source as a kwarg, but we do it
    this way so we don't have to have a newer Swift. Since we're doing this
    anyway, we may as well set the user agent too since we always do that.
    """
    policy = self.default_storage_policy
    if policy:
        # Inject the storage policy header, creating the dict if needed.
        if headers:
            headers.update({'X-Storage-Policy': policy})
        else:
            headers = {'X-Storage-Policy': policy}
    subrequest = swift.common.wsgi.make_pre_authed_request(
        env, method=method, path=path, body=body, headers=headers,
        agent=self.agent)
    subrequest.environ['swift.source'] = self.swift_source
    return subrequest
[ "def", "make_pre_authed_request", "(", "self", ",", "env", ",", "method", "=", "None", ",", "path", "=", "None", ",", "body", "=", "None", ",", "headers", "=", "None", ")", ":", "if", "self", ".", "default_storage_policy", ":", "sp", "=", "self", ".", ...
43
18.304348
async def _workaround_1695335(self, delta, old, new, model):
    """
    This is a (hacky) temporary work around for a bug in Juju where
    the instance status and agent version fields don't get updated
    properly by the AllWatcher.

    Deltas never contain a value for `data['agent-status']['version']`,
    and once the `instance-status` reaches `pending`, we no longer get
    any updates for it (the deltas come in, but the `instance-status`
    data is always the same after that).

    To work around this, whenever a delta comes in for this machine, we
    query FullStatus and use the data from there if and only if it's
    newer. Luckily, the timestamps on the `since` field does seem to be
    accurate.

    See https://bugs.launchpad.net/juju/+bug/1695335
    """
    if delta.data.get('synthetic', False):
        # prevent infinite loops re-processing already processed deltas
        return

    # Fetch FullStatus, but abort cleanly if the model watcher is shutting
    # down while we wait.
    full_status = await utils.run_with_interrupt(model.get_status(),
                                                 model._watch_stopping,
                                                 loop=model.loop)
    if model._watch_stopping.is_set():
        return

    # Bail out when FullStatus has nothing usable for this machine.
    if self.id not in full_status.machines:
        return

    if not full_status.machines[self.id]['instance-status']['since']:
        return

    machine = full_status.machines[self.id]

    change_log = []
    # Maps FullStatus field names -> delta field names.
    key_map = {
        'status': 'current',
        'info': 'message',
        'since': 'since',
    }

    # handle agent version specially, because it's never set in
    # deltas, and we don't want even a newer delta to clear it
    agent_version = machine['agent-status']['version']
    if agent_version:
        delta.data['agent-status']['version'] = agent_version
        change_log.append(('agent-version', '', agent_version))

    # only update (other) delta fields if status data is newer
    status_since = pyrfc3339.parse(machine['instance-status']['since'])
    delta_since = pyrfc3339.parse(delta.data['instance-status']['since'])
    if status_since > delta_since:
        for status_key in ('status', 'info', 'since'):
            delta_key = key_map[status_key]
            status_value = machine['instance-status'][status_key]
            delta_value = delta.data['instance-status'][delta_key]
            change_log.append((delta_key, delta_value, status_value))
            delta.data['instance-status'][delta_key] = status_value

    if change_log:
        log.debug('Overriding machine delta with FullStatus data')
        for log_item in change_log:
            log.debug('    {}: {} -> {}'.format(*log_item))
        # Mark the delta synthetic so re-dispatching it below does not
        # trigger this workaround again (see the guard at the top).
        delta.data['synthetic'] = True
        old_obj, new_obj = self.model.state.apply_delta(delta)
        await model._notify_observers(delta, old_obj, new_obj)
[ "async", "def", "_workaround_1695335", "(", "self", ",", "delta", ",", "old", ",", "new", ",", "model", ")", ":", "if", "delta", ".", "data", ".", "get", "(", "'synthetic'", ",", "False", ")", ":", "# prevent infinite loops re-processing already processed deltas...
43.761194
24.80597
def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1
[ "def", "find", "(", "self", ",", "header", ")", ":", "header", "=", "header", ".", "lower", "(", ")", "for", "idx", ",", "item", "in", "enumerate", "(", "self", ".", "_headers", ")", ":", "if", "item", ".", "lower", "(", ")", "==", "header", ":",...
32.5
12.5
def _download_file(uri, bulk_api):
    """Download the bulk API result file for a single batch.

    Streams the response into an anonymous temporary file, rewinds it, and
    yields it; the file is cleaned up when the generator is exhausted.
    """
    response = requests.get(uri, headers=bulk_api.headers(), stream=True)
    with tempfile.TemporaryFile("w+b") as tmp_file:
        for chunk in response.iter_content(chunk_size=None):
            tmp_file.write(chunk)
        tmp_file.seek(0)
        yield tmp_file
[ "def", "_download_file", "(", "uri", ",", "bulk_api", ")", ":", "resp", "=", "requests", ".", "get", "(", "uri", ",", "headers", "=", "bulk_api", ".", "headers", "(", ")", ",", "stream", "=", "True", ")", "with", "tempfile", ".", "TemporaryFile", "(", ...
40.375
14.625
def edge_statistics(docgraph):
    """print basic statistics about an edge, e.g. layer/attribute counts

    NOTE: Python 2 code (print statements). Iterates every edge once,
    tallying layers, attribute keys, and source/target node frequencies,
    then prints each tally.
    """
    print "Edge statistics\n==============="
    layer_counts = Counter()
    attrib_counts = Counter()
    source_counts = Counter()
    target_counts = Counter()
    # Single pass over all edges; each edge contributes to every counter.
    for source, target, edge_attrs in docgraph.edges_iter(data=True):
        for layer in edge_attrs['layers']:
            layer_counts[layer] += 1
        for attrib in edge_attrs:
            attrib_counts[attrib] += 1
        source_counts[source] += 1
        target_counts[target] += 1

    print "\nnumber of edges with layers"
    print_sorted_counter(layer_counts)
    print "\nnumber of edges with attributes"
    print_sorted_counter(attrib_counts)
    print "\nmost common source edges"
    print_most_common(source_counts)
    print "\nmost common target edges"
    print_most_common(target_counts)
[ "def", "edge_statistics", "(", "docgraph", ")", ":", "print", "\"Edge statistics\\n===============\"", "layer_counts", "=", "Counter", "(", ")", "attrib_counts", "=", "Counter", "(", ")", "source_counts", "=", "Counter", "(", ")", "target_counts", "=", "Counter", ...
35.875
9
def dot(data, color=None, point_size=2, f_tooltip=None):
    """Create a dot density map

    :param data: data access object
    :param color: color
    :param point_size: point size
    :param f_tooltip: function to return a tooltip string for a point
    """
    # Imported locally to avoid a circular import at module load time.
    from geoplotlib.layers import DotDensityLayer
    layer = DotDensityLayer(data, color=color, point_size=point_size,
                            f_tooltip=f_tooltip)
    _global_config.layers.append(layer)
[ "def", "dot", "(", "data", ",", "color", "=", "None", ",", "point_size", "=", "2", ",", "f_tooltip", "=", "None", ")", ":", "from", "geoplotlib", ".", "layers", "import", "DotDensityLayer", "_global_config", ".", "layers", ".", "append", "(", "DotDensityLa...
41.5
19.5
def vector_check(vector):
    """
    Check input vector items type.

    Every item must be a non-negative int.

    :param vector: input vector
    :type vector : list
    :return: bool -- True when all items are ints >= 0 (vacuously True for
        an empty vector, matching the original behavior)
    """
    # all() short-circuits on the first invalid item, mirroring the
    # original early returns.  Note bool is a subclass of int, so
    # True/False still pass the isinstance check exactly as before.
    return all(isinstance(item, int) and item >= 0 for item in vector)
[ "def", "vector_check", "(", "vector", ")", ":", "for", "i", "in", "vector", ":", "if", "isinstance", "(", "i", ",", "int", ")", "is", "False", ":", "return", "False", "if", "i", "<", "0", ":", "return", "False", "return", "True" ]
20.214286
15.071429
def auth(username, password):
    '''
    Authenticate against yubico server
    '''
    credentials = __get_yubico_users(username)

    client = Yubico(credentials['id'], credentials['key'])

    try:
        return client.verify(password)
    except yubico_exceptions.StatusCodeError as e:
        # Verification failed at the Yubico service; treat as auth failure.
        log.info('Unable to verify YubiKey `%s`', e)
        return False
[ "def", "auth", "(", "username", ",", "password", ")", ":", "_cred", "=", "__get_yubico_users", "(", "username", ")", "client", "=", "Yubico", "(", "_cred", "[", "'id'", "]", ",", "_cred", "[", "'key'", "]", ")", "try", ":", "return", "client", ".", "...
25.769231
18.538462
def get_sitetree(self, alias):
    """Gets site tree items from the given site tree.
    Caches result to dictionary.
    Returns (tree alias, tree items) tuple.

    :param str|unicode alias: tree alias (may be i18n-resolved below)
    :rtype: tuple
    """
    cache_ = self.cache
    get_cache_entry = cache_.get_entry
    set_cache_entry = cache_.set_entry

    caching_required = False

    if not self.current_app_is_admin():
        # We do not need i18n for a tree rendered in Admin dropdown.
        alias = self.resolve_tree_i18n_alias(alias)

    sitetree = get_cache_entry('sitetrees', alias)

    if not sitetree:
        # Cache miss: load items from the DB (unless dynamic-only mode),
        # attach dynamic items, then cache the combined list.
        if DYNAMIC_ONLY:
            sitetree = []

        else:
            sitetree = (
                MODEL_TREE_ITEM_CLASS.objects.
                select_related('parent', 'tree').
                prefetch_related('access_permissions__content_type').
                filter(tree__alias__exact=alias).
                order_by('parent__sort_order', 'sort_order'))

        sitetree = self.attach_dynamic_tree_items(alias, sitetree)
        set_cache_entry('sitetrees', alias, sitetree)
        caching_required = True

    parents = get_cache_entry('parents', alias)

    if not parents:
        # Build a parent -> [children] index for the whole tree.
        parents = defaultdict(list)

        for item in sitetree:
            parent = getattr(item, 'parent')
            parents[parent].append(item)

        set_cache_entry('parents', alias, parents)

    # Prepare items by ids cache if needed.
    if caching_required:
        # We need this extra pass to avoid future problems on items depth calculation.
        cache_update = cache_.update_entry_value
        for item in sitetree:
            cache_update('items_by_ids', alias, {item.id: item})

    url = self.url
    calculate_item_depth = self.calculate_item_depth

    for item in sitetree:

        if caching_required:
            item.has_children = False

            if not hasattr(item, 'depth'):
                item.depth = calculate_item_depth(alias, item.id)

            item.depth_range = range(item.depth)

            # Resolve item permissions.
            if item.access_restricted:
                permissions_src = (
                    item.permissions if getattr(item, 'is_dynamic', False)
                    else item.access_permissions.all())

                item.perms = set(
                    ['%s.%s' % (perm.content_type.app_label, perm.codename)
                     for perm in permissions_src])

        # Contextual properties — recomputed on every call, since they
        # depend on the current request.
        item.url_resolved = url(item)
        # Lazy title only when the title contains template variables.
        item.title_resolved = LazyTitle(item.title) if VARIABLE_TAG_START in item.title else item.title
        item.is_current = False
        item.in_current_branch = False

    # Get current item for the given sitetree.
    self.get_tree_current_item(alias)

    # Save sitetree data into cache if needed.
    if caching_required:
        cache_.save()

    return alias, sitetree
[ "def", "get_sitetree", "(", "self", ",", "alias", ")", ":", "cache_", "=", "self", ".", "cache", "get_cache_entry", "=", "cache_", ".", "get_entry", "set_cache_entry", "=", "cache_", ".", "set_entry", "caching_required", "=", "False", "if", "not", "self", "....
35.682353
19.141176
def split_pem(s):
    """
    Split concatenated PEM objects.

    Useful to process concatenated certificates.

    :param s: bytes containing zero or more concatenated PEM objects.
    :return: list of bytes, one per object — from the BEGIN line through
        the END line (including the trailing newline when present).
    """
    pem_strings = []
    while s != b"":
        start_idx = s.find(b"-----BEGIN")
        if start_idx == -1:
            break
        end_idx = s.find(b"-----END", start_idx)
        if end_idx == -1:
            # Unterminated object: no END marker, nothing more to extract.
            break
        newline_idx = s.find(b"\n", end_idx)
        if newline_idx == -1:
            # Bug fix: the original did s.find(b"\n", end_idx) + 1, which
            # yields 0 when the final END line has no trailing newline;
            # s = s[0:] then never shrank and the loop spun forever.
            # Take everything through end-of-input instead.
            end_idx = len(s)
        else:
            end_idx = newline_idx + 1
        pem_strings.append(s[start_idx:end_idx])
        s = s[end_idx:]
    return pem_strings
[ "def", "split_pem", "(", "s", ")", ":", "pem_strings", "=", "[", "]", "while", "s", "!=", "b\"\"", ":", "start_idx", "=", "s", ".", "find", "(", "b\"-----BEGIN\"", ")", "if", "start_idx", "==", "-", "1", ":", "break", "end_idx", "=", "s", ".", "fin...
28.285714
12.714286
def type(self):
    '''Return 'suite' or 'resource' or None

    This will return 'suite' if a testcase table is found;
    It will return 'resource' if at least one robot table
    is found. If no tables are found it will return None
    '''
    recognized_tables = [table for table in self.tables
                         if not isinstance(table, UnknownTable)]
    if not recognized_tables:
        # Nothing robot-specific in this file.
        return None
    if any(isinstance(table, TestcaseTable) for table in self.tables):
        return "suite"
    return "resource"
[ "def", "type", "(", "self", ")", ":", "robot_tables", "=", "[", "table", "for", "table", "in", "self", ".", "tables", "if", "not", "isinstance", "(", "table", ",", "UnknownTable", ")", "]", "if", "len", "(", "robot_tables", ")", "==", "0", ":", "retu...
31.941176
23.588235
def _get_real_ip(self): """ Get IP from request. :param request: A usual request object :type request: HttpRequest :return: ipv4 string or None """ try: # Trying to work with most common proxy headers real_ip = self.request.META['HTTP_X_FORWARDED_FOR'] return real_ip.split(',')[0] except KeyError: return self.request.META['REMOTE_ADDR'] except Exception: # Unknown IP return None
[ "def", "_get_real_ip", "(", "self", ")", ":", "try", ":", "# Trying to work with most common proxy headers", "real_ip", "=", "self", ".", "request", ".", "META", "[", "'HTTP_X_FORWARDED_FOR'", "]", "return", "real_ip", ".", "split", "(", "','", ")", "[", "0", ...
30
13.529412
def setError(self, msg=None, title=None): """ Shows and error message """ if msg is not None: self.messageLabel.setText(msg) if title is not None: self.titleLabel.setText(title)
[ "def", "setError", "(", "self", ",", "msg", "=", "None", ",", "title", "=", "None", ")", ":", "if", "msg", "is", "not", "None", ":", "self", ".", "messageLabel", ".", "setText", "(", "msg", ")", "if", "title", "is", "not", "None", ":", "self", "....
28.375
8.625
def _load_results(self, container_id):
    """
    Load results from the most recent build.

    :return: BuildResults (implicitly None when no temp dir is set)
    """
    if self.temp_dir:
        tasker = DockerTasker()
        # FIXME: load results only when requested; a previous revision read
        # them back from RESULTS_JSON / Dockerfile inside temp_dir.
        results = BuildResults()
        results.build_logs = tasker.logs(container_id, stream=False)
        results.container_id = container_id
        return results
[ "def", "_load_results", "(", "self", ",", "container_id", ")", ":", "if", "self", ".", "temp_dir", ":", "dt", "=", "DockerTasker", "(", ")", "# FIXME: load results only when requested", "# results_path = os.path.join(self.temp_dir, RESULTS_JSON)", "# df_path = os.path.join(se...
45.083333
17.416667
def is_empty(self):
    """Asserts that val is empty."""
    if len(self.val) == 0:
        return self
    if isinstance(self.val, str_types):
        self._err('Expected <%s> to be empty string, but was not.' % self.val)
    else:
        self._err('Expected <%s> to be empty, but was not.' % self.val)
    return self
[ "def", "is_empty", "(", "self", ")", ":", "if", "len", "(", "self", ".", "val", ")", "!=", "0", ":", "if", "isinstance", "(", "self", ".", "val", ",", "str_types", ")", ":", "self", ".", "_err", "(", "'Expected <%s> to be empty string, but was not.'", "%...
42.125
20.875
def merge_groups(self, indices):
    """Extend the lists within the DICOM groups dictionary.

    Parameters
    ----------
    indices: list or tuple of 2 iterables of int, both having the same len
        Pairs are read first-to-second: the list at the first index is
        extended with the list at the second index. The indices can be
        constructed with Numpy, e.g. indices = np.where(square_matrix).
    """
    try:
        self.dicom_groups = merge_dict_of_lists(self.dicom_groups, indices,
                                                pop_later=True, copy=True)
    except IndexError:
        raise IndexError('Index out of range to merge DICOM groups.')
[ "def", "merge_groups", "(", "self", ",", "indices", ")", ":", "try", ":", "merged", "=", "merge_dict_of_lists", "(", "self", ".", "dicom_groups", ",", "indices", ",", "pop_later", "=", "True", ",", "copy", "=", "True", ")", "self", ".", "dicom_groups", "...
46.3
22.3
def _save_metadata(self):
    """
    Write this prefix's metadata to disk as JSON.

    Returns:
        None
    """
    with open(self.paths.metadata(), 'w') as fd:
        utils.json_dump(self.metadata, fd)
[ "def", "_save_metadata", "(", "self", ")", ":", "with", "open", "(", "self", ".", "paths", ".", "metadata", "(", ")", ",", "'w'", ")", "as", "metadata_fd", ":", "utils", ".", "json_dump", "(", "self", ".", "metadata", ",", "metadata_fd", ")" ]
26.333333
15.666667
def get_graph_csv():
    """
    Allows the user to download a graph's data as a CSV file.

    :return: a file-download response for the generated CSV, or an empty
        JSON payload if generation fails.
    """
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # which would silently disable this validation (and the assert made the
    # following `if request.method == "POST"` redundant).
    if request.method != "POST":
        raise AssertionError(
            "POST request expected received {}".format(request.method))
    try:
        selected_variable_table = request.form["selected_variable_table"]
        filename = utils.generate_graph_csv(selected_variable_table)
        return send_file(
            filename, as_attachment=True,
            attachment_filename='{}.csv'.format(selected_variable_table))
    except Exception as e:
        # Best-effort: log and return an empty payload so the UI degrades
        # gracefully instead of surfacing a 500.
        logging.error(e)
        return jsonify({"0": "__EMPTY"})
[ "def", "get_graph_csv", "(", ")", ":", "assert", "request", ".", "method", "==", "\"POST\"", ",", "\"POST request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "try", ":", "...
48.142857
24.571429
def addProfile(self, profile):
    """
    Adds the inputed profile to the system (no-op if already present).

    :param      profile | <XViewProfile>
    """
    if profile in self._profiles:
        return
    self._profiles.append(profile)
    combo = self._profileCombo
    combo.blockSignals(True)
    combo.addItem(profile.name())
    combo.setCurrentIndex(combo.count() - 1)
    combo.blockSignals(False)
[ "def", "addProfile", "(", "self", ",", "profile", ")", ":", "if", "(", "profile", "in", "self", ".", "_profiles", ")", ":", "return", "self", ".", "_profiles", ".", "append", "(", "profile", ")", "self", ".", "_profileCombo", ".", "blockSignals", "(", ...
33.5
11.642857
def add_archive(self, src_file, remove_final=False):
    """
    Adds the contents of another tarfile to the build. It will be repackaged
    during context generation and added to the root level of the file
    system, so tar (or compression utilities) need not be present in the
    base image.

    :param src_file: Tar archive to add.
    :type src_file: unicode | str
    :param remove_final: Remove the contents after the build operation has
        completed. This removes all top-level components of the tar archive
        recursively, so do not use it on standard unix folders; it also does
        not reduce the resulting image size (may actually increase it)
        unless the image is squashed.
    :type remove_final: bool
    :return: Name of the root files / directories added to the Dockerfile.
    :rtype: list[unicode | str]
    """
    with tarfile.open(src_file, 'r') as archive:
        # Only top-level members (no path separator) end up in ADD lines.
        root_names = [m.name for m in archive.getmembers()
                      if posixpath.sep not in m.name]
        self.prefix_all('ADD', *zip(root_names, root_names))
    if remove_final:
        self._remove_files.update(root_names)
    self._archives.append(src_file)
    return root_names
[ "def", "add_archive", "(", "self", ",", "src_file", ",", "remove_final", "=", "False", ")", ":", "with", "tarfile", ".", "open", "(", "src_file", ",", "'r'", ")", "as", "tf", ":", "member_names", "=", "[", "member", ".", "name", "for", "member", "in", ...
54.2
25
def get_zorder(self, overlay, key, el):
    """
    Computes the z-order of an element in the NdOverlay, taking into
    account possible batching of elements.
    """
    overlay_spec = util.get_overlay_spec(overlay, key, el)
    return self.ordering.index(overlay_spec)
[ "def", "get_zorder", "(", "self", ",", "overlay", ",", "key", ",", "el", ")", ":", "spec", "=", "util", ".", "get_overlay_spec", "(", "overlay", ",", "key", ",", "el", ")", "return", "self", ".", "ordering", ".", "index", "(", "spec", ")" ]
38.428571
7
def check_field_exists(self, field_name):
    """Implements field exists check for debugging purposes.

    Only active when settings.DEBUG is set; re-raises an unknown field
    as AdmirarchyConfigurationError.

    :param field_name:
    :return:
    """
    if not settings.DEBUG:
        return
    try:
        self.lookup_opts.get_field(field_name)
    except FieldDoesNotExist as exc:
        raise AdmirarchyConfigurationError(exc)
[ "def", "check_field_exists", "(", "self", ",", "field_name", ")", ":", "if", "not", "settings", ".", "DEBUG", ":", "return", "try", ":", "self", ".", "lookup_opts", ".", "get_field", "(", "field_name", ")", "except", "FieldDoesNotExist", "as", "e", ":", "r...
25.357143
17.142857
def configure(level=logging.WARNING, handler=None, formatter=None):
    """Configure Logr

    @param level: Logging level threshold applied to the handler
    @type level: int

    @param handler: Logger message handler
    @type handler: logging.Handler or None

    @param formatter: Logger message Formatter
    @type formatter: logging.Formatter or None
    """
    formatter = LogrFormatter() if formatter is None else formatter
    handler = logging.StreamHandler() if handler is None else handler

    handler.setFormatter(formatter)
    handler.setLevel(level)
    Logr.handler = handler
[ "def", "configure", "(", "level", "=", "logging", ".", "WARNING", ",", "handler", "=", "None", ",", "formatter", "=", "None", ")", ":", "if", "formatter", "is", "None", ":", "formatter", "=", "LogrFormatter", "(", ")", "if", "handler", "is", "None", ":...
29.722222
14.944444
def clean_lines(string_list, remove_empty_lines=True):
    """
    Strips whitespace, carriage returns and empty lines from a list of
    strings.

    Args:
        string_list: List of strings
        remove_empty_lines: Set to True to skip lines which are empty after
            stripping.

    Returns:
        Generator of clean strings with no whitespaces (text after a '#'
        comment marker is discarded).
    """
    for raw in string_list:
        # Drop any trailing '#' comment before stripping whitespace.
        stripped = raw.split('#', 1)[0].strip()
        if stripped or not remove_empty_lines:
            yield stripped
[ "def", "clean_lines", "(", "string_list", ",", "remove_empty_lines", "=", "True", ")", ":", "for", "s", "in", "string_list", ":", "clean_s", "=", "s", "if", "'#'", "in", "s", ":", "ind", "=", "s", ".", "index", "(", "'#'", ")", "clean_s", "=", "s", ...
27.857143
19.571429
def _get_file(self, share_name, directory_name, file_name,
              start_range=None, end_range=None,
              range_get_content_md5=None, timeout=None):
    '''
    Downloads a file's content, metadata, and properties. You can specify a
    range if you don't need to download the file in its entirety. If no
    range is specified, the full file will be downloaded.

    See get_file_to_* for high level functions that handle the download
    of large files with automatic chunking and progress notifications.

    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param int start_range:
        Start of byte range to use for downloading a section of the file.
        If no end_range is given, all bytes after the start_range will be
        downloaded. The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of
        file.
    :param int end_range:
        End of byte range to use for downloading a section of the file.
        If end_range is given, start_range must be provided. Both params
        are inclusive. Ex: start_range=0, end_range=511 will download
        first 512 bytes of file.
    :param bool range_get_content_md5:
        When this header is set to True and specified together with the
        Range header, the service returns the MD5 hash for the range, as
        long as the range is less than or equal to 4 MB in size.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: A File with content, properties, and metadata.
    :rtype: :class:`~azure.storage.file.models.File`
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)

    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(share_name, directory_name, file_name)
    request.query = [('timeout', _int_to_str(timeout))]
    _validate_and_format_range_headers(
        request,
        start_range,
        end_range,
        start_range_required=False,
        end_range_required=False,
        check_content_md5=range_get_content_md5)

    response = self._perform_request(request, None)
    return _parse_file(file_name, response)
[ "def", "_get_file", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "start_range", "=", "None", ",", "end_range", "=", "None", ",", "range_get_content_md5", "=", "None", ",", "timeout", "=", "None", ")", ":", "_validate_not_none",...
48.037736
20.45283
def Receive(self, replytype, **kw):
    '''Parse the reply message and create a Python object.

    KeyWord data:
        faults   -- list of WSDL operation.fault typecodes
        wsaction -- If using WS-Address, must specify Action value we
                    expect to receive.
    '''
    self.ReceiveSOAP(**kw)
    if self.ps.IsAFault():
        raise FaultException(FaultFromFaultMessage(self.ps))

    # Objects carrying a `typecode` attribute are unwrapped to it.
    typecode = getattr(replytype, 'typecode', replytype)
    reply = self.ps.Parse(typecode)
    if self.address is not None:
        self.address.checkResponse(self.ps, kw.get('wsaction'))
    return reply
[ "def", "Receive", "(", "self", ",", "replytype", ",", "*", "*", "kw", ")", ":", "self", ".", "ReceiveSOAP", "(", "*", "*", "kw", ")", "if", "self", ".", "ps", ".", "IsAFault", "(", ")", ":", "msg", "=", "FaultFromFaultMessage", "(", "self", ".", ...
32.52381
17.857143
def _deregister(self, session):
    """Deregister a session (no-op if it is not registered)."""
    if session not in self:
        return
    key = self._get_session_key(session)
    # pop() with a default so a concurrent removal cannot raise.
    self._sessions.pop(key, None)
[ "def", "_deregister", "(", "self", ",", "session", ")", ":", "if", "session", "in", "self", ":", "self", ".", "_sessions", ".", "pop", "(", "self", ".", "_get_session_key", "(", "session", ")", ",", "None", ")" ]
29.666667
10
def _CreateDynamicDisplayAdSettings(media_service, opener):
    """Creates settings for a dynamic display ad.

    Args:
        media_service: a SudsServiceProxy instance for AdWords's
            MediaService.
        opener: an OpenerDirector instance.

    Returns:
        The dynamic display ad settings.
    """
    uploaded = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')
    landscape_logo = {
        'type': 'IMAGE',
        'mediaId': uploaded['mediaId'],
        'xsi_type': 'Image'
    }
    return {
        'landscapeLogoImage': landscape_logo,
        'pricePrefix': 'as low as',
        'promoText': 'Free shipping!',
        'xsi_type': 'DynamicSettings',
    }
[ "def", "_CreateDynamicDisplayAdSettings", "(", "media_service", ",", "opener", ")", ":", "image", "=", "_CreateImage", "(", "media_service", ",", "opener", ",", "'https://goo.gl/dEvQeF'", ")", "logo", "=", "{", "'type'", ":", "'IMAGE'", ",", "'mediaId'", ":", "i...
24.115385
21.115385
def samtools_index(self, bam_file):
    """Build the shell command that indexes a bam file."""
    return self.tools.samtools + " index {0}".format(bam_file)
[ "def", "samtools_index", "(", "self", ",", "bam_file", ")", ":", "cmd", "=", "self", ".", "tools", ".", "samtools", "+", "\" index {0}\"", ".", "format", "(", "bam_file", ")", "return", "cmd" ]
37.25
13
def sample_shape(self):
    """Sample shape of random variable as a `TensorShape`."""
    shape = self._sample_shape
    if isinstance(shape, tf.Tensor):
        # Resolve a tensor-valued shape to its static value first.
        shape = tf.get_static_value(shape)
    return tf.TensorShape(shape)
[ "def", "sample_shape", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_sample_shape", ",", "tf", ".", "Tensor", ")", ":", "return", "tf", ".", "TensorShape", "(", "tf", ".", "get_static_value", "(", "self", ".", "_sample_shape", ")", ")", ...
49.2
11.8
def get_sgburst_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain
    sine-Gaussian burst waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        subsitute for keyword arguments. A common example would be a row
        in an xml table.
    approximant : string
        A string that indicates the chosen approximant. See
        `td_approximants` for available options.
    q : float
        The quality factor of a sine-Gaussian burst
    frequency : float
        The centre-frequency of a sine-Gaussian burst
    delta_t : float
        The time step used to generate the waveform
    hrss : float
        The strain rss
    amplitude: float
        The strain amplitude

    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    input_params = props_sgburst(template, **kwargs)

    # Raise on the first required argument (in declaration order) that is
    # missing, exactly as the original loop did.
    missing = [a for a in sgburst_required_args if a not in input_params]
    if missing:
        raise ValueError("Please provide " + str(missing[0]))

    return _lalsim_sgburst_waveform(**input_params)
[ "def", "get_sgburst_waveform", "(", "template", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props_sgburst", "(", "template", ",", "*", "*", "kwargs", ")", "for", "arg", "in", "sgburst_required_args", ":", "if", "arg", "not", "in",...
31.675676
19.459459
def _call_retry(self, force_retry):
    """Call request and retry up to max_attempts times (or none if
    self.max_attempts=1)."""
    last_exception = None
    for attempt in range(self.max_attempts):
        try:
            log.info("Calling %s %s" % (self.method, self.url))
            response = self.requests_method(
                self.url,
                data=self.data,
                params=self.params,
                headers=self.headers,
                timeout=(self.connect_timeout, self.read_timeout),
                verify=self.verify_ssl,
            )

            if response is None:
                log.warn("Got response None")
                if self._method_is_safe_to_retry():
                    # Linear back-off: wait a little longer each attempt.
                    delay = 0.5 + attempt * 0.5
                    log.info("Waiting %s sec and Retrying since call is a %s" % (delay, self.method))
                    time.sleep(delay)
                    continue
                # Caught below by `except Exception` and re-raised unless
                # force_retry asks for another attempt.
                raise PyMacaronCoreException("Call %s %s returned empty response" % (self.method, self.url))

            return response

        except Exception as e:
            last_exception = e
            retry = force_retry

            if isinstance(e, ReadTimeout):
                # Log enough to help debugging...
                log.warn("Got a ReadTimeout calling %s %s" % (self.method, self.url))
                log.warn("Exception was: %s" % str(e))
                resp = e.response
                if not resp:
                    log.info("Requests error has no response.")
                    # TODO: retry=True? Is it really safe?
                else:
                    log.info("Requests has a response with content: " + pprint.pformat(resp.content))
                if self._method_is_safe_to_retry():
                    # It is safe to retry
                    log.info("Retrying since call is a %s" % self.method)
                    retry = True
            elif isinstance(e, ConnectTimeout):
                log.warn("Got a ConnectTimeout calling %s %s" % (self.method, self.url))
                log.warn("Exception was: %s" % str(e))
                # ConnectTimeouts are safe to retry whatever the call...
                retry = True

            if retry:
                continue
            raise e

    # max_attempts has been reached: propagate the last received Exception.
    if not last_exception:
        last_exception = Exception("Reached max-attempts (%s). Giving up calling %s %s" % (self.max_attempts, self.method, self.url))
    raise last_exception
[ "def", "_call_retry", "(", "self", ",", "force_retry", ")", ":", "last_exception", "=", "None", "for", "i", "in", "range", "(", "self", ".", "max_attempts", ")", ":", "try", ":", "log", ".", "info", "(", "\"Calling %s %s\"", "%", "(", "self", ".", "met...
42.84375
21.125
def http_redirect_message(message, location, relay_state="",
                          typ="SAMLRequest", sigalg='', signer=None,
                          **kwargs):
    """The HTTP Redirect binding defines a mechanism by which SAML protocol
    messages can be transmitted within URL parameters.
    Messages are encoded for use with this binding using a URL encoding
    technique, and transmitted using the HTTP GET method.

    The DEFLATE Encoding is used in this function.

    :param message: The message
    :param location: Where the message should be posted to
    :param relay_state: for preserving and conveying state information
    :param typ: What type of message it is SAMLRequest/SAMLResponse/SAMLart
    :param sigalg: Which algorithm the signature function will use to sign
        the message
    :param signer: A signature function that can be used to sign the message
    :return: A tuple containing header information and a HTML message.
    """
    if not isinstance(message, six.string_types):
        message = "%s" % (message,)

    _order = None
    if typ in ["SAMLRequest", "SAMLResponse"]:
        _order = REQ_ORDER if typ == "SAMLRequest" else RESP_ORDER
        args = {typ: deflate_and_base64_encode(message)}
    elif typ == "SAMLart":
        args = {typ: message}
    else:
        raise Exception("Unknown message type: %s" % typ)

    if relay_state:
        args["RelayState"] = relay_state

    if signer:
        # sigalgs, should be one defined in xmldsig
        assert sigalg in [b for a, b in SIG_ALLOWED_ALG]
        args["SigAlg"] = sigalg
        # NOTE(review): for typ == "SAMLart" _order is still None here, so
        # signing an artifact message raises TypeError -- confirm intended.
        string = "&".join([urlencode({k: args[k]})
                           for k in _order if k in args]).encode('ascii')
        args["Signature"] = base64.b64encode(signer.sign(string))

    string = urlencode(args)
    glue_char = "&" if urlparse(location).query else "?"
    login_url = glue_char.join([location, string])
    headers = [('Location', str(login_url))]
    body = []

    return {"headers": headers, "data": body}
[ "def", "http_redirect_message", "(", "message", ",", "location", ",", "relay_state", "=", "\"\"", ",", "typ", "=", "\"SAMLRequest\"", ",", "sigalg", "=", "''", ",", "signer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(",...
37.018182
20.909091
def get_providing_power_source_type(self):
    """
    Returns GetSystemPowerStatus().ACLineStatus

    @raise: WindowsError if any underlying error occures.
    """
    status = SYSTEM_POWER_STATUS()
    if not GetSystemPowerStatus(pointer(status)):
        raise WinError()
    return POWER_TYPE_MAP[status.ACLineStatus]
[ "def", "get_providing_power_source_type", "(", "self", ")", ":", "power_status", "=", "SYSTEM_POWER_STATUS", "(", ")", "if", "not", "GetSystemPowerStatus", "(", "pointer", "(", "power_status", ")", ")", ":", "raise", "WinError", "(", ")", "return", "POWER_TYPE_MAP...
36.3
12.5
def reset_status(self):
    """Reset all progress bars to their idle state."""
    for entry in self.items:
        entry.isProcessing = False
        entry.currentProgress = 0
[ "def", "reset_status", "(", "self", ")", ":", "for", "item", "in", "self", ".", "items", ":", "item", ".", "isProcessing", "=", "False", "item", ".", "currentProgress", "=", "0" ]
32
6.6
def add_residue_from_geo(structure, geo):
    '''Adds a residue to chain A model 0 of the given structure, and
    returns the new structure. The residue to be added is determined by
    the geometry object given as second argument.

    This function is a helper function and should not normally be called
    directly. Call add_residue() instead.'''
    resRef = getReferenceResidue(structure)
    AA = geo.residue_name
    segID = resRef.get_id()[1] + 1

    # Geometry to bring the new residue together with the reference.
    peptide_bond = geo.peptide_bond
    CA_C_N_angle = geo.CA_C_N_angle
    C_N_CA_angle = geo.C_N_CA_angle

    # Backbone coordinates.
    N_CA_C_angle = geo.N_CA_C_angle
    CA_N_length = geo.CA_N_length
    CA_C_length = geo.CA_C_length
    phi = geo.phi
    psi_im1 = geo.psi_im1
    omega = geo.omega

    N_coord = calculateCoordinates(resRef['N'], resRef['CA'], resRef['C'],
                                   peptide_bond, CA_C_N_angle, psi_im1)
    N = Atom("N", N_coord, 0.0, 1.0, " ", " N", 0, "N")

    CA_coord = calculateCoordinates(resRef['CA'], resRef['C'], N,
                                    CA_N_length, C_N_CA_angle, omega)
    CA = Atom("CA", CA_coord, 0.0, 1.0, " ", " CA", 0, "C")

    C_coord = calculateCoordinates(resRef['C'], N, CA,
                                   CA_C_length, N_CA_C_angle, phi)
    C = Atom("C", C_coord, 0.0, 1.0, " ", " C", 0, "C")

    # Create carbonyl atom (to be moved later).
    C_O_length = geo.C_O_length
    CA_C_O_angle = geo.CA_C_O_angle
    N_CA_C_O_diangle = geo.N_CA_C_O_diangle
    carbonyl = calculateCoordinates(N, CA, C, C_O_length, CA_C_O_angle,
                                    N_CA_C_O_diangle)
    O = Atom("O", carbonyl, 0.0, 1.0, " ", " O", 0, "O")

    # Dispatch table replaces the original 20-branch if/elif chain;
    # unknown residue codes fall back to glycine, as before.
    makers = {
        'G': makeGly, 'A': makeAla, 'S': makeSer, 'C': makeCys,
        'V': makeVal, 'I': makeIle, 'L': makeLeu, 'T': makeThr,
        'R': makeArg, 'K': makeLys, 'D': makeAsp, 'E': makeGlu,
        'N': makeAsn, 'Q': makeGln, 'M': makeMet, 'H': makeHis,
        'P': makePro, 'F': makePhe, 'Y': makeTyr, 'W': makeTrp,
    }
    res = makers.get(AA, makeGly)(segID, N, CA, C, O, geo)

    # Re-place the previous residue's carbonyl oxygen now that psi is known.
    resRef['O'].set_coord(calculateCoordinates(
        res['N'], resRef['CA'], resRef['C'], C_O_length, CA_C_O_angle,
        180.0))

    # NOTE(review): `ghost` is computed but never used; kept because
    # calculateCoordinates is presumed pure -- confirm before removing.
    ghost = Atom("N",
                 calculateCoordinates(res['N'], res['CA'], res['C'],
                                      peptide_bond, CA_C_N_angle, psi_im1),
                 0.0, 0.0, " ", "N", 0, "N")

    res['O'].set_coord(calculateCoordinates(
        res['N'], res['CA'], res['C'], C_O_length, CA_C_O_angle, 180.0))

    structure[0]['A'].add(res)
    return structure
[ "def", "add_residue_from_geo", "(", "structure", ",", "geo", ")", ":", "resRef", "=", "getReferenceResidue", "(", "structure", ")", "AA", "=", "geo", ".", "residue_name", "segID", "=", "resRef", ".", "get_id", "(", ")", "[", "1", "]", "segID", "+=", "1",...
35.141304
20.423913
def _parse_genotype(self, vcf_fields):
    """Parse genotype from VCF line data"""
    format_keys = vcf_fields[8].split(':')
    sample_values = vcf_fields[9].split(':')
    try:
        gt_idx = format_keys.index('GT')
    except ValueError:
        # No GT entry in the FORMAT column.
        return []
    # Split on phased (|) or unphased (/) separators; '.' means no call.
    return [int(allele)
            for allele in re.split(r'[\|/]', sample_values[gt_idx])
            if allele != '.']
[ "def", "_parse_genotype", "(", "self", ",", "vcf_fields", ")", ":", "format_col", "=", "vcf_fields", "[", "8", "]", ".", "split", "(", "':'", ")", "genome_data", "=", "vcf_fields", "[", "9", "]", ".", "split", "(", "':'", ")", "try", ":", "gt_idx", "...
37.7
12.6
def getScriptLocation():
    """Helper function to get the directory containing this Python file."""
    location = os.path.abspath("./")
    slash = __file__.rfind("/")
    if slash != -1:
        location = __file__[:slash]
    return location
[ "def", "getScriptLocation", "(", ")", ":", "location", "=", "os", ".", "path", ".", "abspath", "(", "\"./\"", ")", "if", "__file__", ".", "rfind", "(", "\"/\"", ")", "!=", "-", "1", ":", "location", "=", "__file__", "[", ":", "__file__", ".", "rfind"...
34.333333
10
def spi_configure(self, polarity, phase, bitorder):
    """Configure the SPI interface."""
    status = api.py_aa_spi_configure(self.handle, polarity, phase, bitorder)
    _raise_error_if_negative(status)
[ "def", "spi_configure", "(", "self", ",", "polarity", ",", "phase", ",", "bitorder", ")", ":", "ret", "=", "api", ".", "py_aa_spi_configure", "(", "self", ".", "handle", ",", "polarity", ",", "phase", ",", "bitorder", ")", "_raise_error_if_negative", "(", ...
51.75
12.75
def p_file_contrib_1(self, p):
    """file_contrib : FILE_CONTRIB LINE"""
    # Grammar rule docstring above is consumed by ply -- keep it intact.
    if six.PY2:
        value = p[2].decode(encoding='utf-8')
    else:
        value = p[2]
    try:
        self.builder.add_file_contribution(self.document, value)
    except OrderError:
        self.order_error('FileContributor', 'FileName', p.lineno(1))
[ "def", "p_file_contrib_1", "(", "self", ",", "p", ")", ":", "try", ":", "if", "six", ".", "PY2", ":", "value", "=", "p", "[", "2", "]", ".", "decode", "(", "encoding", "=", "'utf-8'", ")", "else", ":", "value", "=", "p", "[", "2", "]", "self", ...
37.5
17.7
def doublewrap(f):
    '''
    a decorator decorator, allowing the decorator to be used as:
    @decorator(with, arguments, and=kwargs)
    or
    @decorator

    Ref: http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        used_bare = len(args) == 1 and not kwargs and callable(args[0])
        if used_bare:
            # @decorator with no argument list: args[0] is the function.
            return f(args[0])
        # @decorator(...): return a decorator awaiting the real function.
        return lambda realf: f(realf, *args, **kwargs)
    return wrapper
[ "def", "doublewrap", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "new_dec", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "...
33.611111
24.722222
def get_differing_atom_residue_ids(self, pdb_name, pdb_list=None):
    '''Returns a list of residues in pdb_name which differ from the pdbs
    corresponding to the names in pdb_list (all known pdbs when pdb_list
    is empty or None).'''
    # partition_by_sequence is a map pdb_name -> Int where two pdb names in
    # the same equivalence class map to the same integer (a partition).
    # representative_pdbs maps that integer to a representative PDB object.

    # Mutable default argument ([]) replaced by None; behavior unchanged
    # since an empty list was already treated as "use all pdb names".
    if pdb_list is None:
        pdb_list = []

    # Sanity checks: pdb_name and every name in pdb_list must be known.
    assert pdb_name in self.pdb_names
    assert set(pdb_list).issubset(set(self.pdb_names))

    # 1. Representative structure for pdb_name.
    rep_id = self.partition_by_sequence[pdb_name]
    representative_pdb = self.representative_pdbs[rep_id]

    # 2. Representative structures for the comparison set.
    other_rep_ids = set()
    other_reps = set()
    if not pdb_list:
        pdb_list = self.pdb_names
    for other_name in pdb_list:
        other_id = self.partition_by_sequence[other_name]
        other_rep_ids.add(other_id)
        other_reps.add(self.representative_pdbs[other_id])
    other_reps.discard(representative_pdb)
    other_rep_ids.discard(rep_id)

    # Early out when every pdb shares this representative.
    if not other_reps:
        return []

    # 3. Residues of the representative which differ from all the others.
    differing = set()
    for other_id in other_rep_ids:
        differing |= set(self.differing_atom_residue_ids[(rep_id, other_id)])
    return sorted(differing)
[ "def", "get_differing_atom_residue_ids", "(", "self", ",", "pdb_name", ",", "pdb_list", "=", "[", "]", ")", ":", "# partition_by_sequence is a map pdb_name -> Int where two pdb names in the same equivalence class map to the same integer (i.e. it is a partition)", "# representative_pdbs i...
58.162162
36.594595
def _add_additional_properties(position, properties_dict):
    """
    Sets AdditionalProperties of the ProbModelXML.
    """
    container = etree.SubElement(position, 'AdditionalProperties')
    for name, value in properties_dict.items():
        etree.SubElement(container, 'Property',
                         attrib={'name': name, 'value': value})
[ "def", "_add_additional_properties", "(", "position", ",", "properties_dict", ")", ":", "add_prop", "=", "etree", ".", "SubElement", "(", "position", ",", "'AdditionalProperties'", ")", "for", "key", ",", "value", "in", "properties_dict", ".", "items", "(", ")",...
48.714286
17
def inbox_folder(self):
    """ Shortcut to get Inbox Folder instance

    :rtype: mailbox.Folder
    """
    return self.folder_constructor(
        parent=self, name='Inbox',
        folder_id=OutlookWellKnowFolderNames.INBOX.value)
[ "def", "inbox_folder", "(", "self", ")", ":", "return", "self", ".", "folder_constructor", "(", "parent", "=", "self", ",", "name", "=", "'Inbox'", ",", "folder_id", "=", "OutlookWellKnowFolderNames", ".", "INBOX", ".", "value", ")" ]
38.125
17.375
def ssh(container, cmd='', user='root', password='root'):
    '''
    SSH into a running container, using the host as a jump host. This
    requires the container to have a running sshd process.

    Args:
    * container: Container name or ID
    * cmd='': Command to run in the container
    * user='root': SSH username
    * password='root': SSH password
    '''
    container_ip = get_ip(container)
    jump_cmd = ('sshpass -p \'%s\' ssh -A -t -o StrictHostKeyChecking=no '
                '\'%s\'@%s' % (password, user, container_ip))
    local('ssh -A -t -o StrictHostKeyChecking=no -i "%s" %s@%s %s %s' % (
        env.key_filename, env.user, env.host, jump_cmd, cmd))
[ "def", "ssh", "(", "container", ",", "cmd", "=", "''", ",", "user", "=", "'root'", ",", "password", "=", "'root'", ")", ":", "ip", "=", "get_ip", "(", "container", ")", "ssh_cmd", "=", "'sshpass -p \\'%s\\' ssh -A -t -o StrictHostKeyChecking=no \\'%s\\'@%s'", "%...
42.266667
23.2
def monitor(args):
    """File monitor mode: re-parse MDFILE whenever its mtime changes.

    Ported from Python 2-only syntax (`print x`, `except E, ex`) to
    version-agnostic syntax; behavior is otherwise unchanged. Loops
    forever, sleeping between polls.
    """
    filename = args.get('MDFILE')
    if not filename:
        print(col('Need file argument', 2))
        raise SystemExit
    last_err = ''
    last_stat = 0
    while True:
        if not os.path.exists(filename):
            last_err = 'File %s not found. Will continue trying.' % filename
        else:
            try:
                stat = os.stat(filename)[8]  # st_mtime
                if stat != last_stat:
                    parsed = run_args(args)
                    print(parsed)
                    last_stat = stat
                    last_err = ''
            except Exception as ex:
                last_err = str(ex)
        if last_err:
            print('Error: %s' % last_err)
        sleep()
[ "def", "monitor", "(", "args", ")", ":", "filename", "=", "args", ".", "get", "(", "'MDFILE'", ")", "if", "not", "filename", ":", "print", "col", "(", "'Need file argument'", ",", "2", ")", "raise", "SystemExit", "last_err", "=", "''", "last_stat", "=", ...
30.083333
13.125
def process_query_result(self, query_result, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg, fields, bucket_result): """Build recursively result buckets.""" # Add metric results for current bucket for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if fields: current_level = fields[0] bucket_result.update(dict( type='bucket', field=current_level, key_type='terms', buckets=[build_buckets(b, fields[1:], dict(key=b['key'])) for b in agg[current_level]['buckets']] )) return bucket_result # Add copy_fields aggs = query_result['aggregations'] result = dict( start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, ) if self.copy_fields and aggs['top_hit']['hits']['hits']: doc = aggs['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): result[destination] = doc[source] else: result[destination] = source(result, doc) return build_buckets(aggs, self.aggregated_fields, result)
[ "def", "process_query_result", "(", "self", ",", "query_result", ",", "start_date", ",", "end_date", ")", ":", "def", "build_buckets", "(", "agg", ",", "fields", ",", "bucket_result", ")", ":", "\"\"\"Build recursively result buckets.\"\"\"", "# Add metric results for c...
44.666667
17.242424
def render_source(output_dir, package_specs, version): """ Render and output """ destination_filename = "%s/sbp_out.tex" % output_dir py_template = JENV.get_template(TEMPLATE_NAME) stable_msgs = [] unstable_msgs = [] prims = [] for p in sorted(package_specs, key=attrgetter('identifier')): pkg_name = p.identifier stable = p.stable # build list of required definitions (this package plus includes) # TODO: recursively include files definitions = p.definitions for inc in p.includes: inc_basename = inc.split(".")[0] for pkg in package_specs: if pkg.identifier.endswith(inc_basename): definitions += pkg.definitions if pkg_name == "swiftnav.sbp.types": prims = p.definitions for d in p.definitions: if d.public and d.static and d.sbp_id: items, size, multiplier \ = handle_fields(definitions, d.fields, "", 0, None) adj_size = "" if multiplier == 1: adj_size = "N+%d" % (size - 1) if size > 1 else "N" elif multiplier: if multiplier == size: adj_size = "%dN" % multiplier else: adj_size = "%dN+%d" % (multiplier, size - multiplier) else: adj_size = "%d" % size ti = TableItem(pkg_name, d.identifier, d.sbp_id, d.short_desc, d.desc, adj_size, items, p.stable, p.description) pkg_name = "" if stable: stable_msgs.append(ti) else: unstable_msgs.append(ti) with open(destination_filename, 'w') as f: f.write(py_template.render(msgs=stable_msgs, umsgs=unstable_msgs, prims=prims, version=version)) import subprocess import os os.chdir(output_dir) subprocess.call(["pdflatex", "--enable-write18", "-shell-escape", "sbp_out.tex"]) subprocess.call(["mv", "sbp_out.pdf", "../docs/sbp.pdf"])
[ "def", "render_source", "(", "output_dir", ",", "package_specs", ",", "version", ")", ":", "destination_filename", "=", "\"%s/sbp_out.tex\"", "%", "output_dir", "py_template", "=", "JENV", ".", "get_template", "(", "TEMPLATE_NAME", ")", "stable_msgs", "=", "[", "]...
34.421053
14.596491
def MappingField(cls, child_key, default=NOTHING, required=True, repr=False, key=None): """ Create new mapping field on a model. :param cls: class (or name) of the model to be related in Sequence. :param child_key: key field on the child object to be used as the map key. :param default: any mapping type :param bool required: whether or not the object is invalid if not provided. :param bool repr: include this field should appear in object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict. """ default = _init_fields.init_default(required, default, OrderedDict()) converter = converters.to_mapping_field(cls, child_key) validator = _init_fields.init_validator(required, types.TypedMapping) return attrib(default=default, converter=converter, validator=validator, repr=repr, metadata=dict(key=key))
[ "def", "MappingField", "(", "cls", ",", "child_key", ",", "default", "=", "NOTHING", ",", "required", "=", "True", ",", "repr", "=", "False", ",", "key", "=", "None", ")", ":", "default", "=", "_init_fields", ".", "init_default", "(", "required", ",", ...
53.5
23.5
def timeAtThreshold(self,dateTime): ''' A convenience method for checking when a time is on the start/end boundary for this rule. ''' # Anything that's not at a day boundary is for sure not at a threshold. if not ( dateTime.hour == self.dayStarts and dateTime.minute == 0 and dateTime.second == 0 and dateTime.microsecond == 0 ): return False if self.applyRateRule == self.RateRuleChoices.daily: return True elif self.applyRateRule == self.RateRuleChoices.weekly: return (dateTime == self.weekStarts) elif self.applyRateRule == self.RateRuleChoices.monthly: return (dateTime.day == self.monthStarts) # Everything else is nonsensical, so False. return False
[ "def", "timeAtThreshold", "(", "self", ",", "dateTime", ")", ":", "# Anything that's not at a day boundary is for sure not at a threshold.", "if", "not", "(", "dateTime", ".", "hour", "==", "self", ".", "dayStarts", "and", "dateTime", ".", "minute", "==", "0", "and"...
36.636364
24.636364
def set_ports(self, port0 = 0x00, port1 = 0x00): 'Writes specified value to the pins defined as output by method. Writing to input pins has no effect.' self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port0) self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port1) return
[ "def", "set_ports", "(", "self", ",", "port0", "=", "0x00", ",", "port1", "=", "0x00", ")", ":", "self", ".", "bus", ".", "write_byte_data", "(", "self", ".", "address", ",", "self", ".", "CONTROL_PORT0", ",", "port0", ")", "self", ".", "bus", ".", ...
63.6
34
def _ostaunicode(src): # type: (str) -> bytes ''' Internal function to create an OSTA byte string from a source string. ''' if have_py_3: bytename = src else: bytename = src.decode('utf-8') # type: ignore try: enc = bytename.encode('latin-1') encbyte = b'\x08' except (UnicodeEncodeError, UnicodeDecodeError): enc = bytename.encode('utf-16_be') encbyte = b'\x10' return encbyte + enc
[ "def", "_ostaunicode", "(", "src", ")", ":", "# type: (str) -> bytes", "if", "have_py_3", ":", "bytename", "=", "src", "else", ":", "bytename", "=", "src", ".", "decode", "(", "'utf-8'", ")", "# type: ignore", "try", ":", "enc", "=", "bytename", ".", "enco...
26.647059
20.529412