text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def publishCommand(self, typeId, deviceId, commandId, msgFormat, data=None, qos=0, on_publish=None):
    """
    Publish a command to a device.

    # Parameters
    typeId (string) : The type of the device this command is to be published to
    deviceId (string): The id of the device this command is to be published to
    commandId (string) : The name of the command
    msgFormat (string) : The format of the command payload
    data (dict) : The command data
    qos (int) : The equivalent MQTT semantics of quality of service using the same constants
        (optional, defaults to `0`)
    on_publish (function) : A function that will be called when receipt of the publication is
        confirmed. This has different implications depending on the qos:
        - qos 0 : the client has asynchronously begun to send the event
        - qos 1 and 2 : the client has confirmation of delivery from WIoTP

    Returns False if the command could not be sent (QuickStart account, or the
    client is not connected within 10 seconds); True once the publish has been
    handed to the MQTT client successfully.
    """
    # QuickStart accounts are read-only with respect to commands.
    if self._config.isQuickstart():
        self.logger.warning("QuickStart applications do not support sending commands")
        return False
    # Wait (up to 10s) for the connection to be established before publishing.
    if not self.connectEvent.wait(timeout=10):
        return False
    else:
        topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)
        # Raise an exception if there is no codec for this msgFormat
        if self.getMessageCodec(msgFormat) is None:
            raise MissingMessageEncoderException(msgFormat)
        payload = self.getMessageCodec(msgFormat).encode(data, datetime.now())
        # result is a paho (rc, mid) pair: result[0] = return code, result[1] = message id.
        result = self.client.publish(topic, payload=payload, qos=qos, retain=False)
        if result[0] == paho.MQTT_ERR_SUCCESS:
            # Because we are dealing with async pub/sub model and callbacks it is possible that
            # the _onPublish() callback for this mid is called before we obtain the lock to place
            # the mid into the _onPublishCallbacks list.
            #
            # _onPublish knows how to handle a scenario where the mid is not present (do nothing);
            # in this scenario we will need to invoke the callback directly here, because at the time
            # the callback was invoked the mid was not yet in the list.
            with self._messagesLock:
                if result[1] in self._onPublishCallbacks:
                    # paho callback beat this thread so call callback inline now
                    del self._onPublishCallbacks[result[1]]
                    if on_publish is not None:
                        on_publish()
                else:
                    # this thread beat paho callback so set up for call later
                    self._onPublishCallbacks[result[1]] = on_publish
            return True
        else:
            return False
[ "def", "publishCommand", "(", "self", ",", "typeId", ",", "deviceId", ",", "commandId", ",", "msgFormat", ",", "data", "=", "None", ",", "qos", "=", "0", ",", "on_publish", "=", "None", ")", ":", "if", "self", ".", "_config", ".", "isQuickstart", "(", ...
56.74
29.54
def iso_abundMulti(self, cyclist, stable=False, amass_range=None, mass_range=None,
                   ylim=[0, 0], ref=-1, decayed=False, include_title=False, title=None,
                   pdf=False, color_plot=True, grid=False, point_set=1):
    '''
    Method that plots figures and saves those figures to a .png file.
    Plots a figure for each cycle in the argument cycle.  Can be called
    via the iso_abund method by passing a list to cycle.

    Parameters
    ----------
    cyclist : list
        The cycles of interest.  This method will do a plot for each
        cycle and save them to a file.
    stable : boolean, optional
        A boolean of whether to filter out the unstables.  The default
        is False.
    amass_range : list, optional
        A 1x2 array containing the lower and upper atomic mass range.
        If None plot entire available atomic mass range.  The default
        is None.
    mass_range : list, optional
        A 1x2 array containing the lower and upper mass range.  If this
        is an instance of abu_vector this will only plot isotopes that
        have an atomic mass within this range.  This will throw an
        error if this range does not make sense ie [45,2].  If None, it
        will plot over the entire range.  The default is None.
    ylim : list, optional
        A 1x2 array containing the lower and upper Y limits.  If it is
        [0,0], then ylim will be determined automatically.  The default
        is [0,0].
    ref : integer or list, optional
        Reference cycle.  If it is not -1, this method will plot the
        abundances of cycle divided by the cycle of the same instance
        given in the ref variable.  If ref is a list it will be
        interpreted to have two elements: ref=['dir/of/ref/run',cycle]
        which uses a reference cycle from another run.  If any
        abundance in the reference cycle is zero, it will replace it
        with 1e-99.  The default is -1.
    decayed : boolean, optional
        If True plot decayed distributions, else plot life
        distribution.  The default is False.
    include_title : boolean, optional
        Include a title with the plot.  The default is False.
    title : string, optional
        A title to include with the plot.  The default is None.
    pdf : boolean, optional
        Save image as a [pdf/png].  The default is False.
    color_plot : boolean, optional
        Color dots and lines [True/False].  The default is True.
    grid : boolean, optional
        print grid.  The default is False.
    point_set : integer, optional
        Set to 0, 1 or 2 to select one of three point sets, useful for
        multiple abundances or ratios in one plot.  The default is 1.
    '''
    max_num = max(cyclist)
    for i in range(len(cyclist)):
        # BUGFIX: grid and point_set are now forwarded to iso_abund instead of
        # being hard-coded to False and 1, so the caller's choices take effect.
        self.iso_abund(cyclist[i], stable, amass_range, mass_range, ylim, ref,
                       decayed=decayed, show=False, color_plot=color_plot,
                       grid=grid, point_set=point_set, include_title=include_title)
        # BUGFIX: name is now assigned unconditionally; previously it was only
        # set in the else branch, so passing a title raised NameError at savefig.
        name = 'IsoAbund'
        if title is not None:
            pl.title(title)
        number_str = _padding_model_number(cyclist[i], max_num)
        if not pdf:
            pl.savefig(name + number_str + '.png', dpi=200)
        else:
            pl.savefig(name + number_str + '.pdf', dpi=200)
        pl.clf()
    return None
[ "def", "iso_abundMulti", "(", "self", ",", "cyclist", ",", "stable", "=", "False", ",", "amass_range", "=", "None", ",", "mass_range", "=", "None", ",", "ylim", "=", "[", "0", ",", "0", "]", ",", "ref", "=", "-", "1", ",", "decayed", "=", "False", ...
46.818182
20.090909
def QueryFields(r, what, fields=None): """ Retrieves available fields for a resource. @type what: string @param what: Resource name, one of L{constants.QR_VIA_RAPI} @type fields: list of string @param fields: Requested fields @rtype: string @return: job id """ query = {} if fields is not None: query["fields"] = ",".join(fields) return r.request("get", "/2/query/%s/fields" % what, query=query)
[ "def", "QueryFields", "(", "r", ",", "what", ",", "fields", "=", "None", ")", ":", "query", "=", "{", "}", "if", "fields", "is", "not", "None", ":", "query", "[", "\"fields\"", "]", "=", "\",\"", ".", "join", "(", "fields", ")", "return", "r", "....
23.052632
19.789474
def _verified_frame_length(frame_length, content_type): # type: (int, ContentType) -> int """Verify a frame length value for a message content type. :param int frame_length: Frame length to verify :param ContentType content_type: Message content type to verify against :return: frame length :rtype: int :raises SerializationError: if frame length is too large :raises SerializationError: if frame length is not zero for unframed content type """ if content_type == ContentType.FRAMED_DATA and frame_length > MAX_FRAME_SIZE: raise SerializationError( "Specified frame length larger than allowed maximum: {found} > {max}".format( found=frame_length, max=MAX_FRAME_SIZE ) ) if content_type == ContentType.NO_FRAMING and frame_length != 0: raise SerializationError("Non-zero frame length found for non-framed message") return frame_length
[ "def", "_verified_frame_length", "(", "frame_length", ",", "content_type", ")", ":", "# type: (int, ContentType) -> int", "if", "content_type", "==", "ContentType", ".", "FRAMED_DATA", "and", "frame_length", ">", "MAX_FRAME_SIZE", ":", "raise", "SerializationError", "(", ...
42.181818
24.954545
def label_for_lm(self, **kwargs): "A special labelling method for language models." self.__class__ = LMTextList kwargs['label_cls'] = LMLabelList return self.label_const(0, **kwargs)
[ "def", "label_for_lm", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "__class__", "=", "LMTextList", "kwargs", "[", "'label_cls'", "]", "=", "LMLabelList", "return", "self", ".", "label_const", "(", "0", ",", "*", "*", "kwargs", ")" ]
42
6.8
def wtime_to_minutes(time_string): ''' wtime_to_minutes Convert standard wallclock time string to minutes. Args: - Time_string in HH:MM:SS format Returns: (int) minutes ''' hours, mins, seconds = time_string.split(':') return int(hours) * 60 + int(mins) + 1
[ "def", "wtime_to_minutes", "(", "time_string", ")", ":", "hours", ",", "mins", ",", "seconds", "=", "time_string", ".", "split", "(", "':'", ")", "return", "int", "(", "hours", ")", "*", "60", "+", "int", "(", "mins", ")", "+", "1" ]
20.857143
22.714286
def trade_signals_handler(self, signals): ''' Process buy and sell signals from the simulation ''' alloc = {} if signals['buy'] or signals['sell']: # Compute the optimal portfolio allocation, # Using user defined function try: alloc, e_ret, e_risk = self.optimize( self.date, signals['buy'], signals['sell'], self._optimizer_parameters) except Exception, error: raise PortfolioOptimizationFailed( reason=error, date=self.date, data=signals) return _remove_useless_orders(alloc)
[ "def", "trade_signals_handler", "(", "self", ",", "signals", ")", ":", "alloc", "=", "{", "}", "if", "signals", "[", "'buy'", "]", "or", "signals", "[", "'sell'", "]", ":", "# Compute the optimal portfolio allocation,", "# Using user defined function", "try", ":",...
38.235294
15.647059
def p_delays_identifier(self, p): 'delays : DELAY identifier' p[0] = DelayStatement(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_delays_identifier", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "DelayStatement", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "l...
39.75
7.75
def _redundant_stack_variable_removal(self, function, data_graph):
    """
    If an argument passed from the stack (i.e. dword ptr [ebp+4h]) is saved to a local variable on
    the stack at the beginning of the function, and this local variable was never modified anywhere
    in this function, and no pointer of any stack variable is saved in any register, then we can
    replace all references to this local variable to that argument instead.

    Appends the discovered RedundantStackVariable records to
    self.redundant_stack_variables (no return value).

    :param function: The function being analyzed.
    :param networkx.MultiDiGraph data_graph: Def-use graph of variables; edges with
        data['type'] == 'kill' mark overwrites rather than uses.
    :return: None
    """
    # check if there is any stack pointer being stored into any register other than esp
    # basically check all consumers of stack pointers
    stack_ptrs = [ ]
    sp_offset = self.project.arch.registers['esp'][0]
    bp_offset = self.project.arch.registers['ebp'][0]
    for n in data_graph.nodes():
        if isinstance(n.variable, SimRegisterVariable) and n.variable.reg in (sp_offset, bp_offset):
            stack_ptrs.append(n)
    # for each stack pointer variable, make sure none of its consumers is a general purpose register
    for stack_ptr in stack_ptrs:
        out_edges = data_graph.out_edges(stack_ptr, data=True)
        for _, dst, data in out_edges:
            if 'type' in data and data['type'] == 'kill':
                # we don't care about killing edges
                continue
            # reg < 40 -- presumably the offset range covering the x86 general
            # purpose registers in this arch's register file; TODO confirm.
            if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg < 40 and \
                    dst.variable.reg not in (sp_offset, bp_offset):
                # oops: a stack pointer escaped into a GP register; the
                # transformation would be unsound, so bail out entirely.
                l.debug('Function %s does not satisfy requirements of redundant stack variable removal.',
                        repr(function)
                        )
                return
    # Collect stack-passed arguments: bp-based variables at non-negative offsets.
    argument_variables = [ ]
    for n in data_graph.nodes():
        if isinstance(n.variable, SimStackVariable) and n.variable.base == 'bp' and n.variable.offset >= 0:
            argument_variables.append(n)
    if not argument_variables:
        return
    #print function
    #print argument_variables
    argument_to_local = { }
    argument_register_as_retval = set()
    # for each argument, find its correspondence on the local stack frame
    for argument_variable in argument_variables:
        # is it copied to the stack?
        successors0 = list(data_graph.successors(argument_variable))
        if not successors0:
            continue
        if len(successors0) != 1:
            continue
        if isinstance(successors0[0].variable, SimRegisterVariable):
            # argument -> register -> stack
            out_edges = data_graph.out_edges(successors0[0], data=True)
            successors1 = [ s for _, s, data in out_edges if 'type' not in data or data['type'] != 'kill' ]
            if len(successors1) == 1:
                successor1 = successors1[0]
                if isinstance(successor1.variable, SimStackVariable):
                    if (successor1.variable.base == 'sp' and successor1.variable.offset > 0) or \
                            (successor1.variable.base == 'bp' and successor1.variable.offset < 0):
                        # yes it's copied onto the stack!
                        argument_to_local[argument_variable] = successor1
            # if the register is eax, and it's not killed later, it might be the return value of this function
            # in that case, we cannot eliminate the instruction that moves stack argument to that register
            if successors0[0].variable.reg == self.project.arch.registers['eax'][0]:
                killers = [ s for _, s, data in out_edges if 'type' in data and data['type'] == 'kill']
                if not killers:
                    # it might be the return value
                    argument_register_as_retval.add(argument_variable)
        else:
            # TODO:
            # NOTE(review): leftover interactive debugging hook -- this will
            # hang non-interactive runs if this branch is ever reached and
            # requires the third-party ipdb package; should be removed or
            # replaced with a log statement.
            import ipdb; ipdb.set_trace()
    #import pprint
    #pprint.pprint(argument_to_local, width=160)
    # find local correspondence that are not modified throughout this function
    redundant_stack_variables = [ ]
    for argument, local_var in argument_to_local.items():
        # local_var cannot be killed anywhere
        out_edges = data_graph.out_edges(local_var, data=True)
        consuming_locs = [ ]
        for _, consumer, data in out_edges:
            consuming_locs.append(consumer.location)
            if 'type' in data and data['type'] == 'kill':
                break
        else:
            # no killing edges. the value is not changed!
            rsv = RedundantStackVariable(argument, local_var, consuming_locs)
            if argument in argument_register_as_retval:
                rsv.argument_register_as_retval = True
            redundant_stack_variables.append(rsv)
    self.redundant_stack_variables.extend(redundant_stack_variables)
[ "def", "_redundant_stack_variable_removal", "(", "self", ",", "function", ",", "data_graph", ")", ":", "# check if there is any stack pointer being stored into any register other than esp", "# basically check all consumers of stack pointers", "stack_ptrs", "=", "[", "]", "sp_offset",...
45.423423
27.495495
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
    """
    <Purpose>
      Return a (signature, scheme) tuple.

      >>> requested_scheme = 'ecdsa-sha2-nistp256'
      >>> public, private = generate_public_and_private(requested_scheme)
      >>> data = b'The quick brown fox jumps over the lazy dog'
      >>> signature, scheme = create_signature(public, private, data, requested_scheme)
      >>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
      True
      >>> requested_scheme == scheme
      True

    <Arguments>
      public:
        The ECDSA public key in PEM format.

      private:
        The ECDSA private key in PEM format.

      data:
        Byte data used by create_signature() to generate the signature returned.

      scheme:
        The signature scheme used to generate the signature.  For example:
        'ecdsa-sha2-nistp256'.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if the arguments are improperly
      formatted.

      securesystemslib.exceptions.CryptoError, if a signature cannot be created.

      securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
      one of the supported signature schemes.

    <Side Effects>
      None.

    <Returns>
      A signature conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'.
      The signature is DER-encoded bytes; the hexlified form is what gets
      stored in the signature dictionary.
    """
    # Do 'public_key' and 'private_key' have the correct format?
    # This check will ensure that the arguments conform to
    # 'securesystemslib.formats.PEMECDSA_SCHEMA'.  Raise
    # 'securesystemslib.exceptions.FormatError' if the check fails.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)

    # Is 'private_key' properly formatted?
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)

    # Is 'scheme' properly formatted?
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)

    # 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
    # if-clause isn't strictly needed.  Nevertheless, the conditional statement
    # is included to accommodate multiple schemes that can potentially be added
    # in the future.
    if scheme == 'ecdsa-sha2-nistp256':
        try:
            private_key = load_pem_private_key(private_key.encode('utf-8'),
                password=None, backend=default_backend())
            signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
        # load_pem_private_key raises TypeError on malformed key material.
        except TypeError as e:
            raise securesystemslib.exceptions.CryptoError('Could not create'
                ' signature: ' + str(e))
    # A defensive check for an invalid 'scheme'.  The
    # ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
    else: #pragma: no cover
        raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
            ' signature scheme is specified: ' + repr(scheme))

    return signature, scheme
[ "def", "create_signature", "(", "public_key", ",", "private_key", ",", "data", ",", "scheme", "=", "'ecdsa-sha2-nistp256'", ")", ":", "# Do 'public_key' and 'private_key' have the correct format?", "# This check will ensure that the arguments conform to", "# 'securesystemslib.formats...
35.1125
26.6375
def PlayerTypeEnum(ctx): """Player Type Enumeration.""" return Enum( ctx, absent=0, closed=1, human=2, eliminated=3, computer=4, cyborg=5, spectator=6 )
[ "def", "PlayerTypeEnum", "(", "ctx", ")", ":", "return", "Enum", "(", "ctx", ",", "absent", "=", "0", ",", "closed", "=", "1", ",", "human", "=", "2", ",", "eliminated", "=", "3", ",", "computer", "=", "4", ",", "cyborg", "=", "5", ",", "spectato...
18.083333
21.416667
def AddCredentialOptions(self, argument_group): """Adds the credential options to the argument group. The credential options are use to unlock encrypted volumes. Args: argument_group (argparse._ArgumentGroup): argparse argument group. """ argument_group.add_argument( '--credential', action='append', default=[], type=str, dest='credentials', metavar='TYPE:DATA', help=( 'Define a credentials that can be used to unlock encrypted ' 'volumes e.g. BitLocker. The credential is defined as type:data ' 'e.g. "password:BDE-test". Supported credential types are: ' '{0:s}. Binary key data is expected to be passed in BASE-16 ' 'encoding (hexadecimal). WARNING credentials passed via command ' 'line arguments can end up in logs, so use this option with ' 'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
[ "def", "AddCredentialOptions", "(", "self", ",", "argument_group", ")", ":", "argument_group", ".", "add_argument", "(", "'--credential'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ",", "type", "=", "str", ",", "dest", "=", "'credentials...
51.166667
25.277778
def read_md5(self, hex=False): """ Calculate the md5 hash for this file. hex - Return the digest as hex string. This reads through the entire file. """ f = self.open('rb') try: m = hashlib.md5() while True: d = f.read(8192) if not d: break m.update(d) finally: f.close() if hex: return m.hexdigest() else: return m.digest()
[ "def", "read_md5", "(", "self", ",", "hex", "=", "False", ")", ":", "f", "=", "self", ".", "open", "(", "'rb'", ")", "try", ":", "m", "=", "hashlib", ".", "md5", "(", ")", "while", "True", ":", "d", "=", "f", ".", "read", "(", "8192", ")", ...
24.047619
15.857143
def force_unicode(value): """ return an utf-8 unicode entry """ if not isinstance(value, (str, unicode)): value = unicode(value) if isinstance(value, str): value = value.decode('utf-8') return value
[ "def", "force_unicode", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "str", ",", "unicode", ")", ")", ":", "value", "=", "unicode", "(", "value", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=...
25.555556
8.222222
def update_dependent_files(self, prev_commands=[]): """ Update the command's dependencies based on the evaluated input and output of previous commands. """ for command in prev_commands: for my_input in self.input_parts: for their_output in command.output_parts: if their_output == my_input: my_input.filename = their_output.eval()
[ "def", "update_dependent_files", "(", "self", ",", "prev_commands", "=", "[", "]", ")", ":", "for", "command", "in", "prev_commands", ":", "for", "my_input", "in", "self", ".", "input_parts", ":", "for", "their_output", "in", "command", ".", "output_parts", ...
47.333333
7.888889
def to_tuple(self): """Cast to tuple. Returns ------- tuple The confusion table as a 4-tuple (tp, tn, fp, fn) Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.to_tuple() (120, 60, 20, 30) """ return self._tp, self._tn, self._fp, self._fn
[ "def", "to_tuple", "(", "self", ")", ":", "return", "self", ".", "_tp", ",", "self", ".", "_tn", ",", "self", ".", "_fp", ",", "self", ".", "_fn" ]
21.25
21.25
def AdaBoost(L, K): """[Fig. 18.34]""" def train(dataset): examples, target = dataset.examples, dataset.target N = len(examples) epsilon = 1./(2*N) w = [1./N] * N h, z = [], [] for k in range(K): h_k = L(dataset, w) h.append(h_k) error = sum(weight for example, weight in zip(examples, w) if example[target] != h_k(example)) # Avoid divide-by-0 from either 0% or 100% error rates: error = clip(error, epsilon, 1-epsilon) for j, example in enumerate(examples): if example[target] == h_k(example): w[j] *= error / (1. - error) w = normalize(w) z.append(math.log((1. - error) / error)) return WeightedMajority(h, z) return train
[ "def", "AdaBoost", "(", "L", ",", "K", ")", ":", "def", "train", "(", "dataset", ")", ":", "examples", ",", "target", "=", "dataset", ".", "examples", ",", "dataset", ".", "target", "N", "=", "len", "(", "examples", ")", "epsilon", "=", "1.", "/", ...
37.636364
14.909091
def arrays_overlap(a1, a2): """ Collection function: returns true if the arrays contain any common non-null element; if not, returns null if both the arrays are non-empty and any of them contains a null element; returns false otherwise. >>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y']) >>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect() [Row(overlap=True), Row(overlap=False)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
[ "def", "arrays_overlap", "(", "a1", ",", "a2", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "arrays_overlap", "(", "_to_java_column", "(", "a1", ")", ",", "_to_java_colu...
50.083333
27.75
def get_parameter_text(self, lower, maximum, upper, wrap=False): """ Generates LaTeX appropriate text from marginalised parameter bounds. Parameters ---------- lower : float The lower bound on the parameter maximum : float The value of the parameter with maximum probability upper : float The upper bound on the parameter wrap : bool Wrap output text in dollar signs for LaTeX Returns ------- str The formatted text given the parameter bounds """ if lower is None or upper is None: return "" upper_error = upper - maximum lower_error = maximum - lower if upper_error != 0 and lower_error != 0: resolution = min(np.floor(np.log10(np.abs(upper_error))), np.floor(np.log10(np.abs(lower_error)))) elif upper_error == 0 and lower_error != 0: resolution = np.floor(np.log10(np.abs(lower_error))) elif upper_error != 0 and lower_error == 0: resolution = np.floor(np.log10(np.abs(upper_error))) else: resolution = np.floor(np.log10(np.abs(maximum))) factor = 0 fmt = "%0.1f" r = 1 if np.abs(resolution) > 2: factor = -resolution if resolution == 2: fmt = "%0.0f" factor = -1 r = 0 if resolution == 1: fmt = "%0.0f" if resolution == -1: fmt = "%0.2f" r = 2 elif resolution == -2: fmt = "%0.3f" r = 3 upper_error *= 10 ** factor lower_error *= 10 ** factor maximum *= 10 ** factor upper_error = round(upper_error, r) lower_error = round(lower_error, r) maximum = round(maximum, r) if maximum == -0.0: maximum = 0.0 if resolution == 2: upper_error *= 10 ** -factor lower_error *= 10 ** -factor maximum *= 10 ** -factor factor = 0 fmt = "%0.0f" upper_error_text = fmt % upper_error lower_error_text = fmt % lower_error if upper_error_text == lower_error_text: text = r"%s\pm %s" % (fmt, "%s") % (maximum, lower_error_text) else: text = r"%s^{+%s}_{-%s}" % (fmt, "%s", "%s") % \ (maximum, upper_error_text, lower_error_text) if factor != 0: text = r"\left( %s \right) \times 10^{%d}" % (text, -factor) if wrap: text = "$%s$" % text return text
[ "def", "get_parameter_text", "(", "self", ",", "lower", ",", "maximum", ",", "upper", ",", "wrap", "=", "False", ")", ":", "if", "lower", "is", "None", "or", "upper", "is", "None", ":", "return", "\"\"", "upper_error", "=", "upper", "-", "maximum", "lo...
34.413333
15.76
def imread(path, grayscale=False, size=None, interpolate="bilinear", channel_first=False, as_uint16=False, num_channels=-1): """ Read image by PIL module. Notice that PIL only supports uint8 for RGB (not uint16). So this imread function returns only uint8 array for both RGB and gray-scale. (Currently ignore "I" mode for gray-scale (32bit integer).) Args: path (str or 'file object'): File path or object to read. grayscale (bool): size (tupple of int): (width, height). If None, output img shape depends on the files to read. channel_first (bool): This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channel). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. as_uint16 (bool): If you specify this argument, you can use only False for pil backend. num_channels (int): channel size of output array. Default is -1 which preserves raw image shape. Returns: numpy.ndarray """ if as_uint16: raise ValueError("pillow only supports uint8 for RGB image." " If you want to load image as uint16," " install pypng or cv2 and" " nnabla.utils.image_utils automatically change backend to use these module.") _imread_before(grayscale, num_channels) pil_img = Image.open(path, mode="r") img = pil_image_to_ndarray(pil_img, grayscale, num_channels) return _imread_after(img, size, interpolate, channel_first, imresize)
[ "def", "imread", "(", "path", ",", "grayscale", "=", "False", ",", "size", "=", "None", ",", "interpolate", "=", "\"bilinear\"", ",", "channel_first", "=", "False", ",", "as_uint16", "=", "False", ",", "num_channels", "=", "-", "1", ")", ":", "if", "as...
41.619048
26.142857
def css_load_time(self): """ Returns aggregate css load time for all pages. """ load_times = self.get_load_times('css') return round(mean(load_times), self.decimal_precision)
[ "def", "css_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'css'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
34.833333
9.833333
def update(self, auth_payload=values.unset): """ Update the ChallengeInstance :param unicode auth_payload: Optional payload to verify the Challenge :returns: Updated ChallengeInstance :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance """ data = values.of({'AuthPayload': auth_payload, }) payload = self._version.update( 'POST', self._uri, data=data, ) return ChallengeInstance( self._version, payload, service_sid=self._solution['service_sid'], identity=self._solution['identity'], factor_sid=self._solution['factor_sid'], sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "auth_payload", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'AuthPayload'", ":", "auth_payload", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "update", "(", "'...
30.24
18.72
def coerce_location(value, **options): """ Coerce a string to a :class:`Location` object. :param value: The value to coerce (a string or :class:`Location` object). :param options: Any keyword arguments are passed on to :func:`~executor.contexts.create_context()`. :returns: A :class:`Location` object. """ # Location objects pass through untouched. if not isinstance(value, Location): # Other values are expected to be strings. if not isinstance(value, string_types): msg = "Expected Location object or string, got %s instead!" raise ValueError(msg % type(value)) # Try to parse a remote location. ssh_alias, _, directory = value.partition(':') if ssh_alias and directory and '/' not in ssh_alias: options['ssh_alias'] = ssh_alias else: directory = value # Create the location object. value = Location( context=create_context(**options), directory=parse_path(directory), ) return value
[ "def", "coerce_location", "(", "value", ",", "*", "*", "options", ")", ":", "# Location objects pass through untouched.", "if", "not", "isinstance", "(", "value", ",", "Location", ")", ":", "# Other values are expected to be strings.", "if", "not", "isinstance", "(", ...
39.259259
13.111111
def _initialize_parameters(state_machine, n_features): """ Helper to create initial parameter vector with the correct shape. """ return np.zeros((state_machine.n_states + state_machine.n_transitions, n_features))
[ "def", "_initialize_parameters", "(", "state_machine", ",", "n_features", ")", ":", "return", "np", ".", "zeros", "(", "(", "state_machine", ".", "n_states", "+", "state_machine", ".", "n_transitions", ",", "n_features", ")", ")" ]
55
8
def cli(ctx, config, debug): """SnakTeX command line interface - write LaTeX faster through templating.""" ctx.obj['config'] = config ctx.obj['engine'] = stex.SnakeTeX(config_file=config, debug=debug)
[ "def", "cli", "(", "ctx", ",", "config", ",", "debug", ")", ":", "ctx", ".", "obj", "[", "'config'", "]", "=", "config", "ctx", ".", "obj", "[", "'engine'", "]", "=", "stex", ".", "SnakeTeX", "(", "config_file", "=", "config", ",", "debug", "=", ...
52.25
13
def clear_all(self): """ clear all files that were to be injected """ self.injections.clear_all() for config_file in CONFIG_FILES: self.injections.clear(os.path.join("~", config_file))
[ "def", "clear_all", "(", "self", ")", ":", "self", ".", "injections", ".", "clear_all", "(", ")", "for", "config_file", "in", "CONFIG_FILES", ":", "self", ".", "injections", ".", "clear", "(", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "config_...
43.2
10
def roll(self): """ Calculates the Roll of the Quaternion. """ x, y, z, w = self.x, self.y, self.z, self.w return math.atan2(2*y*w - 2*x*z, 1 - 2*y*y - 2*z*z)
[ "def", "roll", "(", "self", ")", ":", "x", ",", "y", ",", "z", ",", "w", "=", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "z", ",", "self", ".", "w", "return", "math", ".", "atan2", "(", "2", "*", "y", "*", "w", "-", "2",...
33.4
17.4
def _init_metadata(self): """stub""" self._confused_learning_objectives_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'confusedLearningObjectiveIds'), 'element_label': 'Confused Learning Objectives', 'instructions': 'List of IDs', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_list_values': [[]], 'syntax': 'LIST' } self._feedbacks_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'feedbacks'), 'element_label': 'Feedbacks', 'instructions': 'Enter as many text feedback strings as you wish', 'required': True, 'read_only': False, 'linked': False, 'array': True, 'default_object_values': [[]], 'syntax': 'OBJECT', 'object_set': [] }
[ "def", "_init_metadata", "(", "self", ")", ":", "self", ".", "_confused_learning_objectives_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",",...
39.137931
13.965517
def do_flip(dec=None, inc=None, di_block=None):
    """
    This function returns the antipode (i.e. it flips) of directions.

    The function can take dec and inc as seperate lists if they are of equal
    length and explicitly specified or are the first two arguments. It will
    then return a list of flipped decs and a list of flipped incs. If a
    di_block (a nested list of [dec, inc, 1.0]) is specified then it is used
    and the function returns a di_block with the flipped directions.

    Parameters
    ----------
    dec: list of declinations
    inc: list of inclinations

    or

    di_block: a nested list of [dec, inc, 1.0]

    A di_block can be provided instead of dec, inc lists in which case it
    will be used. Either dec, inc lists or a di_block need to passed to the
    function.

    Returns
    ----------
    dec_flip, inc_flip : list of flipped declinations and inclinations

    or

    dflip : a nested list of [dec, inc, 1.0]

    Examples
    ----------
    Lists of declination and inclination can be flipped to their antipodes:

    >>> decs = [1.0, 358.0, 2.0]
    >>> incs = [10.0, 12.0, 8.0]
    >>> ipmag.do_flip(decs, incs)
    ([181.0, 178.0, 182.0], [-10.0, -12.0, -8.0])

    The function can also take a di_block and returns a flipped di_block:

    >>> directions = [[1.0,10.0],[358.0,12.0,],[2.0,8.0]]
    >>> ipmag.do_flip(di_block=directions)
    [[181.0, -10.0, 1.0], [178.0, -12.0, 1.0], [182.0, -8.0, 1.0]]
    """
    if di_block is None:
        # Flip each direction: rotate declination by 180 degrees and
        # negate the inclination (dec/inc lists are documented as equal length).
        dec_flip = [(d - 180.) % 360.0 for d in dec]
        inc_flip = [-i for i in inc]
        return dec_flip, inc_flip
    # di_block form: emit [dec, inc, 1.0] triples with flipped directions.
    return [[(rec[0] - 180.) % 360., -rec[1], 1.0] for rec in di_block]
[ "def", "do_flip", "(", "dec", "=", "None", ",", "inc", "=", "None", ",", "di_block", "=", "None", ")", ":", "if", "di_block", "is", "None", ":", "dec_flip", "=", "[", "]", "inc_flip", "=", "[", "]", "for", "n", "in", "range", "(", "0", ",", "le...
32.267857
23.125
def get_credentials(scopes=None, secrets=None, storage=None, no_webserver=False):
    """Make OAuth 2.0 credentials for scopes from ``secrets`` and ``storage`` files.

    Args:
        scopes: scope URL(s) or ``'read'``, ``'write'`` (default: ``%r``)
        secrets: location of secrets file (default: ``%r``)
        storage: location of storage file (default: ``%r``)
        no_webserver: url/code prompt instead of webbrowser based auth

    see https://developers.google.com/sheets/quickstart/python
    see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
    """
    scopes = Scopes.get(scopes)
    # Fall back to module-level defaults and expand any '~' in the paths.
    secrets_path = os.path.expanduser(SECRETS if secrets is None else secrets)
    storage_path = os.path.expanduser(STORAGE if storage is None else storage)
    store = file.Storage(storage_path)
    creds = store.get()
    if creds is not None and not creds.invalid:
        return creds
    # No valid cached credentials: run the OAuth flow (browser or console).
    flow = client.flow_from_clientsecrets(secrets_path, scopes)
    flag_args = ['--noauth_local_webserver'] if no_webserver else []
    flags = tools.argparser.parse_args(flag_args)
    return tools.run_flow(flow, store, flags)
[ "def", "get_credentials", "(", "scopes", "=", "None", ",", "secrets", "=", "None", ",", "storage", "=", "None", ",", "no_webserver", "=", "False", ")", ":", "scopes", "=", "Scopes", ".", "get", "(", "scopes", ")", "if", "secrets", "is", "None", ":", ...
36.193548
23.83871
def get_location_metres(original_location, dNorth, dEast):
    """
    Returns a LocationGlobal object containing the latitude/longitude `dNorth`
    and `dEast` metres from the specified `original_location`. The returned
    Location has the same `alt` value as `original_location`.

    Useful for specifying positions relative to the current vehicle position.

    The algorithm is relatively accurate over small distances (10m within 1km)
    except close to the poles. For more information see:
    http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
    """
    earth_radius = 6378137.0  # radius of "spherical" earth, metres

    # Offsets in radians; longitude shrinks with the cosine of latitude.
    dLat = dNorth/earth_radius
    dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))

    # Convert the radian offsets back to decimal degrees.
    newlat = original_location.lat + (dLat * 180/math.pi)
    newlon = original_location.lon + (dLon * 180/math.pi)
    return LocationGlobal(newlat, newlon, original_location.alt)
[ "def", "get_location_metres", "(", "original_location", ",", "dNorth", ",", "dEast", ")", ":", "earth_radius", "=", "6378137.0", "#Radius of \"spherical\" earth", "#Coordinate offsets in radians", "dLat", "=", "dNorth", "/", "earth_radius", "dLon", "=", "dEast", "/", ...
51.428571
27.047619
def next(self):
    '''
    Returns next image for same content_object and None if image is the last.
    '''
    earlier = self.__class__.objects.for_model(
        self.content_object, self.content_type
    ).filter(order__lt=self.order).order_by('-order')
    try:
        return earlier[0]
    except IndexError:
        # No image with a smaller order exists: this is the last one.
        return None
[ "def", "next", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__class__", ".", "objects", ".", "for_model", "(", "self", ".", "content_object", ",", "self", ".", "content_type", ")", ".", "filter", "(", "order__lt", "=", "self", ".", "order...
43.777778
26.666667
def visibleNodes(self):
    """
    Returns a list of the visible nodes in the scene.

    :return     [<XNode>, ..]
    """
    # A list comprehension (rather than filter()) guarantees the documented
    # list return type under Python 3, where filter() is a lazy iterator.
    return [item for item in self.items()
            if isinstance(item, XNode) and item.isVisible()]
[ "def", "visibleNodes", "(", "self", ")", ":", "return", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "XNode", ")", "and", "x", ".", "isVisible", "(", ")", ",", "self", ".", "items", "(", ")", ")" ]
31.125
13.625
def descend(self, remote, force=False):
    """
    Descend into `remote` (a '/'-separated path), creating directories
    as needed when `force` is set, and return the final working directory.
    """
    for segment in remote.split('/'):
        try:
            self.conn.cwd(segment)
        except Exception:
            # cwd failed; with force we create the directory and retry,
            # otherwise the failure is silently skipped (best-effort).
            if force:
                self.conn.mkd(segment)
                self.conn.cwd(segment)
    return self.conn.pwd()
[ "def", "descend", "(", "self", ",", "remote", ",", "force", "=", "False", ")", ":", "remote_dirs", "=", "remote", ".", "split", "(", "'/'", ")", "for", "directory", "in", "remote_dirs", ":", "try", ":", "self", ".", "conn", ".", "cwd", "(", "director...
37
6.636364
def preview(df, preview_rows=20):
    """
    Returns a preview of a dataframe, which contains both header rows
    and tail rows, plus a random sample of the middle.

    :param df: the DataFrame to preview
    :param preview_rows: total number of rows in the preview (minimum 4,
        capped at len(df))
    :return: a DataFrame with `preview_rows` rows (or all of `df` when it
        has fewer than 4 rows)
    """
    if preview_rows < 4:
        preview_rows = 4
    preview_rows = min(preview_rows, df.shape[0])
    outer = math.floor(preview_rows / 4)
    if outer == 0:
        # Fewer than 4 rows available: the middle slice df[0:-0] would be
        # empty and .sample() would raise, so just return everything.
        return df
    # head + sampled middle + tail adds up to exactly preview_rows.
    return pd.concat([df.head(outer),
                      df[outer:-outer].sample(preview_rows - 2 * outer),
                      df.tail(outer)])
[ "def", "preview", "(", "df", ",", "preview_rows", "=", "20", ")", ":", "#,preview_max_cols = 0):", "if", "preview_rows", "<", "4", ":", "preview_rows", "=", "4", "preview_rows", "=", "min", "(", "preview_rows", ",", "df", ".", "shape", "[", "0", "]", ")"...
39.545455
9.818182
def _get_side1KerningGroups(self): """ Subclasses may override this method. """ found = {} for name, contents in self.items(): if name.startswith("public.kern1."): found[name] = contents return found
[ "def", "_get_side1KerningGroups", "(", "self", ")", ":", "found", "=", "{", "}", "for", "name", ",", "contents", "in", "self", ".", "items", "(", ")", ":", "if", "name", ".", "startswith", "(", "\"public.kern1.\"", ")", ":", "found", "[", "name", "]", ...
29.666667
7.222222
def _insert_continuation_prompt(self, cursor):
    """ Inserts new continuation prompt using the specified cursor.
    """
    html = self._continuation_prompt_html
    if html is None:
        # Plain-text prompt: insert it verbatim.
        self._insert_plain_text(cursor, self._continuation_prompt)
        return
    # HTML prompt: insert it and remember the plain-text rendering.
    self._continuation_prompt = self._insert_html_fetching_plain_text(
        cursor, html)
[ "def", "_insert_continuation_prompt", "(", "self", ",", "cursor", ")", ":", "if", "self", ".", "_continuation_prompt_html", "is", "None", ":", "self", ".", "_insert_plain_text", "(", "cursor", ",", "self", ".", "_continuation_prompt", ")", "else", ":", "self", ...
49.25
15.75
def desbloquear_sat(self):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat` (unblocks
    the SAT device) by posting the request over HTTP.

    :return: A standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    conteudo = self._http_post('desbloquearsat').json()
    return RespostaSAT.desbloquear_sat(conteudo.get('retorno'))
[ "def", "desbloquear_sat", "(", "self", ")", ":", "resp", "=", "self", ".", "_http_post", "(", "'desbloquearsat'", ")", "conteudo", "=", "resp", ".", "json", "(", ")", "return", "RespostaSAT", ".", "desbloquear_sat", "(", "conteudo", ".", "get", "(", "'reto...
37.888889
12.222222
def log(x, base=None):
    """
    Calculate the log

    Parameters
    ----------
    x : float or array_like
        Input values
    base : int or float (Default: None)
        Base of the log. If `None`, the natural logarithm is computed
        (`base=np.e`).

    Returns
    -------
    out : float or ndarray
        Calculated result
    """
    # Dispatch to the specialized (more accurate) numpy routines where
    # one exists for the requested base.
    if base is None or base == np.e:
        return np.log(x)
    if base == 10:
        return np.log10(x)
    if base == 2:
        return np.log2(x)
    # Arbitrary base via the change-of-base formula.
    return np.log(x) / np.log(base)
[ "def", "log", "(", "x", ",", "base", "=", "None", ")", ":", "if", "base", "==", "10", ":", "return", "np", ".", "log10", "(", "x", ")", "elif", "base", "==", "2", ":", "return", "np", ".", "log2", "(", "x", ")", "elif", "base", "is", "None", ...
21.24
17.48
def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
    """
    Convert separable convolution layer from keras to coreml.

    A separable convolution is emitted as two chained CoreML convolution
    layers: a depthwise (grouped) convolution followed by a 1x1 pointwise
    convolution that mixes channels and carries the bias.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    _check_data_format(keras_layer)
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])
    has_bias = keras_layer.use_bias
    # Get the weights from _keras.
    weight_list = keras_layer.get_weights()
    # Drop None entries (e.g. the batch dimension) from the output shape;
    # the channel count is the last remaining dimension.
    output_blob_shape = list(filter(None, keras_layer.output_shape))
    output_channels = output_blob_shape[-1]
    # D: depth mutliplier
    # w[0] is (H,W,Cin,D)
    # w[1] is (1,1,Cin * D, Cout)
    W0 = weight_list[0]
    W1 = weight_list[1]
    height, width, input_channels, depth_mult = W0.shape
    # Bias (if any) belongs to the pointwise step only.
    b = weight_list[2] if has_bias else None
    # Reshape the depthwise kernel to (H, W, 1, Cin*D) as expected by a
    # grouped convolution with groups == Cin.
    W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))
    stride_height, stride_width = keras_layer.strides
    # Dilations
    if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
        dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
    else:
        dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
    # The two CoreML layers are linked through this intermediate blob name.
    intermediate_name = output_name + '_intermin_'
    # Step 1: depthwise convolution (one group per input channel, no bias).
    builder.add_convolution(name = layer + '_step_1',
                            kernel_channels = 1,
                            output_channels = input_channels * depth_mult,
                            height = height,
                            width = width,
                            stride_height = stride_height,
                            stride_width = stride_width,
                            border_mode = keras_layer.padding,
                            groups = input_channels,
                            W = W0,
                            b = None,
                            has_bias = False,
                            is_deconv = False,
                            output_shape = None,
                            input_name = input_name,
                            output_name = intermediate_name,
                            dilation_factors = dilations)
    # Step 2: 1x1 pointwise convolution mixing channels (carries the bias;
    # never dilated).
    builder.add_convolution(name = layer + '_step_2',
                            kernel_channels = input_channels * depth_mult,
                            output_channels = output_channels,
                            height = 1,
                            width = 1,
                            stride_height = 1,
                            stride_width = 1,
                            border_mode = keras_layer.padding,
                            groups = 1,
                            W = W1,
                            b = b,
                            has_bias = has_bias,
                            is_deconv = False,
                            output_shape = None,
                            input_name = intermediate_name,
                            output_name = output_name,
                            dilation_factors = [1,1])
[ "def", "convert_separable_convolution", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "_check_data_format", "(", "keras_layer", ")", "# Get input and output names", "input_name", ",", "output_name", "=", "(", "inp...
32.556962
17.56962
def click_element(self, locator):
    """Click element identified by `locator`.

    Key attributes for arbitrary elements are `index` and `name`. See
    `introduction` for details about locating elements.
    """
    self._info("Clicking element '%s'." % locator)
    element = self._element_find(locator, True, True)
    element.click()
[ "def", "click_element", "(", "self", ",", "locator", ")", ":", "self", ".", "_info", "(", "\"Clicking element '%s'.\"", "%", "locator", ")", "self", ".", "_element_find", "(", "locator", ",", "True", ",", "True", ")", ".", "click", "(", ")" ]
42.625
16.125
def _is_dir(self, f): '''Check if the given in-dap file is a directory''' return self._tar.getmember(f).type == tarfile.DIRTYPE
[ "def", "_is_dir", "(", "self", ",", "f", ")", ":", "return", "self", ".", "_tar", ".", "getmember", "(", "f", ")", ".", "type", "==", "tarfile", ".", "DIRTYPE" ]
47
19.666667
def remove_file(filepath):
    ''' Delete a file; a missing file is not an error. '''
    target = os.path.abspath(os.path.expanduser(filepath))
    try:
        os.remove(target)
    except OSError as e:
        # Only swallow "no such file"; re-raise anything else
        # (permissions, is-a-directory, ...).
        if e.errno != errno.ENOENT:
            raise
[ "def", "remove_file", "(", "filepath", ")", ":", "try", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "filepath", ")", ")", ")", "except", "OSError", "as", "e", ":", "if", "e", "...
31.857143
16.142857
def basic_range1(ranged_hparams):
  """A basic range of hyperparameters."""
  rhp = ranged_hparams
  # Architecture / size knobs (discrete choices).
  rhp.set_discrete("batch_size", [1024, 2048, 4096])
  rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
  rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
  rhp.set_discrete("kernel_height", [1, 3, 5, 7])
  rhp.set_discrete("kernel_width", [1, 3, 5, 7])
  rhp.set_discrete("compress_steps", [0, 1, 2])
  # Regularization and training dynamics (continuous ranges; LOG_SCALE
  # where the useful values span orders of magnitude).
  rhp.set_float("dropout", 0.0, 0.5)
  rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
  rhp.set_float("label_smoothing", 0.0, 0.2)
  rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
  # Initialization.
  rhp.set_categorical("initializer",
                      ["uniform", "orthogonal", "uniform_unit_scaling"])
  rhp.set_float("initializer_gain", 0.5, 3.5)
  # Learning-rate schedule and optimizer settings.
  rhp.set_categorical("learning_rate_decay_scheme",
                      ["none", "sqrt", "noam", "exp"])
  rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
  rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
  rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
  rhp.set_categorical(
      "optimizer",
      ["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"])
[ "def", "basic_range1", "(", "ranged_hparams", ")", ":", "rhp", "=", "ranged_hparams", "rhp", ".", "set_discrete", "(", "\"batch_size\"", ",", "[", "1024", ",", "2048", ",", "4096", "]", ")", "rhp", ".", "set_discrete", "(", "\"num_hidden_layers\"", ",", "[",...
50
15.8
def geo(self):
    """
    If the message media is geo, geo live or a venue, this returns
    the :tl:`GeoPoint`.
    """
    geo_media_types = (types.MessageMediaGeo,
                       types.MessageMediaGeoLive,
                       types.MessageMediaVenue)
    if isinstance(self.media, geo_media_types):
        return self.media.geo
[ "def", "geo", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "media", ",", "(", "types", ".", "MessageMediaGeo", ",", "types", ".", "MessageMediaGeoLive", ",", "types", ".", "MessageMediaVenue", ")", ")", ":", "return", "self", ".", "media"...
38.333333
12.111111
def remoteDataReceived(self, connection, data):
    """Some data was received from the remote end. Find the matching
    protocol and replay it.
    """
    local = self.getLocalProtocol(connection)
    local.transport.write(data)
    return {}
[ "def", "remoteDataReceived", "(", "self", ",", "connection", ",", "data", ")", ":", "proto", "=", "self", ".", "getLocalProtocol", "(", "connection", ")", "proto", ".", "transport", ".", "write", "(", "data", ")", "return", "{", "}" ]
32.75
11.625
def get_all_slots(cls):
    """Iterates through a class' (`cls`) mro to get all slots as a set."""
    all_slots = set()
    for klass in cls.__mro__:
        slots = getattr(klass, '__slots__', ())
        # `__slots__` might only be a single string, so normalize it to a
        # tuple — otherwise we would add its characters one by one.
        if isinstance(slots, str):
            slots = (slots,)
        all_slots.update(slots)
    return all_slots
[ "def", "get_all_slots", "(", "cls", ")", ":", "slots_iterator", "=", "(", "getattr", "(", "c", ",", "'__slots__'", ",", "(", ")", ")", "for", "c", "in", "cls", ".", "__mro__", ")", "# `__slots__` might only be a single string,", "# so we need to put the strings in...
47.2
15.4
def concat(invises, outvis, timesort=False):
    """Concatenate visibility measurement sets.

    invises (list of str)
      Paths to the input measurement sets
    outvis (str)
      Path to the output measurement set.
    timesort (boolean)
      If true, sort the output in time after concatenation.

    Example::

        from pwkit.environments.casa import tasks
        tasks.concat(['epoch1.ms', 'epoch2.ms'], 'combined.ms')
    """
    tb = util.tools.table()
    ms = util.tools.ms()
    # Refuse to clobber an existing output, and validate every input up
    # front (a measurement set is a directory on disk).
    if os.path.exists(outvis):
        raise RuntimeError('output "%s" already exists' % outvis)
    for invis in invises:
        if not os.path.isdir(invis):
            raise RuntimeError('input "%s" does not exist' % invis)
    # Seed the output with a deep copy of the first input table.
    tb.open(b(invises[0]))
    tb.copy(b(outvis), deep=True, valuecopy=True)
    tb.close()
    # Append the remaining inputs to the copy in place.
    ms.open(b(outvis), nomodify=False)
    for invis in invises[1:]:
        ms.concatenate(msfile=b(invis), freqtol=b(concat_freqtol), dirtol=b(concat_dirtol))
    # Record provenance in the MS HISTORY table ('FT'[int(timesort)] maps
    # False->'F', True->'T').
    ms.writehistory(message=b'taskname=tasklib.concat', origin=b'tasklib.concat')
    ms.writehistory(message=b('vis = ' + ', '.join(invises)), origin=b'tasklib.concat')
    ms.writehistory(message=b('timesort = ' + 'FT'[int(timesort)]), origin=b'tasklib.concat')
    if timesort:
        ms.timesort()
    ms.close()
[ "def", "concat", "(", "invises", ",", "outvis", ",", "timesort", "=", "False", ")", ":", "tb", "=", "util", ".", "tools", ".", "table", "(", ")", "ms", "=", "util", ".", "tools", ".", "ms", "(", ")", "if", "os", ".", "path", ".", "exists", "(",...
29.068182
23.477273
def magic_file(filename):
    """
    Identify the type of a file from its contents and extension.

    :param filename: path to file
    :return: list of possible matches, highest confidence first
    :raises ValueError: if the file is empty
    """
    head, foot = _file_details(filename)
    if not head:
        raise ValueError("Input was empty")
    try:
        matches = _identify_all(head, foot, ext_from_filename(filename))
    except PureError:
        matches = []
    matches.sort(key=lambda match: match.confidence, reverse=True)
    return matches
[ "def", "magic_file", "(", "filename", ")", ":", "head", ",", "foot", "=", "_file_details", "(", "filename", ")", "if", "not", "head", ":", "raise", "ValueError", "(", "\"Input was empty\"", ")", "try", ":", "info", "=", "_identify_all", "(", "head", ",", ...
32.1875
16.1875
def get_vault_query_session(self, proxy):
    """Gets the OsidSession associated with the vault query service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.authorization.VaultQuerySession) - a ``VaultQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_vault_query() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_vault_query()`` is true.*
    """
    if self.supports_vault_query():
        # pylint: disable=no-member
        return sessions.VaultQuerySession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
[ "def", "get_vault_query_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_vault_query", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "VaultQuerySession"...
44.294118
14.470588
def COSTALD(T, Tc, Vc, omega):
    r'''Calculate saturation liquid density using the COSTALD CSP method.

    A popular and accurate estimation method. If possible, fit parameters are
    used; alternatively critical properties work well.

    The density of a liquid is given by:

    .. math::
        V_s=V^*V^{(0)}[1-\omega_{SRK}V^{(\delta)}]

        V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3}
        - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3}

        V^{(\delta)}=\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3}
        {T_r-1.00001}

    Units are that of critical or fit constant volume.

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Vc : float
        Critical volume of fluid [m^3/mol].
        This parameter is alternatively a fit parameter
    omega : float
        (ideally SRK) Acentric factor for fluid, [-]
        This parameter is alternatively a fit parameter.

    Returns
    -------
    Vs : float
        Saturation liquid volume

    Notes
    -----
    196 constants are fit to this function in [1]_.
    Range: 0.25 < Tr < 0.95, often said to be to 1.0
    This function has been checked with the API handbook example problem.

    Examples
    --------
    Propane, from an example in the API Handbook

    >>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097)
    530.3009967969841

    References
    ----------
    .. [1] Hankinson, Risdon W., and George H. Thomson. "A New Correlation
       for Saturated Densities of Liquids and Their Mixtures." AIChE Journal
       25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412
    '''
    Tr = T/Tc
    tau = 1.0 - Tr
    tau_cbrt = tau**(1/3.)
    # Spherical-molecule reference volume, V^(0), in powers of (1-Tr)^(1/3).
    V_0 = (1.0 - 1.52816*tau_cbrt + 1.43907*tau_cbrt*tau_cbrt
           - 0.81446*tau + 0.190454*tau*tau_cbrt)
    # Deviation function V^(delta); the numerator polynomial in Horner form.
    V_delta = ((-0.296123 + Tr*(0.386914 + Tr*(-0.0427258 - 0.0480645*Tr)))
               / (Tr - 1.00001))
    return Vc*V_0*(1.0 - omega*V_delta)
[ "def", "COSTALD", "(", "T", ",", "Tc", ",", "Vc", ",", "omega", ")", ":", "Tr", "=", "T", "/", "Tc", "V_delta", "=", "(", "-", "0.296123", "+", "0.386914", "*", "Tr", "-", "0.0427258", "*", "Tr", "**", "2", "-", "0.0480645", "*", "Tr", "**", ...
29.640625
24.765625
def version():
    """Get the version number without importing the mrcfile package."""
    # Execute version.py in an isolated namespace so nothing from the
    # package itself is imported.
    namespace = {}
    version_path = os.path.join('mrcfile', 'version.py')
    with open(version_path) as f:
        exec(f.read(), namespace)
    return namespace['__version__']
[ "def", "version", "(", ")", ":", "namespace", "=", "{", "}", "with", "open", "(", "os", ".", "path", ".", "join", "(", "'mrcfile'", ",", "'version.py'", ")", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "namespace", ")", "...
38.166667
13
def list_to_pose(poselist, frame_id="", stamp=rospy.Time(0)):
    """
    Convert a pose in the form of a list in PoseStamped

    :param poselist: a pose on the form [[x, y, z], [x, y, z, w]]
    :param frame_id: the frame_id on the outputed pose (facultative, empty otherwise)
    :param stamp: the stamp of the outputed pose (facultative, 0 otherwise)
    :return: the converted geometry_msgs/PoseStampted object
    """
    position, orientation = poselist[0], poselist[1]
    pose = PoseStamped()
    pose.header.frame_id = frame_id
    pose.header.stamp = stamp
    pose.pose.position.x = position[0]
    pose.pose.position.y = position[1]
    pose.pose.position.z = position[2]
    pose.pose.orientation.x = orientation[0]
    pose.pose.orientation.y = orientation[1]
    pose.pose.orientation.z = orientation[2]
    pose.pose.orientation.w = orientation[3]
    return pose
[ "def", "list_to_pose", "(", "poselist", ",", "frame_id", "=", "\"\"", ",", "stamp", "=", "rospy", ".", "Time", "(", "0", ")", ")", ":", "p", "=", "PoseStamped", "(", ")", "p", ".", "header", ".", "frame_id", "=", "frame_id", "p", ".", "header", "."...
41.263158
12.631579
def nodePop(ctxt):
    """Pops the top element node from the node stack """
    ctxt__o = None if ctxt is None else ctxt._o
    ret = libxml2mod.nodePop(ctxt__o)
    if ret is None:
        raise treeError('nodePop() failed')
    # Wrap the raw C-level node in the Python xmlNode wrapper.
    return xmlNode(_obj=ret)
[ "def", "nodePop", "(", "ctxt", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "ret", "=", "libxml2mod", ".", "nodePop", "(", "ctxt__o", ")", "if", "ret", "is", "None", ":", "raise"...
36.428571
9.857143
def main(argv):
    """
    Main function: dispatch on argv to print usage, update this script and
    the autocmake/ infrastructure (--self), or (re)generate the project's
    CMakeLists.txt and setup script.
    """
    # No argument (or too many): print the two-step usage to stderr and
    # exit with a failure status.
    if len(argv) != 2:
        sys.stderr.write("\nYou can update a project in two steps.\n\n")
        sys.stderr.write("Step 1: Update or create infrastructure files\n")
        sys.stderr.write(" which will be needed to configure and build the project:\n")
        sys.stderr.write(" $ {0} --self\n\n".format(argv[0]))
        sys.stderr.write("Step 2: Create CMakeLists.txt and setup script in PROJECT_ROOT:\n")
        sys.stderr.write(" $ {0} <PROJECT_ROOT>\n".format(argv[0]))
        sys.stderr.write(" example:\n")
        sys.stderr.write(" $ {0} ..\n".format(argv[0]))
        sys.exit(-1)

    # Help flag: print the supported invocations and exit successfully.
    if argv[1] in ['-h', '--help']:
        print('Usage:')
        for t, h in [('python update.py --self',
                      'Update this script and fetch or update infrastructure files under autocmake/.'),
                     ('python update.py <builddir>',
                      '(Re)generate CMakeLists.txt and setup script and fetch or update CMake modules.'),
                     ('python update.py (-h | --help)',
                      'Show this help text.')]:
            print(' {0:30} {1}'.format(t, h))
        sys.exit(0)

    if argv[1] == '--self':
        # update self
        # Bootstrap an example config and a .gitignore only if they are
        # not already present (never overwrite the user's files).
        if not os.path.isfile('autocmake.yml'):
            print('- fetching example autocmake.yml')
            fetch_url(
                src='{0}example/autocmake.yml'.format(AUTOCMAKE_GITHUB_URL),
                dst='autocmake.yml'
            )
        if not os.path.isfile('.gitignore'):
            print('- creating .gitignore')
            with open('.gitignore', 'w') as f:
                f.write('*.pyc\n')
        # Fetch (and overwrite) the autocmake infrastructure files,
        # including this script itself.
        for f in ['autocmake/configure.py',
                  'autocmake/__init__.py',
                  'autocmake/external/docopt.py',
                  'autocmake/external/__init__.py',
                  'autocmake/generate.py',
                  'autocmake/extract.py',
                  'autocmake/interpolate.py',
                  'autocmake/parse_rst.py',
                  'autocmake/parse_yaml.py',
                  'update.py']:
            print('- fetching {0}'.format(f))
            fetch_url(
                src='{0}{1}'.format(AUTOCMAKE_GITHUB_URL, f),
                dst='{0}'.format(f)
            )
        # finally create a README.md with licensing information
        with open('README.md', 'w') as f:
            print('- generating licensing information')
            f.write(licensing_info())
        sys.exit(0)

    # Otherwise argv[1] is the project root: generate its build files.
    process_yaml(argv)
[ "def", "main", "(", "argv", ")", ":", "if", "len", "(", "argv", ")", "!=", "2", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nYou can update a project in two steps.\\n\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"Step 1: Update or create infra...
41.147541
17.868852
def p_definitions(self, p):
    # NOTE: the string below is the PLY/yacc grammar production for this
    # rule — it is parsed by the framework, not documentation text.
    'definitions : definitions definition'
    # Left-recursive accumulation: extend the tuple of already-parsed
    # definitions with the new one.
    p[0] = p[1] + (p[2],)
    # Propagate the line number of the first symbol to the result.
    p.set_lineno(0, p.lineno(1))
[ "def", "p_definitions", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "(", "p", "[", "2", "]", ",", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
34.5
8.5
def get_block_overview(block_representation, coin_symbol='btc', txn_limit=None, txn_offset=None, api_key=None):
    """
    Takes a block_representation, coin_symbol and txn_limit and gets an
    overview of that block, including up to X transaction ids.

    Note that block_representation may be the block number or block hash
    """
    assert is_valid_coin_symbol(coin_symbol)
    assert is_valid_block_representation(
        block_representation=block_representation,
        coin_symbol=coin_symbol)

    url = make_url(coin_symbol, **dict(blocks=block_representation))

    # Only include query parameters that were actually supplied.
    params = {key: value for key, value in (('token', api_key),
                                            ('limit', txn_limit),
                                            ('txstart', txn_offset)) if value}

    r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    response_dict = get_valid_json(r)
    if 'error' in response_dict:
        # Pass API errors straight through to the caller.
        return response_dict
    return _clean_block(response_dict=response_dict)
[ "def", "get_block_overview", "(", "block_representation", ",", "coin_symbol", "=", "'btc'", ",", "txn_limit", "=", "None", ",", "txn_offset", "=", "None", ",", "api_key", "=", "None", ")", ":", "assert", "is_valid_coin_symbol", "(", "coin_symbol", ")", "assert",...
33.827586
19.344828
def get_view_menus(self, permission_name):
    """Returns the details of view_menus for a perm name"""
    return {vm_name
            for perm, vm_name in self.get_all_permissions()
            if perm == permission_name}
[ "def", "get_view_menus", "(", "self", ",", "permission_name", ")", ":", "vm", "=", "set", "(", ")", "for", "perm_name", ",", "vm_name", "in", "self", ".", "get_all_permissions", "(", ")", ":", "if", "perm_name", "==", "permission_name", ":", "vm", ".", "...
39.428571
11.571429
def _mergeProteinEntries(proteinLists, protToPeps): """Returns a new "protToPeps" dictionary with entries merged that are present in proteinLists. NOTE: The key of the merged entry is a tuple of the sorted protein keys. This behaviour might change in the future; the tuple might be replaced by simply one of the protein entries which is then representative for all. :param proteinLists: a list of protein groups that will be merged [{protein, ...}, ...] :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :returns: dict, {protein: set([peptid, ...])} """ mergedProtToPeps = dict(protToPeps) for proteins in proteinLists: for protein in proteins: peptides = mergedProtToPeps.pop(protein) mergedProtein = tuple(sorted(proteins)) mergedProtToPeps[mergedProtein] = peptides return mergedProtToPeps
[ "def", "_mergeProteinEntries", "(", "proteinLists", ",", "protToPeps", ")", ":", "mergedProtToPeps", "=", "dict", "(", "protToPeps", ")", "for", "proteins", "in", "proteinLists", ":", "for", "protein", "in", "proteins", ":", "peptides", "=", "mergedProtToPeps", ...
44.5
19.318182
def add_parameter(self, indicator_id, content, name='comment', ptype='string'):
    """
    Add a parameter to the IOC.

    :param indicator_id: The unique Indicator/IndicatorItem id the parameter is associated with.
    :param content: The value of the parameter.
    :param name: The name of the parameter.
    :param ptype: The type of the parameter content.
    :return: True
    :raises: IOCParseError if the indicator_id is not associated with a Indicator or IndicatorItem in the IOC.
    """
    parameters_node = self.parameters
    criteria_node = self.top_level_indicator.getparent()
    # first check for duplicate id,name pairs
    elems = parameters_node.xpath('.//param[@ref-id="{}" and @name="{}"]'.format(indicator_id, name))
    if len(elems) > 0:
        # there is no actual restriction on duplicate parameters
        log.info('Duplicate (id,name) parameter pair will be inserted [{}][{}].'.format(indicator_id, name))
    # now check to make sure the id is present in the IOC logic
    elems = criteria_node.xpath(
        './/IndicatorItem[@id="{}"]|.//Indicator[@id="{}"]'.format(indicator_id, indicator_id))
    if len(elems) == 0:
        # The referenced id must exist somewhere in the IOC criteria tree.
        raise IOCParseError('ID does not exist in the IOC [{}][{}].'.format(str(indicator_id), str(content)))
    parameters_node.append(ioc_et.make_param_node(indicator_id, content, name, ptype))
    return True
[ "def", "add_parameter", "(", "self", ",", "indicator_id", ",", "content", ",", "name", "=", "'comment'", ",", "ptype", "=", "'string'", ")", ":", "parameters_node", "=", "self", ".", "parameters", "criteria_node", "=", "self", ".", "top_level_indicator", ".", ...
57.72
29.16
def accept(self, offer_ids, operations, filters=None):
    """Accepts the given offers and performs a sequence of operations on
    those accepted offers.

    See Offer.Operation in mesos.proto for the set of available operations.
    Available resources are aggregated when multiple offers are provided.
    Note that all offers must belong to the same slave. Any unused resources
    will be considered declined. The specified filters are applied on all
    unused resources (see mesos.proto for a description of Filters).

    :param offer_ids: offers to accept
    :param operations: Offer.Operation sequence to perform on them
    :param filters: Filters applied to unused resources; a fresh
        ``Filters()`` is used when omitted (avoids a shared mutable
        default-argument instance)
    """
    if filters is None:
        filters = Filters()
    logging.info('Accepts offers {}'.format(offer_ids))
    # Materialize lists explicitly: under Python 3 a bare map() is a lazy
    # iterator, which the underlying driver may not accept.
    return self.driver.acceptOffers([encode(offer) for offer in offer_ids],
                                    [encode(op) for op in operations],
                                    encode(filters))
[ "def", "accept", "(", "self", ",", "offer_ids", ",", "operations", ",", "filters", "=", "Filters", "(", ")", ")", ":", "logging", ".", "info", "(", "'Accepts offers {}'", ".", "format", "(", "offer_ids", ")", ")", "return", "self", ".", "driver", ".", ...
53.733333
24.666667
def _default_styles_xml(cls):
    """
    Return a bytestream containing XML for a default styles part.
    """
    # The template ships alongside this package, one directory up.
    template_dir = os.path.join(os.path.split(__file__)[0], '..', 'templates')
    path = os.path.join(template_dir, 'default-styles.xml')
    with open(path, 'rb') as f:
        return f.read()
[ "def", "_default_styles_xml", "(", "cls", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "split", "(", "__file__", ")", "[", "0", "]", ",", "'..'", ",", "'templates'", ",", "'default-styles.xml'", ")", "with", "op...
30.727273
12.545455
def get_genes_for_hgnc_id(self, hgnc_symbol):
    """ obtain the ensembl gene IDs that correspond to a HGNC symbol
    """
    headers = {"content-type": "application/json"}

    # example query:
    # http://grch37.rest.ensembl.org/xrefs/symbol/homo_sapiens/KMT2A?content-type=application/json
    self.attempt = 0
    ext = "/xrefs/symbol/homo_sapiens/{}".format(hgnc_symbol)
    response = self.ensembl_request(ext, headers)

    # keep only the cross-references that are genes
    return [entry["id"] for entry in json.loads(response)
            if entry["type"] == "gene"]
[ "def", "get_genes_for_hgnc_id", "(", "self", ",", "hgnc_symbol", ")", ":", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "# http://grch37.rest.ensembl.org/xrefs/symbol/homo_sapiens/KMT2A?content-type=application/json", "self", ".", "attempt", "=", ...
33.833333
18.777778
def rtl_any(*vectorlist): """ Hardware equivalent of python native "any". :param WireVector vectorlist: all arguments are WireVectors of length 1 :return: WireVector of length 1 Returns a 1-bit WireVector which will hold a '1' if any of the inputs are '1' (i.e. it is a big ol' OR gate) """ if len(vectorlist) <= 0: raise PyrtlError('rtl_any requires at least 1 argument') converted_vectorlist = [as_wires(v) for v in vectorlist] if any(len(v) != 1 for v in converted_vectorlist): raise PyrtlError('only length 1 WireVectors can be inputs to rtl_any') return or_all_bits(concat_list(converted_vectorlist))
[ "def", "rtl_any", "(", "*", "vectorlist", ")", ":", "if", "len", "(", "vectorlist", ")", "<=", "0", ":", "raise", "PyrtlError", "(", "'rtl_any requires at least 1 argument'", ")", "converted_vectorlist", "=", "[", "as_wires", "(", "v", ")", "for", "v", "in",...
43.266667
19.666667
def astype(array, y): """A functional form of the `astype` method. Args: array: The array or number to cast. y: An array or number, as the input, whose type should be that of array. Returns: An array or number with the same dtype as `y`. """ if isinstance(y, autograd.core.Node): return array.astype(numpy.array(y.value).dtype) return array.astype(numpy.array(y).dtype)
[ "def", "astype", "(", "array", ",", "y", ")", ":", "if", "isinstance", "(", "y", ",", "autograd", ".", "core", ".", "Node", ")", ":", "return", "array", ".", "astype", "(", "numpy", ".", "array", "(", "y", ".", "value", ")", ".", "dtype", ")", ...
29.769231
17.230769
def _send_pub(self, load): ''' Take a load and send it across the network to connected minions ''' for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.publish(load)
[ "def", "_send_pub", "(", "self", ",", "load", ")", ":", "for", "transport", ",", "opts", "in", "iter_transport_opts", "(", "self", ".", "opts", ")", ":", "chan", "=", "salt", ".", "transport", ".", "server", ".", "PubServerChannel", ".", "factory", "(", ...
40.285714
23.714286
def loadFullValue(self, seq, scope_attrs): """ Evaluate full value for async Console variables in a separate thread and send results to IDE side :param seq: id of command :param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR (i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\attr1\tattr2) :return: """ frame_variables = self.get_namespace() var_objects = [] vars = scope_attrs.split(NEXT_VALUE_SEPARATOR) for var_attrs in vars: if '\t' in var_attrs: name, attrs = var_attrs.split('\t', 1) else: name = var_attrs attrs = None if name in frame_variables: var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs) var_objects.append((var_object, name)) else: var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables) var_objects.append((var_object, name)) from _pydevd_bundle.pydevd_comm import GetValueAsyncThreadConsole t = GetValueAsyncThreadConsole(self.get_server(), seq, var_objects) t.start()
[ "def", "loadFullValue", "(", "self", ",", "seq", ",", "scope_attrs", ")", ":", "frame_variables", "=", "self", ".", "get_namespace", "(", ")", "var_objects", "=", "[", "]", "vars", "=", "scope_attrs", ".", "split", "(", "NEXT_VALUE_SEPARATOR", ")", "for", ...
43.821429
22.607143
def _cell_attribute_append(self, selection, tab, attributes): """Appends to cell_attributes with checks""" cell_attributes = self.code_array.cell_attributes thick_bottom_cells = [] thick_right_cells = [] # Does any cell in selection.cells have a larger bottom border? if "borderwidth_bottom" in attributes: bwidth = attributes["borderwidth_bottom"] for row, col in selection.cells: __bwidth = cell_attributes[row, col, tab]["borderwidth_bottom"] if __bwidth > bwidth: thick_bottom_cells.append((row, col)) # Does any cell in selection.cells have a larger right border? if "borderwidth_right" in attributes: rwidth = attributes["borderwidth_right"] for row, col in selection.cells: __rwidth = cell_attributes[row, col, tab]["borderwidth_right"] if __rwidth > rwidth: thick_right_cells.append((row, col)) for thick_cell in thick_bottom_cells + thick_right_cells: try: selection.cells.remove(thick_cell) except ValueError: pass cell_attributes.append((selection, tab, attributes)) if thick_bottom_cells: bsel = copy(selection) bsel.cells = thick_bottom_cells battrs = copy(attributes) battrs.pop("borderwidth_bottom") cell_attributes.append((bsel, tab, battrs)) if thick_right_cells: rsel = copy(selection) rsel.cells = thick_right_cells rattrs = copy(attributes) rattrs.pop("borderwidth_right") cell_attributes.append((rsel, tab, rattrs))
[ "def", "_cell_attribute_append", "(", "self", ",", "selection", ",", "tab", ",", "attributes", ")", ":", "cell_attributes", "=", "self", ".", "code_array", ".", "cell_attributes", "thick_bottom_cells", "=", "[", "]", "thick_right_cells", "=", "[", "]", "# Does a...
37.478261
18
def tcp_server(tcp_addr, settings): """Start up the tcp server, send the settings.""" family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP) sock.bind(tcp_addr) sock.listen(1) logging.info("Waiting for connection on %s", tcp_addr) conn, addr = sock.accept() logging.info("Accepted connection from %s", Addr(*addr)) # Send map_data independently for py2/3 and json encoding reasons. write_tcp(conn, settings["map_data"]) send_settings = {k: v for k, v in settings.items() if k != "map_data"} logging.debug("settings: %s", send_settings) write_tcp(conn, json.dumps(send_settings).encode()) return conn
[ "def", "tcp_server", "(", "tcp_addr", ",", "settings", ")", ":", "family", "=", "socket", ".", "AF_INET6", "if", "\":\"", "in", "tcp_addr", ".", "ip", "else", "socket", ".", "AF_INET", "sock", "=", "socket", ".", "socket", "(", "family", ",", "socket", ...
43.375
18.6875
def mxmt(m1, m2): """ Multiply a 3x3 matrix and the transpose of another 3x3 matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmt_c.html :param m1: 3x3 double precision matrix. :type m1: 3x3-Element Array of floats :param m2: 3x3 double precision matrix. :type m2: 3x3-Element Array of floats :return: The product m1 times m2 transpose. :rtype: float """ m1 = stypes.toDoubleMatrix(m1) m2 = stypes.toDoubleMatrix(m2) mout = stypes.emptyDoubleMatrix() libspice.mxmt_c(m1, m2, mout) return stypes.cMatrixToNumpy(mout)
[ "def", "mxmt", "(", "m1", ",", "m2", ")", ":", "m1", "=", "stypes", ".", "toDoubleMatrix", "(", "m1", ")", "m2", "=", "stypes", ".", "toDoubleMatrix", "(", "m2", ")", "mout", "=", "stypes", ".", "emptyDoubleMatrix", "(", ")", "libspice", ".", "mxmt_c...
32
12.333333
def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER): """js has to be a javascript source code. returns equivalent python code. compile plans only work with the following restrictions: - only enabled for oneliner expressions - when there are comments in the js code string substitution is disabled - when there nested escaped quotes string substitution is disabled, so cacheable: Q1 == 1 && name == 'harry' not cacheable: Q1 == 1 && name == 'harry' // some comment not cacheable: Q1 == 1 && name == 'o\'Reilly' not cacheable: Q1 == 1 && name /* some comment */ == 'o\'Reilly' """ match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan( js) cp_hash = hashlib.md5(compilation_plan.encode('utf-8')).digest() try: python_code = cache[cp_hash]['proto_python_code'] except: parser = pyjsparser.PyJsParser() parsed = parser.parse(compilation_plan) # js to esprima syntax tree # Another way of doing that would be with my auto esprima translation but its much slower and causes import problems: # parsed = esprima.parse(js).to_dict() translating_nodes.clean_stacks() python_code = translating_nodes.trans( parsed) # syntax tree to python code cache[cp_hash] = { 'compilation_plan': compilation_plan, 'proto_python_code': python_code, } python_code = match_increaser_str.wrap_up(python_code) python_code = match_increaser_num.wrap_up(python_code) return HEADER + python_code
[ "def", "translate_js_with_compilation_plan", "(", "js", ",", "HEADER", "=", "DEFAULT_HEADER", ")", ":", "match_increaser_str", ",", "match_increaser_num", ",", "compilation_plan", "=", "get_compilation_plan", "(", "js", ")", "cp_hash", "=", "hashlib", ".", "md5", "(...
35.955556
23
def enrich(self, gmt): """use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes """ if isscalar(self.background): if isinstance(self.background, int) or self.background.isdigit(): self._bg = int(self.background) elif isinstance(self.background, str): # self.background = set(reduce(lambda x,y: x+y, gmt.values(),[])) self._bg = self.get_background() self._logger.info("Background: found %s genes"%(len(self._bg))) else: raise Exception("Unsupported background data type") else: # handle array object: nd.array, list, tuple, set, Series try: it = iter(self.background) self._bg = set(self.background) except TypeError: self._logger.error("Unsupported background data type") # statistical testing hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt, background=self._bg)) if len(hgtest) > 0: terms, pvals, olsz, gsetsz, genes = hgtest fdrs, rej = multiple_testing_correction(ps = pvals, alpha=self.cutoff, method='benjamini-hochberg') # save to a dataframe odict = OrderedDict() odict['Term'] = terms odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz)) odict['P-value'] = pvals odict['Adjusted P-value'] = fdrs # odict['Reject (FDR< %s)'%self.cutoff ] = rej odict['Genes'] = [";".join(g) for g in genes] res = pd.DataFrame(odict) return res return
[ "def", "enrich", "(", "self", ",", "gmt", ")", ":", "if", "isscalar", "(", "self", ".", "background", ")", ":", "if", "isinstance", "(", "self", ".", "background", ",", "int", ")", "or", "self", ".", "background", ".", "isdigit", "(", ")", ":", "se...
40.529412
20.372549
def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing): """ Base of the skeleton for voxel based boundary term calculation. This function holds the low level procedures shared by nearly all boundary terms. @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image containing the voxel intensity values @type image numpy.ndarray @param boundary_term A function to compute the boundary term over an array of absolute intensity differences @type boundary_term function @param neighbourhood_function A function that takes two arrays of neighbouring pixels and computes an intensity term from them that is returned as a single array of the same shape @type neighbourhood_function function @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. @param spacing sequence | False """ image = scipy.asarray(image) image = image.astype(scipy.float_) # iterate over the image dimensions and for each create the appropriate edges and compute the associated weights for dim in range(image.ndim): # construct slice-objects for the current dimension slices_exclude_last = [slice(None)] * image.ndim slices_exclude_last[dim] = slice(-1) slices_exclude_first = [slice(None)] * image.ndim slices_exclude_first[dim] = slice(1, None) # compute difference between all layers in the current dimensions direction neighbourhood_intensity_term = neighbourhood_function(image[slices_exclude_last], image[slices_exclude_first]) # apply boundary term neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term) # compute key offset for relative key difference offset_key = [1 if i == dim else 0 for i in range(image.ndim)] offset = __flatten_index(offset_key, image.shape) # generate index offset function for index dependent offset idx_offset_divider = (image.shape[dim] - 1) * offset idx_offset 
= lambda x: int(x / idx_offset_divider) * offset # weight the computed distanced in dimension dim by the corresponding slice spacing provided if spacing: neighbourhood_intensity_term /= spacing[dim] for key, value in enumerate(neighbourhood_intensity_term.ravel()): # apply index dependent offset key += idx_offset(key) # add edges and set the weight graph.set_nweight(key, key + offset, value, value)
[ "def", "__skeleton_base", "(", "graph", ",", "image", ",", "boundary_term", ",", "neighbourhood_function", ",", "spacing", ")", ":", "image", "=", "scipy", ".", "asarray", "(", "image", ")", "image", "=", "image", ".", "astype", "(", "scipy", ".", "float_"...
54.470588
25.568627
def _best_match_syn(self, sx, sys, scope_map): """ The best match is determined by the highest magnitude weight """ SUBSTRING_WEIGHT = 0.2 WBEST = None sbest = None sxv = self._standardize_label(sx.val) sxp = self._id_to_ontology(sx.class_id) for sy in sys: syv = self._standardize_label(sy.val) syp = self._id_to_ontology(sy.class_id) W = None if sxv == syv: confidence = sx.confidence * sy.confidence if sx.is_abbreviation() or sy.is_abbreviation: confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5) confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5) W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2) elif sxv in syv: W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0)) elif syv in sxv: W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0)) if W is not None: # The best match is determined by the highest magnitude weight if WBEST is None or max(abs(W)) > max(abs(WBEST)): WBEST = W sbest = sy return WBEST, sbest
[ "def", "_best_match_syn", "(", "self", ",", "sx", ",", "sys", ",", "scope_map", ")", ":", "SUBSTRING_WEIGHT", "=", "0.2", "WBEST", "=", "None", "sbest", "=", "None", "sxv", "=", "self", ".", "_standardize_label", "(", "sx", ".", "val", ")", "sxp", "=",...
44.965517
18.758621
def iter_extensions(extension): """ Depth-first iterator over sub-extensions on `extension`. """ for _, ext in inspect.getmembers(extension, is_extension): for item in iter_extensions(ext): yield item yield ext
[ "def", "iter_extensions", "(", "extension", ")", ":", "for", "_", ",", "ext", "in", "inspect", ".", "getmembers", "(", "extension", ",", "is_extension", ")", ":", "for", "item", "in", "iter_extensions", "(", "ext", ")", ":", "yield", "item", "yield", "ex...
34.857143
10.428571
def get_function_url(self, function): """ Registers the given callable in the system (if it isn't already) and returns the URL that can be used to invoke the given function from remote. """ assert self._opened, "RPC System is not opened" logging.debug("get_function_url(%s)" % repr(function)) if function in ~self._functions: functionid = self._functions[:function] else: functionid = uuid.uuid1() self._functions[functionid] = function return "anycall://%s/functions/%s" % (self._connectionpool.ownid, functionid.hex)
[ "def", "get_function_url", "(", "self", ",", "function", ")", ":", "assert", "self", ".", "_opened", ",", "\"RPC System is not opened\"", "logging", ".", "debug", "(", "\"get_function_url(%s)\"", "%", "repr", "(", "function", ")", ")", "if", "function", "in", ...
47.230769
16.769231
def _resolve_dut_count(self): """ Calculates total amount of resources required and their types. :return: Nothing, modifies _dut_count, _hardware_count and _process_count :raises: ValueError if total count does not match counts of types separately. """ self._dut_count = len(self._dut_requirements) self._resolve_process_count() self._resolve_hardware_count() if self._dut_count != self._hardware_count + self._process_count: raise ValueError("Missing or invalid type fields in dut configuration!")
[ "def", "_resolve_dut_count", "(", "self", ")", ":", "self", ".", "_dut_count", "=", "len", "(", "self", ".", "_dut_requirements", ")", "self", ".", "_resolve_process_count", "(", ")", "self", ".", "_resolve_hardware_count", "(", ")", "if", "self", ".", "_dut...
44.538462
20.384615
def rows_from_csv(filename, predicate=None, encoding='utf-8'): """\ Returns an iterator over all rows in the provided CSV `filename`. `filename` Absolute path to a file to read the cables from. The file must be a CSV file with the following columns: <identifier>, <creation-date>, <reference-id>, <origin>, <classification-level>, <references-to-other-cables>, <header>, <body> The delimiter must be a comma (``,``) and the content must be enclosed in double quotes (``"``). `predicate` A predicate that is invoked for each cable reference identifier. If the predicate evaluates to ``False`` the cable is ignored. By default, all cables are used. I.e. ``cables_from_csv('cables.csv', lambda r: r.startswith('09'))`` would return cables where the reference identifier starts with ``09``. `encoding` The file encoding (``UTF-8`` by default). """ pred = predicate or bool with open(filename, 'rb') as f: for row in _UnicodeReader(f, encoding=encoding, delimiter=',', quotechar='"', escapechar='\\'): ident, created, reference_id, origin, classification, references, header, body = row if row and pred(reference_id): yield ident, created, reference_id, origin, classification, references, header, body
[ "def", "rows_from_csv", "(", "filename", ",", "predicate", "=", "None", ",", "encoding", "=", "'utf-8'", ")", ":", "pred", "=", "predicate", "or", "bool", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "for", "row", "in", "_UnicodeR...
55.625
29.5
def humanise_exception(exception): """Humanise a python exception by giving the class name and traceback. The function will return a tuple with the exception name and the traceback. :param exception: Exception object. :type exception: Exception :return: A tuple with the exception name and the traceback. :rtype: (str, str) """ trace = ''.join(traceback.format_tb(sys.exc_info()[2])) name = exception.__class__.__name__ return name, trace
[ "def", "humanise_exception", "(", "exception", ")", ":", "trace", "=", "''", ".", "join", "(", "traceback", ".", "format_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", ")", "name", "=", "exception", ".", "__class__", ".", "__name__", ...
33.428571
18.214286
def size(self): """ -> #int number of keys in this instance """ return int(self._client.hget(self._bucket_key, self.key_prefix) or 0)
[ "def", "size", "(", "self", ")", ":", "return", "int", "(", "self", ".", "_client", ".", "hget", "(", "self", ".", "_bucket_key", ",", "self", ".", "key_prefix", ")", "or", "0", ")" ]
49
20.666667
def update_assignment_override(self, id, course_id, assignment_id, assignment_override_due_at=None, assignment_override_lock_at=None, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_unlock_at=None): """ Update an assignment override. All current overridden values must be supplied if they are to be retained; e.g. if due_at was overridden, but this PUT omits a value for due_at, due_at will no longer be overridden. If the override is adhoc and student_ids is not supplied, the target override set is unchanged. Target override sets cannot be changed for group or section overrides. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - assignment_override[student_ids] """The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targetted by a different adhoc override. Ignored unless the override being updated is adhoc.""" if assignment_override_student_ids is not None: data["assignment_override[student_ids]"] = assignment_override_student_ids # OPTIONAL - assignment_override[title] """The title of an adhoc assignment override. Ignored unless the override being updated is adhoc.""" if assignment_override_title is not None: data["assignment_override[title]"] = assignment_override_title # OPTIONAL - assignment_override[due_at] """The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. 
May be present but null to indicate the override removes any previous due date.""" if assignment_override_due_at is not None: data["assignment_override[due_at]"] = assignment_override_due_at # OPTIONAL - assignment_override[unlock_at] """The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.""" if assignment_override_unlock_at is not None: data["assignment_override[unlock_at]"] = assignment_override_unlock_at # OPTIONAL - assignment_override[lock_at] """The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. May be present but null to indicate the override removes any previous lock date.""" if assignment_override_lock_at is not None: data["assignment_override[lock_at]"] = assignment_override_lock_at self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_assignment_override", "(", "self", ",", "id", ",", "course_id", ",", "assignment_id", ",", "assignment_override_due_at", "=", "None", ",", "assignment_override_lock_at", "=", "None", ",", "assignment_override_student_ids", "=", "None", ",", "assignment_ov...
51.685714
27.942857
def process_package(self, package_name): """ Build artifacts declared for the given package. """ metadata = super(ArtifactRegistry, self).process_package(package_name) if metadata: self.update_artifact_metadata(package_name, metadata)
[ "def", "process_package", "(", "self", ",", "package_name", ")", ":", "metadata", "=", "super", "(", "ArtifactRegistry", ",", "self", ")", ".", "process_package", "(", "package_name", ")", "if", "metadata", ":", "self", ".", "update_artifact_metadata", "(", "p...
35
17.25
def get_option_set_by_id(cls, option_set_id, **kwargs): """Find OptionSet Return single instance of OptionSet by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_option_set_by_id(option_set_id, async=True) >>> result = thread.get() :param async bool :param str option_set_id: ID of optionSet to return (required) :return: OptionSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_option_set_by_id_with_http_info(option_set_id, **kwargs) else: (data) = cls._get_option_set_by_id_with_http_info(option_set_id, **kwargs) return data
[ "def", "get_option_set_by_id", "(", "cls", ",", "option_set_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_option_set_by_i...
42.238095
20.428571
def __create_paleo_col(l, col_count): """ Receive split list from separate_data_vars, and turn it into a dictionary for that column :param list l: :param int col_count: :return dict: """ # Format: what, material, error, units, seasonality, archive, detail, method, # C or N for Character or Numeric data, direction of relation to climate (positive or negative) d = OrderedDict() d['number'] = col_count for idx, var_name in enumerate(NOAA_KEYS_BY_SECTION["Variables"]): try: value = l[idx] # These two cases are nested in the column, so treat them special if var_name == "seasonality": d["climateInterpretation"] = {var_name: value} elif var_name == "uncertainty": d["calibration"] = {var_name: value} # All other cases are root items in the column, so add normally else: d[var_name] = value except IndexError as e: logger_noaa_lpd.debug("create_var_col: IndexError: var: {}, {}".format(var_name, e)) return d
[ "def", "__create_paleo_col", "(", "l", ",", "col_count", ")", ":", "# Format: what, material, error, units, seasonality, archive, detail, method,", "# C or N for Character or Numeric data, direction of relation to climate (positive or negative)", "d", "=", "OrderedDict", "(", ")", "d",...
47.08
22.04
def WriteClientStartupInfo(self, client_id, new_si): """Handle a startup event.""" drift = rdfvalue.Duration("5m") if data_store.RelationalDBEnabled(): current_si = data_store.REL_DB.ReadClientStartupInfo(client_id) # We write the updated record if the client_info has any changes # or the boot time is more than 5 minutes different. if (not current_si or current_si.client_info != new_si.client_info or not current_si.boot_time or abs(current_si.boot_time - new_si.boot_time) > drift): try: data_store.REL_DB.WriteClientStartupInfo(client_id, new_si) except db.UnknownClientError: # On first contact with a new client, this write will fail. logging.info("Can't write StartupInfo for unknown client %s", client_id) else: changes = False with aff4.FACTORY.Create( client_id, aff4_grr.VFSGRRClient, mode="rw", token=self.token) as client: old_info = client.Get(client.Schema.CLIENT_INFO) old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0) info = new_si.client_info # Only write to the datastore if we have new information. if info != old_info: client.Set(client.Schema.CLIENT_INFO(info)) changes = True client.AddLabels(info.labels, owner="GRR") # Allow for some drift in the boot times (5 minutes). if not old_boot or abs(old_boot - new_si.boot_time) > drift: client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time)) changes = True if data_store.RelationalDBEnabled() and changes: try: data_store.REL_DB.WriteClientStartupInfo(client_id, new_si) except db.UnknownClientError: pass
[ "def", "WriteClientStartupInfo", "(", "self", ",", "client_id", ",", "new_si", ")", ":", "drift", "=", "rdfvalue", ".", "Duration", "(", "\"5m\"", ")", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "current_si", "=", "data_store", ".", "REL_...
39
21.155556
def rebind_string(self, keysym, newstring): """Change the translation of KEYSYM to NEWSTRING. If NEWSTRING is None, remove old translation if any. """ if newstring is None: try: del self.keysym_translations[keysym] except KeyError: pass else: self.keysym_translations[keysym] = newstring
[ "def", "rebind_string", "(", "self", ",", "keysym", ",", "newstring", ")", ":", "if", "newstring", "is", "None", ":", "try", ":", "del", "self", ".", "keysym_translations", "[", "keysym", "]", "except", "KeyError", ":", "pass", "else", ":", "self", ".", ...
35
13.181818
def get_printable(iterable): """ Get printable characters from the specified string. Note that str.isprintable() is not available in Python 2. """ if iterable: return ''.join(i for i in iterable if i in string.printable) return ''
[ "def", "get_printable", "(", "iterable", ")", ":", "if", "iterable", ":", "return", "''", ".", "join", "(", "i", "for", "i", "in", "iterable", "if", "i", "in", "string", ".", "printable", ")", "return", "''" ]
31.875
15.875
def create(self, data, **kwargs): """Create a new object. Args: data (dict): parameters to send to the server to create the resource **kwargs: Extra options to send to the server (e.g. sudo) Returns: RESTObject: a new instance of the managed object class built with the data sent by the server Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request """ self._check_missing_create_attrs(data) files = {} # We get the attributes that need some special transformation types = getattr(self, '_types', {}) if types: # Duplicate data to avoid messing with what the user sent us data = data.copy() for attr_name, type_cls in types.items(): if attr_name in data.keys(): type_obj = type_cls(data[attr_name]) # if the type if FileAttribute we need to pass the data as # file if issubclass(type_cls, g_types.FileAttribute): k = type_obj.get_file_name(attr_name) files[attr_name] = (k, data.pop(attr_name)) else: data[attr_name] = type_obj.get_for_api() # Handle specific URL for creation path = kwargs.pop('path', self.path) server_data = self.gitlab.http_post(path, post_data=data, files=files, **kwargs) return self._obj_cls(self, server_data)
[ "def", "create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_missing_create_attrs", "(", "data", ")", "files", "=", "{", "}", "# We get the attributes that need some special transformation", "types", "=", "getattr", "(", "se...
40.341463
21.463415
def whois_emails(self, emails): """Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result} """ api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
[ "def", "whois_emails", "(", "self", ",", "emails", ")", ":", "api_name", "=", "'opendns-whois-emails'", "fmt_url_path", "=", "u'whois/emails/{0}'", "return", "self", ".", "_multi_get", "(", "api_name", ",", "fmt_url_path", ",", "emails", ")" ]
31.636364
12.636364
def untlpy2highwirepy(untl_elements, **kwargs): """Convert a UNTL Python object to a highwire Python object.""" highwire_list = [] title = None publisher = None creation = None escape = kwargs.get('escape', False) for element in untl_elements.children: # If the UNTL element should be converted to highwire, # create highwire element. if element.tag in HIGHWIRE_CONVERSION_DISPATCH: highwire_element = HIGHWIRE_CONVERSION_DISPATCH[element.tag]( qualifier=element.qualifier, content=element.content, children=element.children, escape=escape, ) if highwire_element: if element.tag == 'title': if element.qualifier != 'officialtitle' and not title: title = highwire_element elif element.qualifier == 'officialtitle': title = highwire_element elif element.tag == 'publisher': if not publisher: # This is the first publisher element. publisher = highwire_element highwire_list.append(publisher) elif element.tag == 'date': # If a creation date hasn't been found yet, # verify this date is acceptable. if not creation and element.qualifier == 'creation': if highwire_element.content: creation = highwire_element if creation: highwire_list.append(creation) # Otherwise, add the element to the list if it has content. elif highwire_element.content: highwire_list.append(highwire_element) # If the title was found, add it to the list. if title: highwire_list.append(title) return highwire_list
[ "def", "untlpy2highwirepy", "(", "untl_elements", ",", "*", "*", "kwargs", ")", ":", "highwire_list", "=", "[", "]", "title", "=", "None", "publisher", "=", "None", "creation", "=", "None", "escape", "=", "kwargs", ".", "get", "(", "'escape'", ",", "Fals...
45.813953
13.790698
def _splitstrip(string, sep=","): """return a list of stripped string by splitting the string given as argument on `sep` (',' by default). Empty string are discarded. >>> _splitstrip('a, b, c , 4,,') ['a', 'b', 'c', '4'] >>> _splitstrip('a') ['a'] >>> _splitstrip('a,\nb,\nc,') ['a', 'b', 'c'] :type string: str or unicode :param string: a csv line :type sep: str or unicode :param sep: field separator, default to the comma (',') :rtype: str or unicode :return: the unquoted string (or the input string if it wasn't quoted) """ return [word.strip() for word in string.split(sep) if word.strip()]
[ "def", "_splitstrip", "(", "string", ",", "sep", "=", "\",\"", ")", ":", "return", "[", "word", ".", "strip", "(", ")", "for", "word", "in", "string", ".", "split", "(", "sep", ")", "if", "word", ".", "strip", "(", ")", "]" ]
30.809524
19.714286
def to_query(self):
    """Return a json-serializable dict merging every field's query.

    Later fields overwrite keys produced by earlier ones (dict.update
    semantics).
    """
    merged = {}
    for part in (field.to_query() for field in self.fields):
        merged.update(part)
    return merged
[ "def", "to_query", "(", "self", ")", ":", "query", "=", "{", "}", "for", "field_instance", "in", "self", ".", "fields", ":", "query", ".", "update", "(", "field_instance", ".", "to_query", "(", ")", ")", "return", "query" ]
22.3
16.7
def read(string):
    """
    Read a graph from a XML document and return it. Nodes and edges
    specified in the input will be added to the current graph.

    @type  string: string
    @param string: Input string in XML format specifying a graph.

    @rtype:  graph
    @return: Graph
    """
    dom = parseString(string)

    # Pick the graph class matching the document's root tag.
    if dom.getElementsByTagName("graph"):
        G = graph()
    elif dom.getElementsByTagName("digraph"):
        G = digraph()
    elif dom.getElementsByTagName("hypergraph"):
        # Hypergraphs are handled by their dedicated reader.
        return read_hypergraph(string)
    else:
        raise InvalidGraphType

    # Read nodes...
    for node_el in dom.getElementsByTagName("node"):
        node_id = node_el.getAttribute('id')
        G.add_node(node_id)
        for attr_el in node_el.getElementsByTagName("attribute"):
            G.add_node_attribute(node_id,
                                 (attr_el.getAttribute('attr'),
                                  attr_el.getAttribute('value')))

    # Read edges...
    for edge_el in dom.getElementsByTagName("edge"):
        edge = (edge_el.getAttribute('from'), edge_el.getAttribute('to'))
        if not G.has_edge(edge):
            G.add_edge(edge,
                       wt=float(edge_el.getAttribute('wt')),
                       label=edge_el.getAttribute('label'))
        for attr_el in edge_el.getElementsByTagName("attribute"):
            attr_tuple = (attr_el.getAttribute('attr'),
                          attr_el.getAttribute('value'))
            if attr_tuple not in G.edge_attributes(edge):
                G.add_edge_attribute(edge, attr_tuple)

    return G
[ "def", "read", "(", "string", ")", ":", "dom", "=", "parseString", "(", "string", ")", "if", "dom", ".", "getElementsByTagName", "(", "\"graph\"", ")", ":", "G", "=", "graph", "(", ")", "elif", "dom", ".", "getElementsByTagName", "(", "\"digraph\"", ")",...
41.428571
23.809524
def StringEscape(self, string, match, **_):
    """Escape backslashes found inside a string quote.

    Backslashes followed by anything other than ['"rnbt] will just be
    included in the string.

    Args:
      string: The string that matched.
      match: The match object (m.group(1) is the escaped code)
    """
    precondition.AssertType(string, Text)

    escaped_code = match.group(1)
    if escaped_code not in "'\"rnbt":
        # Unknown escape sequence: keep the raw text as-is.
        self.string += string
    else:
        self.string += compatibility.UnescapeString(string)
[ "def", "StringEscape", "(", "self", ",", "string", ",", "match", ",", "*", "*", "_", ")", ":", "precondition", ".", "AssertType", "(", "string", ",", "Text", ")", "if", "match", ".", "group", "(", "1", ")", "in", "\"'\\\"rnbt\"", ":", "self", ".", ...
32
17.666667
def str2dict(dotted_str, value=None, separator='.'):
    """Build a nested dict from a *separator*-delimited string.

    Each path segment becomes a key of a nested dict.  When *value* is
    not None it is stored under the final segment; otherwise the leaf
    stays an empty dict.
    """
    result = {}
    parts = dotted_str.split(separator)
    node = result
    # Walk/create the intermediate levels.
    for key in parts[:-1]:
        node = node.setdefault(key, {})
    # Ensure the leaf exists, then overwrite it when a value was given.
    node.setdefault(parts[-1], {})
    if value is not None:
        node[parts[-1]] = value
    return result
[ "def", "str2dict", "(", "dotted_str", ",", "value", "=", "None", ",", "separator", "=", "'.'", ")", ":", "dict_", "=", "{", "}", "parts", "=", "dotted_str", ".", "split", "(", "separator", ")", "d", ",", "prev", "=", "dict_", ",", "None", "for", "p...
29.333333
14.833333
def volume(self):
    """
    Mesh volume - will throw a VTK error/warning if not a closed surface

    Returns
    -------
    volume : float
        Total volume of the mesh.
    """
    mass_props = vtk.vtkMassProperties()
    # Feed the triangulated mesh into VTK's mass-properties filter.
    mass_props.SetInputData(self.tri_filter())
    return mass_props.GetVolume()
[ "def", "volume", "(", "self", ")", ":", "mprop", "=", "vtk", ".", "vtkMassProperties", "(", ")", "mprop", ".", "SetInputData", "(", "self", ".", "tri_filter", "(", ")", ")", "return", "mprop", ".", "GetVolume", "(", ")" ]
24.615385
17.230769
def check_input(self, token):
    """
    Performs checks on the input token. Raises an exception if unsupported.

    :param token: the token to check
    :type token: Token
    """
    payload = token.payload
    if isinstance(payload, (Evaluation, ClusterEvaluation)):
        return None
    raise Exception(
        self.full_name
        + ": Input token is not a supported Evaluation object - "
        + classes.get_classname(payload))
[ "def", "check_input", "(", "self", ",", "token", ")", ":", "if", "isinstance", "(", "token", ".", "payload", ",", "Evaluation", ")", ":", "return", "None", "if", "isinstance", "(", "token", ".", "payload", ",", "ClusterEvaluation", ")", ":", "return", "N...
36.142857
16.714286
def show_clock_output_clock_time_current_time(self, **kwargs):
    """Build the ``show_clock`` RPC element tree and dispatch it.

    Keyword Args:
        current_time (str): text for the ``current-time`` leaf (required;
            popped from kwargs).
        callback (callable): invoked with the built element; defaults to
            ``self._callback``.

    Returns:
        Whatever the callback returns.
    """
    # The auto-generated original created a throwaway <config> element
    # that was immediately overwritten; build the tree directly instead.
    show_clock = ET.Element("show_clock")
    output = ET.SubElement(show_clock, "output")
    clock_time = ET.SubElement(output, "clock-time")
    current_time = ET.SubElement(clock_time, "current-time")
    current_time.text = kwargs.pop('current_time')

    callback = kwargs.pop('callback', self._callback)
    return callback(show_clock)
[ "def", "show_clock_output_clock_time_current_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_clock", "=", "ET", ".", "Element", "(", "\"show_clock\"", ")", "config", "=", "show_clock", ...
40.461538
13.461538
def to_ruby(self):
    ''' Convert one MeCabToken into HTML '''
    if self.need_ruby():
        # Annotate the surface form with its hiragana reading.
        return '<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'.format(
            sur=self.surface, read=self.reading_hira())
    if self.is_eos:
        # End-of-sentence tokens produce no output.
        return ''
    return self.surface
[ "def", "to_ruby", "(", "self", ")", ":", "if", "self", ".", "need_ruby", "(", ")", ":", "surface", "=", "self", ".", "surface", "reading", "=", "self", ".", "reading_hira", "(", ")", "return", "'<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'", ".", "format", "(...
35.6
17.6
def add_nodes(self, nodes, nesting=1):
    """
    Adds nodes and edges for generating the graph showing the relationship
    between modules and submodules listed in nodes.

    :param nodes: iterable of module entities; each may have ``uses`` and
        optionally ``ancestor`` attributes.
    :param nesting: recursion depth passed through to ``add_to_graph``.
    """
    hopNodes = set()  # nodes in this hop
    hopEdges = []  # edges in this hop
    # Get nodes and edges for this hop.  Each source node gets its own
    # rainbow colour so its edges are visually distinguishable.
    # (Idiom fix: enumerate() replaces zip(range(len(nodes)), nodes).)
    for i, n in enumerate(nodes):
        r, g, b = rainbowcolour(i, len(nodes))
        colour = '#%02X%02X%02X' % (r, g, b)
        # Dashed edges to the modules this node uses; only nodes not
        # already in the graph are queued for addition.
        for nu in n.uses:
            if nu not in self.added:
                hopNodes.add(nu)
                hopEdges.append((n, nu, 'dashed', colour))
        # Solid edge to the ancestor (parent module), if any.
        if hasattr(n, 'ancestor'):
            if n.ancestor not in self.added:
                hopNodes.add(n.ancestor)
                hopEdges.append((n, n.ancestor, 'solid', colour))
    # add nodes, edges and attributes to the graph if maximum number of
    # nodes is not exceeded
    if self.add_to_graph(hopNodes, hopEdges, nesting):
        self.dot.attr('graph', size='11.875,1000.0')
[ "def", "add_nodes", "(", "self", ",", "nodes", ",", "nesting", "=", "1", ")", ":", "hopNodes", "=", "set", "(", ")", "# nodes in this hop", "hopEdges", "=", "[", "]", "# edges in this hop", "# get nodes and edges for this hop", "for", "i", ",", "n", "in", "z...
45.695652
10.652174