text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def convert_cropping(builder, layer, input_names, output_names, keras_layer):
    """
    Convert a cropping layer (Cropping1D / Cropping2D) from Keras to Core ML.

    (The original docstring said "padding layer"; the ``builder.add_crop``
    call below shows this converts cropping.)

    Parameters
    ----------
    keras_layer: layer
        A Keras cropping layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    _check_data_format(keras_layer)
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])
    is_1d = isinstance(keras_layer, _keras.layers.Cropping1D)
    cropping = keras_layer.cropping
    top = left = bottom = right = 0
    if is_1d:
        # Cropping1D: int, (left, right), or ((left, right),)
        if type(cropping) is int:
            left = right = cropping
        elif type(cropping) is tuple:
            if type(cropping[0]) is int:
                left, right = cropping
            elif type(cropping[0]) is tuple and len(cropping[0]) == 2:
                left, right = cropping[0]
            else:
                raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
        else:
            raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
    else:
        # Cropping2D: int, (sym_height, sym_width), or ((top, bottom), (left, right))
        if type(cropping) is int:
            top = left = bottom = right = cropping
        elif type(cropping) is tuple:
            if type(cropping[0]) is int:
                # symmetric form: cropping[0] applies to top/bottom,
                # cropping[1] to left/right
                top, left = cropping
                bottom, right = cropping
            elif type(cropping[0]) is tuple:
                top, bottom = cropping[0]
                left, right = cropping[1]
            else:
                raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
        else:
            raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
    # Now add the layer
    builder.add_crop(name = layer,
        left = left, right=right, top=top, bottom=bottom,
        offset = [0,0],
        input_names = [input_name], output_name=output_name
        )
[ "def", "convert_cropping", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "_check_data_format", "(", "keras_layer", ")", "# Get input and output names", "input_name", ",", "output_name", "=", "(", "input_names", ...
36.019231
17.057692
def get_stp_mst_detail_output_cist_port_admin_edge(self, **kwargs):
    """Auto Generated Code.

    Builds the ``get_stp_mst_detail/output/cist/port/admin-edge`` XML
    request element, fills in the ``admin_edge`` value from kwargs, and
    dispatches it through the callback (``kwargs['callback']`` or the
    instance default ``self._callback``).
    """
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    leaf = ET.SubElement(
        ET.SubElement(
            ET.SubElement(
                ET.SubElement(get_stp_mst_detail, "output"),
                "cist"),
            "port"),
        "admin-edge")
    leaf.text = kwargs.pop('admin_edge')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_admin_edge", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "...
41.357143
12.285714
def has_output(state, text, incorrect_msg="The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.", fixed=False, strip_ansi=True):
    """Check whether student output contains specific text.

    Before you use ``has_output()``, have a look at ``has_expr_output()`` or
    ``has_expr_error()``; they might be more fit for your use case.

    Args:
        state: State instance describing student and solution code. Can be
            omitted if used with ``Ex()``.
        text: text that student output must contain. Can be a regex pattern
            or a simple string.
        incorrect_msg: if specified, this overrides the automatically
            generated feedback message in case ``text`` is not found in the
            student output.
        fixed: whether to match ``text`` exactly, rather than using regular
            expressions.
        strip_ansi: whether to remove ANSI escape codes from output.

    :Example: ``Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out')``
    """
    student_output = state.student_result
    if strip_ansi:
        student_output = _strip_ansi(student_output)
    # simple substring match when fixed, otherwise regex search
    if fixed:
        found = text in student_output
    else:
        found = re.search(text, student_output)
    if not found:
        feedback_msg = state.build_message(
            incorrect_msg, fmt_kwargs={'text': text, 'fixed': fixed})
        state.do_test(feedback_msg)
    return state
[ "def", "has_output", "(", "state", ",", "text", ",", "incorrect_msg", "=", "\"The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.\"", ",", "fixed", "=", "False", ",", "strip_ansi", "=", "True", ")", ":", "stu_output", "=...
35.3
28.66
def removeRequest(self, service, *args):
    """
    Removes a request from the pending request list.

    ``service`` may be either a RequestWrapper (removed by identity/equality
    lookup) or a raw service value, in which case the first pending request
    whose ``service`` and ``args`` both match is removed.

    Raises:
        LookupError: if no matching request is found.
    """
    if isinstance(service, RequestWrapper):
        if self.logger:
            # index() finds the first equal wrapper; the same lookup is used for the delete below
            self.logger.debug('Removing request: %s', self.requests[self.requests.index(service)])
        del self.requests[self.requests.index(service)]
        return
    for request in self.requests:
        if request.service == service and request.args == args:
            if self.logger:
                self.logger.debug('Removing request: %s', self.requests[self.requests.index(request)])
            # deletes the first request equal to `request` (the match itself,
            # unless an earlier entry compares equal)
            del self.requests[self.requests.index(request)]
            return
    raise LookupError("Request not found")
[ "def", "removeRequest", "(", "self", ",", "service", ",", "*", "args", ")", ":", "if", "isinstance", "(", "service", ",", "RequestWrapper", ")", ":", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "'Removing request: %s'", ",...
34.130435
19.695652
def write_to_png(self, target=None):
    """Writes the contents of surface as a PNG image.

    :param target:
        A filename, a binary mode file-like object with a
        :meth:`~file.write` method, or :obj:`None`.
    :returns:
        If :obj:`target` is :obj:`None`, return the PNG contents as a
        byte string.  Otherwise returns :obj:`None`.
    """
    return_bytes = target is None
    if return_bytes:
        # no target given: capture the PNG into an in-memory buffer
        target = io.BytesIO()
    if hasattr(target, 'write'):
        # file-like object: stream via a C callback wrapper
        write_func = _make_write_func(target)
        _check_status(cairo.cairo_surface_write_to_png_stream(
            self._pointer, write_func, ffi.NULL))
    else:
        # otherwise treat target as a filename
        _check_status(cairo.cairo_surface_write_to_png(
            self._pointer, _encode_filename(target)))
    if return_bytes:
        return target.getvalue()
[ "def", "write_to_png", "(", "self", ",", "target", "=", "None", ")", ":", "return_bytes", "=", "target", "is", "None", "if", "return_bytes", ":", "target", "=", "io", ".", "BytesIO", "(", ")", "if", "hasattr", "(", "target", ",", "'write'", ")", ":", ...
35.583333
15.416667
def fetch(self, refund_id, data=None, **kwargs):
    """Fetch the refund object for the given refund id.

    Args:
        refund_id: Refund Id for which the refund has to be retrieved.
        data: optional dict of request parameters passed through to the
            parent ``fetch`` (defaults to an empty dict).

    Returns:
        Refund dict for the given refund Id.
    """
    # BUGFIX: the original signature used a mutable default (`data={}`);
    # a single shared dict would persist any mutation across calls.
    if data is None:
        data = {}
    return super(Refund, self).fetch(refund_id, data, **kwargs)
[ "def", "fetch", "(", "self", ",", "refund_id", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "Refund", ",", "self", ")", ".", "fetch", "(", "refund_id", ",", "data", ",", "*", "*", "kwargs", ")" ]
29.090909
18.090909
def reorder_pinned_topics_courses(self, order, course_id):
    """
    Reorder pinned topics.

    Puts the pinned discussion topics in the specified order.
    All pinned topics should be included.

    :param order: ids of the pinned discussion topics in the desired order,
        e.g. "order=104,102,103".
    :param course_id: ID of the course whose topics are reordered.
    :returns: result of :meth:`generic_request` for the POST call
        (``no_data=True`` — no response body is expected).
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - order
    """The ids of the pinned discussion topics in the desired order. (For example, "order=104,102,103".)"""
    data["order"] = order

    self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/reorder".format(**path), data=data, params=params, no_data=True)
[ "def", "reorder_pinned_topics_courses", "(", "self", ",", "order", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", ...
40.454545
25.5
def saveSilent(self):
    """
    Save the record with the ``saved`` signal suppressed.

    Useful when chaining several saves together; check the
    saveSignalBlocked value to know if it was muted and whether any
    saves occurred.

    :return     <bool>
    """
    self.blockSignals(True)
    result = self.save()
    self.blockSignals(False)
    return result
[ "def", "saveSilent", "(", "self", ")", ":", "self", ".", "blockSignals", "(", "True", ")", "success", "=", "self", ".", "save", "(", ")", "self", ".", "blockSignals", "(", "False", ")", "return", "success" ]
33.692308
16.615385
def _set_options(self, options):
    '''Private function for computing the options used for sealing.

    Validates ``options`` and returns a new dict merging them over
    ``self.options``.  ``magic`` is applied via :meth:`set_magic`,
    ``flags`` values must be bools, all remaining option values must be
    ints in [0, 255].  ``self.options`` itself is never mutated.

    :raises TypeError: on a non-bool flag or non-int option value
    :raises ValueError: on an option value outside [0, 255]
    '''
    if not options:
        # nothing to merge; return a copy so callers cannot mutate ours
        return self.options.copy()
    options = options.copy()
    if 'magic' in options:
        self.set_magic(options['magic'])
        del options['magic']
    if 'flags' in options:
        flags = options['flags']
        del options['flags']
        for key, value in flags.iteritems():
            if not isinstance(value, bool):
                raise TypeError('Invalid flag type for: %s' % key)
    else:
        flags = self.options['flags']
    if 'info' in options:
        del options['info']
    for key, value in options.iteritems():
        if not isinstance(value, int):
            raise TypeError('Invalid option type for: %s' % key)
        if value < 0 or value > 255:
            raise ValueError('Option value out of range for: %s' % key)
    new_options = self.options.copy()
    # BUGFIX: dict.copy() is shallow, so new_options['flags'] used to be the
    # *same* dict object as self.options['flags'] and updating it silently
    # mutated self.options.  Copy the nested dict before updating it.
    new_options['flags'] = dict(self.options['flags'])
    new_options.update(options)
    new_options['flags'].update(flags)
    return new_options
[ "def", "_set_options", "(", "self", ",", "options", ")", ":", "if", "not", "options", ":", "return", "self", ".", "options", ".", "copy", "(", ")", "options", "=", "options", ".", "copy", "(", ")", "if", "'magic'", "in", "options", ":", "self", ".", ...
32.147059
16.735294
def worklog(accountable):
    """
    List all worklogs for a given issue key.

    Renders a table with author name, comment and time spent per worklog,
    or prints a red "not found" message when the issue has no worklogs.
    """
    worklog = accountable.issue_worklog()
    headers = ['author_name', 'comment', 'time_spent']
    if worklog:
        # sorted(w.items()) yields keys alphabetically, which matches the
        # header order (author_name < comment < time_spent)
        rows = [[v for k, v in sorted(w.items()) if k in headers]
                for w in worklog]
        rows.insert(0, headers)
        print_table(SingleTable(rows))
    else:
        click.secho(
            'No worklogs found for {}'.format(accountable.issue_key), fg='red'
        )
[ "def", "worklog", "(", "accountable", ")", ":", "worklog", "=", "accountable", ".", "issue_worklog", "(", ")", "headers", "=", "[", "'author_name'", ",", "'comment'", ",", "'time_spent'", "]", "if", "worklog", ":", "rows", "=", "[", "[", "v", "for", "k",...
30.4375
14.5625
def get_api_publisher(self, social_user):
    """
    Return a closure that publishes a post to the VK wall.

    Supported keyword arguments of the returned function:
        owner_id - VK user or group
        from_group - 1 by group, 0 by user
        message - text
        attachments - comma separated links or VK resources ID's and other
    https://vk.com/dev.php?method=wall.post
    """
    def _post(**kwargs):
        return self.get_api(social_user).wall.post(**kwargs)

    return _post
[ "def", "get_api_publisher", "(", "self", ",", "social_user", ")", ":", "def", "_post", "(", "*", "*", "kwargs", ")", ":", "api", "=", "self", ".", "get_api", "(", "social_user", ")", "response", "=", "api", ".", "wall", ".", "post", "(", "*", "*", ...
31.133333
13.666667
def set_config_file(self, path):
    """
    Set the config file.  The contents must be valid YAML and there
    must be a top-level element 'tasks'.  The listed tasks will be
    started according to their configuration, and the file will be
    watched for future changes.  The changes will be activated by
    appropriate changes to the running tasks.

    :returns: the result of reloading the configuration.
    """
    log = self._params.get('log', self._discard)
    if path != self._config_file:
        if self._config_file:
            # switching files: stop watching the old one first
            log.info("Config file changed from '%s' to '%s'", self._config_file, path)
            self.file_del(self, paths=[self._config_file])
        else:
            log.info("Config file set to '%s'", path)
        self._config_file = path
        # register a watch so future edits trigger a 'legion_config' event
        self.file_add(event_target(self, 'legion_config', log=log), path)
    # (re)load even when the path is unchanged
    return self._load_config()
[ "def", "set_config_file", "(", "self", ",", "path", ")", ":", "log", "=", "self", ".", "_params", ".", "get", "(", "'log'", ",", "self", ".", "_discard", ")", "if", "path", "!=", "self", ".", "_config_file", ":", "if", "self", ".", "_config_file", ":...
48.555556
17.666667
def naive(seqs, f=None, start=None, key=lambda x: x):
    """Naive cycle detector

    See help(cycle_detector) for more context.

    Args:
        seqs: sequences to detect cycles in; only ``seqs[0]`` is consumed
            by this implementation.
        f, start: function and starting state for finite state machine
            (accepted for interface symmetry; not used in this body —
            NOTE(review): confirm a caller adapts f/start into `seqs`).
        key: maps each value to the key used for duplicate detection.

    Yields:
        Values yielded by the sequence if it terminates; undefined once a
        cycle is found.

    Raises:
        CycleDetected: when a repeated key is seen, with ``first`` (step of
            the first occurrence) and ``period`` (steps between the two
            occurrences).  (The original docstring named this "CycleFound";
            the code raises ``CycleDetected``.)
    """
    history = {}
    for step, value in enumerate(seqs[0]):
        keyed = key(value)
        # yield before the duplicate check, so the caller also sees the
        # value that closes the cycle
        yield value
        if keyed in history:
            raise CycleDetected(
                first=history[keyed], period=step - history[keyed])
        history[keyed] = step
[ "def", "naive", "(", "seqs", ",", "f", "=", "None", ",", "start", "=", "None", ",", "key", "=", "lambda", "x", ":", "x", ")", ":", "history", "=", "{", "}", "for", "step", ",", "value", "in", "enumerate", "(", "seqs", "[", "0", "]", ")", ":",...
25.933333
23.666667
def make_FULL_BSE_Densities_folder(self, folder):
    """
    Create the "FULL_BSE_Densities" directory (needed for a BSE run)
    inside the given folder, unless it already exists.

    :return: a status string describing what happened.
    """
    target = folder + "/FULL_BSE_Densities"
    if os.path.exists(target):
        return "FULL_BSE_Densities folder already exists"
    os.makedirs(target)
    return "makedirs FULL_BSE_Densities folder"
[ "def", "make_FULL_BSE_Densities_folder", "(", "self", ",", "folder", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "folder", "+", "\"/FULL_BSE_Densities\"", ")", ":", "return", "\"FULL_BSE_Densities folder already exists\"", "else", ":", "os", ".", "maked...
39.7
18.9
def set_pid():
    """
    Explicitly ask the ipython kernel for its pid.

    Sends a small snippet to the kernel, waits for the reply, and stores
    the parsed pid in the module-level ``pid`` global.

    :return: the kernel pid as an int, or None if it could not be obtained.
    """
    global pid
    lines = '\n'.join(['import os', '_pid = os.getpid()'])
    try:
        msg_id = send(lines, silent=True, user_variables=['_pid'])
    except TypeError:
        # change in IPython 3.0+: user_variables replaced by user_expressions
        msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})

    # wait to get message back from kernel
    try:
        child = get_child_msg(msg_id)
    except Empty:
        echo("no reply from IPython kernel")
        return

    try:
        pid = int(child['content']['user_variables']['_pid'])
    except TypeError:
        # change in IPython 1.0.dev moved this out
        pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError:
        # change in IPython 3.0+: result lives under user_expressions.
        # BUGFIX: the original had a *second* `except KeyError` clause after
        # this one, which was unreachable; the nested try restores the
        # intended fall-back-then-report behavior.
        try:
            pid = int(
                child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError:
            echo("Could not get PID information, kernel not running Python?")
            return
    return pid
[ "def", "set_pid", "(", ")", ":", "global", "pid", "lines", "=", "'\\n'", ".", "join", "(", "[", "'import os'", ",", "'_pid = os.getpid()'", "]", ")", "try", ":", "msg_id", "=", "send", "(", "lines", ",", "silent", "=", "True", ",", "user_variables", "=...
36.321429
22.107143
def select(self, column, agg=None, _as=None, distinct=False):
    """
    Register a column to select in the query.

    :param column: a column name or expression producing the column
        (**not** aggregated); pass ``False`` to clear all registered
        selects.
    :param agg: a valid aggregate method used to produce the figure.
    :param _as: string used to represent the column.  Required when
        ``agg`` is present.
    :param distinct: whether the selection is DISTINCT.
    :raises ValueError: when ``agg`` is given without ``_as``.
    """
    if agg and not _as:
        # typo fixed in message: "colunns" -> "columns"
        raise ValueError("Aggregate columns require `_as` to be specified")
    if column is False:
        # sentinel: reset every previously registered select
        self._selects = {}
    else:
        # first registration under a given alias/name wins (setdefault)
        self._selects.setdefault((_as or column), (column, agg, _as, distinct))
[ "def", "select", "(", "self", ",", "column", ",", "agg", "=", "None", ",", "_as", "=", "None", ",", "distinct", "=", "False", ")", ":", "if", "agg", "and", "not", "_as", ":", "raise", "ValueError", "(", "\"Aggregate colunns require `_as` to be specified\"", ...
48.692308
22.846154
def feature_enabled(self, feature_name):
    """
    Indicates whether the specified feature is enabled for the CPC of this
    partition.

    The HMC must generally support features, and the specified feature must
    be available for the CPC.  For a list of available features, see section
    "Features" in the :term:`HMC API`, or use the :meth:`feature_info` method.

    Authorization requirements:

    * Object-access permission to this partition.

    Parameters:

      feature_name (:term:`string`): The name of the feature.

    Returns:

      bool: `True` if the feature is enabled, or `False` if the feature is
      disabled (but available).

    Raises:

      :exc:`ValueError`: Features are not supported on the HMC.
      :exc:`ValueError`: The specified feature is not available for the CPC.
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    feature_list = self.prop('available-features-list', None)
    if feature_list is None:
        raise ValueError("Firmware features are not supported on CPC %s"
                         % self.manager.cpc.name)
    # find the first feature entry with a matching name
    matched = next((f for f in feature_list if f['name'] == feature_name),
                   None)
    if matched is None:
        raise ValueError("Firmware feature %s is not available on CPC %s"
                         % (feature_name, self.manager.cpc.name))
    return matched['state']
[ "def", "feature_enabled", "(", "self", ",", "feature_name", ")", ":", "feature_list", "=", "self", ".", "prop", "(", "'available-features-list'", ",", "None", ")", "if", "feature_list", "is", "None", ":", "raise", "ValueError", "(", "\"Firmware features are not su...
34.622222
22.8
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0,
               rprop_max_step=100, learning_rate=0.01):
    """
    Rprop optimizer.

    Yields ``(shared_variable, update_expression)`` pairs suitable for a
    Theano updates list: the parameter update, the stored previous
    gradient, and the stored per-parameter step size.

    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        # persistent state: previous gradient and previous step size
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()),
                                 name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate,
                                 name=param.name + '_step')
        # sign of grad * grad_tm1 tells whether the gradient direction
        # stayed the same (grow step) or flipped (shrink step)
        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) + same * rprop_increase + diff * rprop_decrease)))
        # where the sign flipped, zero the gradient (iRprop+ style backtrack)
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
[ "def", "rprop_core", "(", "params", ",", "gradients", ",", "rprop_increase", "=", "1.01", ",", "rprop_decrease", "=", "0.99", ",", "rprop_min_step", "=", "0", ",", "rprop_max_step", "=", "100", ",", "learning_rate", "=", "0.01", ")", ":", "for", "param", "...
43.428571
21.047619
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
    """
    Gets the maximum padding lengths from all ``Instances`` in this batch.

    Each ``Instance`` has multiple ``Fields``, and each ``Field`` could
    have multiple things that need padding.  We look at all fields in all
    instances, and find the max values for each (field_name, padding_key)
    pair, returning them in a dictionary.

    This can then be used to convert this batch into arrays of consistent
    length, or to set model parameters, etc.
    """
    per_instance = [inst.get_padding_lengths() for inst in self.instances]
    if not per_instance:
        return {}
    # group the per-instance length dicts by field name
    by_field: Dict[str, List[Dict[str, int]]] = defaultdict(list)
    for lengths in per_instance:
        for field_name, field_lengths in lengths.items():
            by_field[field_name].append(field_lengths)
    # take the max over instances for each padding key of each field;
    # keys are taken from the first instance's dict, missing keys count as 0
    result: Dict[str, Dict[str, int]] = {}
    for field_name, lengths_list in by_field.items():
        result[field_name] = {
            padding_key: max(d[padding_key] if padding_key in d else 0
                             for d in lengths_list)
            for padding_key in lengths_list[0].keys()
        }
    return result
[ "def", "get_padding_lengths", "(", "self", ")", "->", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "int", "]", "]", ":", "padding_lengths", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "int", "]", "]", "=", "defaultdict", "(", "dict"...
62.208333
29.791667
def calculate_fee(self, input_values):
    '''
    Tx, list(int) -> int

    Fee = total inputs - total outputs, where joinsplits contribute
    vpub_new to the input side and vpub_old to the output side.
    Output values are little-endian encoded and decoded via utils.le2i.
    '''
    total_in = sum(input_values)
    total_out = sum(utils.le2i(tx_out.value) for tx_out in self.tx_outs)
    for joinsplit in self.tx_joinsplits:
        total_in += utils.le2i(joinsplit.vpub_new)
        total_out += utils.le2i(joinsplit.vpub_old)
    return total_in - total_out
[ "def", "calculate_fee", "(", "self", ",", "input_values", ")", ":", "total_in", "=", "sum", "(", "input_values", ")", "total_out", "=", "sum", "(", "[", "utils", ".", "le2i", "(", "tx_out", ".", "value", ")", "for", "tx_out", "in", "self", ".", "tx_out...
36.9
13.7
def hash_shooter(video_path):
    """Compute a hash using Shooter's algorithm.

    MD5-hashes four 4 KiB chunks at fixed relative offsets of the file and
    joins the hex digests with ';'.

    :param string video_path: path of the video
    :return: the hash, or None if the file is smaller than 8 KiB
    :rtype: string
    """
    readsize = 4096
    filesize = os.path.getsize(video_path)
    if filesize < readsize * 2:
        return None
    offsets = (readsize, filesize // 3 * 2, filesize // 3,
               filesize - readsize * 2)
    digests = []
    with open(video_path, 'rb') as f:
        for offset in offsets:
            f.seek(offset)
            digests.append(hashlib.md5(f.read(readsize)).hexdigest())
    return ';'.join(digests)
[ "def", "hash_shooter", "(", "video_path", ")", ":", "filesize", "=", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "readsize", "=", "4096", "if", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "<", "readsize", "*", "2", ":", ...
31.105263
17.210526
def eventFilter(self, watchedObject, event):
    """ Deletes an item from an editable combobox when the delete or
        backspace key is pressed in the list of items, or when
        ctrl-delete or ctrl-backspace is pressed in the line-edit.

        When the combobox is not editable the filter does nothing.

        :returns: True when the event was consumed (an item was removed),
            otherwise the result of the parent event filter.
    """
    if self.comboBox.isEditable() and event.type() == QtCore.QEvent.KeyPress:
        key = event.key()
        if key in (Qt.Key_Delete, Qt.Key_Backspace):
            # plain Delete/Backspace in the popup list view, or
            # Ctrl+Delete/Ctrl+Backspace in the combobox line-edit
            if (watchedObject == self._comboboxListView
                or (watchedObject == self.comboBox
                    and event.modifiers() == Qt.ControlModifier)):
                index = self._comboboxListView.currentIndex()
                if index.isValid():
                    row = index.row()
                    logger.debug("Removing item {} from the combobox: {}"
                                 .format(row, self._comboboxListView.model().data(index)))
                    # keep the config-tree item and the widget in sync
                    self.cti.removeValueByIndex(row)
                    self.comboBox.removeItem(row)
                    return True

    # Calling parent event filter, which may filter out other events.
    return super(ChoiceCtiEditor, self).eventFilter(watchedObject, event)
[ "def", "eventFilter", "(", "self", ",", "watchedObject", ",", "event", ")", ":", "if", "self", ".", "comboBox", ".", "isEditable", "(", ")", "and", "event", ".", "type", "(", ")", "==", "QtCore", ".", "QEvent", ".", "KeyPress", ":", "key", "=", "even...
51.72
23
def unwrap(self, encrypted_data):
    """
    Decrypts the data send by the server using the TLS channel negotiated
    between the client and the server.

    :param encrypted_data: the byte string of the encrypted data
    :return: a byte string of the decrypted data
    """
    # feed the ciphertext into the connection's memory BIO
    length = self.tls_connection.bio_write(encrypted_data)
    data = b''
    counter = 0
    while True:
        try:
            data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE)
        except SSL.WantReadError:
            # no more decrypted bytes available right now
            break
        data += data_chunk
        # counter over-counts by assuming each recv consumed a full buffer;
        # it only bounds the loop relative to the bytes written above
        counter += self.BIO_BUFFER_SIZE
        if counter > length:
            break
    return data
[ "def", "unwrap", "(", "self", ",", "encrypted_data", ")", ":", "length", "=", "self", ".", "tls_connection", ".", "bio_write", "(", "encrypted_data", ")", "data", "=", "b''", "counter", "=", "0", "while", "True", ":", "try", ":", "data_chunk", "=", "self...
29.416667
19.75
def _get_field_comment(field, separator=' - '):
    """
    Build an SQL comment from a field's title and description.

    :param field: tableschema-py Field, with optional 'title' and
        'description' values
    :param separator: string placed between title and description when both
        are present
    :return: the combined comment string

    >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
    'my_title - my_desc'
    >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
    'my_title'
    >>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
    'my_description'
    >>> _get_field_comment(tableschema.Field({}))
    ''
    """
    descriptor = field.descriptor
    # `or ''` normalizes both missing keys and explicit None/empty values
    title = descriptor.get('title') or ''
    description = descriptor.get('description') or ''
    return _get_comment(description, title, separator)
[ "def", "_get_field_comment", "(", "field", ",", "separator", "=", "' - '", ")", ":", "title", "=", "field", ".", "descriptor", ".", "get", "(", "'title'", ")", "or", "''", "description", "=", "field", ".", "descriptor", ".", "get", "(", "'description'", ...
39.3
24.9
def GetAttribute(self, name):
    """Provides the value of the attribute with the specified qualified
    name, by delegating to the underlying libxml2 text reader."""
    return libxml2mod.xmlTextReaderGetAttribute(self._o, name)
[ "def", "GetAttribute", "(", "self", ",", "name", ")", ":", "ret", "=", "libxml2mod", ".", "xmlTextReaderGetAttribute", "(", "self", ".", "_o", ",", "name", ")", "return", "ret" ]
41.4
11.6
def calc_im_size(img_height, img_width):
    '''Calculates shape of data after decoding.

    Applies the deconvolution output-size formula (kernel 4, stride 2,
    pad 1) five times, once per decoder layer.

    Parameters
    ----------
    img_height : int
        Height of encoded data.
    img_width : int
        Width of encoded data.

    Returns
    -------
    encoded_shape : tuple(int)
        Gives back 2-tuple with decoded image dimensions.
    '''
    h, w = img_height, img_width
    for _ in range(5):
        h, w = _get_deconv_outsize((h, w), 4, 2, 1)
    return h, w
[ "def", "calc_im_size", "(", "img_height", ",", "img_width", ")", ":", "height", ",", "width", "=", "img_height", ",", "img_width", "for", "_", "in", "range", "(", "5", ")", ":", "height", ",", "width", "=", "_get_deconv_outsize", "(", "(", "height", ",",...
25.238095
20.190476
def company_path(cls, project, company):
    """Return a fully-qualified company string."""
    template = "projects/{project}/companies/{company}"
    return google.api_core.path_template.expand(
        template, project=project, company=company
    )
[ "def", "company_path", "(", "cls", ",", "project", ",", "company", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/companies/{company}\"", ",", "project", "=", "project", ",", "company", "=", "compan...
48.2
17.8
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
    """
    Whether input token looks like a valid credential definition identifier
    from input issuer DID (default any); i.e.,
    <issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4,
    or <issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.

    :param token: candidate string
    :param issuer_did: issuer DID to match, if specified
    :return: whether input token looks like a valid credential definition
        identifier
    """
    match = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
    if match is None:
        return False
    return (not issuer_did) or match.group(1) == issuer_did
[ "def", "ok_cred_def_id", "(", "token", ":", "str", ",", "issuer_did", ":", "str", "=", "None", ")", "->", "bool", ":", "cd_id_m", "=", "re", ".", "match", "(", "'([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'", ".", "format", "(", "B58", ")", ",", "token", "or",...
52.076923
30.384615
def drop_edges(self) -> None:
    """Drop all edges in the database and log the elapsed time."""
    started = time.time()
    self.session.query(Edge).delete()
    self.session.commit()
    log.info('dropped all edges in %.2f seconds', time.time() - started)
[ "def", "drop_edges", "(", "self", ")", "->", "None", ":", "t", "=", "time", ".", "time", "(", ")", "self", ".", "session", ".", "query", "(", "Edge", ")", ".", "delete", "(", ")", "self", ".", "session", ".", "commit", "(", ")", "log", ".", "in...
29.625
18.75
def exec_command(self, cmdstr):
    """
    Execute an x3270 command

    `cmdstr` gets sent directly to the x3270 subprocess on it's stdin.

    :returns: the executed Command object (its output is available on it).
    :raises TerminatedError: when this client has already been terminated.
    """
    if self.is_terminated:
        raise TerminatedError("this TerminalClient instance has been terminated")

    log.debug("sending command: %s", cmdstr)
    c = Command(self.app, cmdstr)
    start = time.time()
    c.execute()
    elapsed = time.time() - start
    log.debug("elapsed execution: {0}".format(elapsed))
    # the last line of the command's output carries the terminal status
    self.status = Status(c.status_line)

    return c
[ "def", "exec_command", "(", "self", ",", "cmdstr", ")", ":", "if", "self", ".", "is_terminated", ":", "raise", "TerminatedError", "(", "\"this TerminalClient instance has been terminated\"", ")", "log", ".", "debug", "(", "\"sending command: %s\"", ",", "cmdstr", ")...
31.555556
17.777778
def _rpt_unused_sections(self, prt): """Report unused sections.""" sections_unused = set(self.sections_seen).difference(self.section2goids.keys()) for sec in sections_unused: prt.write(" UNUSED SECTION: {SEC}\n".format(SEC=sec))
[ "def", "_rpt_unused_sections", "(", "self", ",", "prt", ")", ":", "sections_unused", "=", "set", "(", "self", ".", "sections_seen", ")", ".", "difference", "(", "self", ".", "section2goids", ".", "keys", "(", ")", ")", "for", "sec", "in", "sections_unused"...
52.2
16.4
def _filter_update_security_group_rule(rule):
    '''Only two fields are allowed for modification:
    external_service and external_service_id.

    Returns a new dict containing only those keys whose values are strings
    no longer than GROUP_NAME_MAX_LENGTH.
    '''
    allowed = ['external_service', 'external_service_id']
    filtered = {}
    for key, value in rule.iteritems():
        if key not in allowed:
            continue
        if isinstance(value, basestring) and \
                len(value) <= GROUP_NAME_MAX_LENGTH:
            filtered[key] = value
    return filtered
[ "def", "_filter_update_security_group_rule", "(", "rule", ")", ":", "allowed", "=", "[", "'external_service'", ",", "'external_service_id'", "]", "filtered", "=", "{", "}", "for", "k", ",", "val", "in", "rule", ".", "iteritems", "(", ")", ":", "if", "k", "...
33.384615
15.692308
def _issubclass_Union(subclass, superclass, bound_Generic, bound_typevars,
        bound_typevars_readonly, follow_fwd_refs, _recursion_check):
    """Helper for _issubclass, a.k.a pytypes.issubtype.

    Thin dispatcher around ``_issubclass_Union_rec``: when forward
    references are enabled it first attempts the cheap path (no forward-ref
    following) and only retries with the expensive recursion-checked path
    if that raises a ForwardRefError.
    """
    if not follow_fwd_refs:
        return _issubclass_Union_rec(subclass, superclass, bound_Generic, bound_typevars,
                bound_typevars_readonly, follow_fwd_refs, _recursion_check)
    try:
        # try to succeed fast, before we go the expensive way involving recursion checks
        return _issubclass_Union_rec(subclass, superclass, bound_Generic, bound_typevars,
                bound_typevars_readonly, False, _recursion_check)
    except pytypes.ForwardRefError:
        # slow path: honor forward references with full recursion checking
        return _issubclass_Union_rec(subclass, superclass, bound_Generic, bound_typevars,
                bound_typevars_readonly, follow_fwd_refs, _recursion_check)
[ "def", "_issubclass_Union", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "if", "not", "follow_fwd_refs", ":", "return", "_issubclass_Union_rec"...
59.714286
28.142857
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred:
    """Get PG Sequence Generator.

    A PostGreSQL sequence generator returns a chunk of IDs for the given
    declarative; this simply delegates to the underlying DB connection.

    :param Declarative: the declarative whose sequence is consulted.
    :param count: how many IDs to prefetch.
    :return: A generator that will provide the IDs
    :rtype: an iterator, yielding the numbers to assign
    """
    return self._dbConn.prefetchDeclarativeIds(
        Declarative=Declarative, count=count)
[ "def", "prefetchDeclarativeIds", "(", "self", ",", "Declarative", ",", "count", ")", "->", "Deferred", ":", "return", "self", ".", "_dbConn", ".", "prefetchDeclarativeIds", "(", "Declarative", "=", "Declarative", ",", "count", "=", "count", ")" ]
37.272727
25.636364
def basescript(line):
    '''
    Parse one basescript JSON log line into the normalized record format.

    The full decoded record is kept under ``data``, while the common
    fields (timestamp, id, type, level, event) are lifted to the top
    level.

    >>> basescript('{"timestamp": "t", "id": "i", "type": "log", '
    ...            '"level": "warning", "event": "e"}')['event']
    'e'
    '''
    record = json.loads(line)
    return {
        'timestamp': record['timestamp'],
        'data': record,
        'id': record['id'],
        'type': record['type'],
        'level': record['level'],
        'event': record['event'],
    }
[ "def", "basescript", "(", "line", ")", ":", "log", "=", "json", ".", "loads", "(", "line", ")", "return", "dict", "(", "timestamp", "=", "log", "[", "'timestamp'", "]", ",", "data", "=", "log", ",", "id", "=", "log", "[", "'id'", "]", ",", "type"...
42.375
28.125
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
    """Create a TensorFlow session limited to ``num_cpu`` CPU threads.

    :param config: optional tf.ConfigProto; a CPU-capped one is built when
        omitted. Note that ``gpu_options.allow_growth`` is forced to True
        on the (possibly caller-supplied) config.
    :param num_cpu: thread-pool size; defaults to $RCALL_NUM_CPU or the
        machine's core count.
    :param make_default: return an InteractiveSession (installed as the
        default session) instead of a plain Session.
    :param graph: optional graph to attach the session to.
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    if config is None:
        config = tf.ConfigProto(
            allow_soft_placement=True,
            inter_op_parallelism_threads=num_cpu,
            intra_op_parallelism_threads=num_cpu)
    config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
[ "def", "make_session", "(", "config", "=", "None", ",", "num_cpu", "=", "None", ",", "make_default", "=", "False", ",", "graph", "=", "None", ")", ":", "if", "num_cpu", "is", "None", ":", "num_cpu", "=", "int", "(", "os", ".", "getenv", "(", "'RCALL_...
41.4
18.066667
def stat(path, hours, offset=None):
    """ Return True when the metric file at ``path`` is stale.

    Compares the file's modification time against a cutoff ``hours`` ago.

    NOTE(review): despite the original wording ("modified since ``hours``
    ago?"), the ``<`` comparison is True when the file's mtime is OLDER
    than the cutoff, i.e. the file has NOT been touched within the
    window — confirm which polarity callers expect.

    .. note:: ``offset`` exists only for signature compatibility with
       ``data()`` and is ignored.
    """
    return os.stat(path).st_mtime < (time.time() - _to_sec(hours))
[ "def", "stat", "(", "path", ",", "hours", ",", "offset", "=", "None", ")", ":", "return", "os", ".", "stat", "(", "path", ")", ".", "st_mtime", "<", "(", "time", ".", "time", "(", ")", "-", "_to_sec", "(", "hours", ")", ")" ]
34.25
20.5
def synchronize(self, pid, vendorSpecific=None):
    """Call :meth:`synchronizeResponse` and decode its boolean payload.

    See Also: synchronizeResponse()

    :param pid: identifier to synchronize
    :param vendorSpecific: optional vendor-specific headers
    :return: the decoded boolean response
    """
    raw_response = self.synchronizeResponse(pid, vendorSpecific)
    return self._read_boolean_response(raw_response)
[ "def", "synchronize", "(", "self", ",", "pid", ",", "vendorSpecific", "=", "None", ")", ":", "response", "=", "self", ".", "synchronizeResponse", "(", "pid", ",", "vendorSpecific", ")", "return", "self", ".", "_read_boolean_response", "(", "response", ")" ]
32.5
18.5
def _parse(self, raw, next_cls):
    '''
    Parse a raw bytes object and construct the list of packet header
    objects (and possible remaining bytes) that are part of this packet.

    :param raw: the raw packet bytes to decode
    :param next_cls: header class to start parsing with; defaults to
        Ethernet when None
    '''
    if next_cls is None:
        # Imported lazily, presumably to avoid a circular import at
        # module load time — confirm.
        from switchyard.lib.packet import Ethernet
        next_cls = Ethernet

    # Reset any previously parsed headers before rebuilding the chain.
    self._headers = []
    while issubclass(next_cls, PacketHeaderBase):
        packet_header_obj = next_cls()
        # from_bytes consumes this header's bytes and returns the rest.
        raw = packet_header_obj.from_bytes(raw)
        self.add_header(packet_header_obj)
        # The header just parsed decides what (if anything) follows it.
        next_cls = packet_header_obj.next_header_class()
        if next_cls is None:
            break
    if raw:
        # Leftover bytes no header class claimed -> opaque payload.
        self.add_header(RawPacketContents(raw))
[ "def", "_parse", "(", "self", ",", "raw", ",", "next_cls", ")", ":", "if", "next_cls", "is", "None", ":", "from", "switchyard", ".", "lib", ".", "packet", "import", "Ethernet", "next_cls", "=", "Ethernet", "self", ".", "_headers", "=", "[", "]", "while...
37.473684
17.789474
def metadata_and_language_from_option_line(self, line):
    """Inspect *line* for the start of a code cell.

    On a match of ``start_code_re`` the text after the ``%%`` marker is
    parsed into ``self.language`` and ``self.metadata`` (a dict); on a
    match of the alternative form only an empty metadata dict is set.
    """
    if self.start_code_re.match(line):
        options_text = line[line.find('%%') + 2:]
        self.language, self.metadata = self.options_to_metadata(options_text)
        return
    if self.alternative_start_code_re.match(line):
        self.metadata = {}
[ "def", "metadata_and_language_from_option_line", "(", "self", ",", "line", ")", ":", "if", "self", ".", "start_code_re", ".", "match", "(", "line", ")", ":", "self", ".", "language", ",", "self", ".", "metadata", "=", "self", ".", "options_to_metadata", "(",...
58.857143
14
def add_insertions(self, skip=10, window=1, test=False):
    '''Insert one random base near every ``skip``-th position.

    Walking from the end of the sequence towards the start, one base is
    inserted at a random position within ``window`` bases of each
    interval boundary, e.g. skip=10, window=1 inserts somewhere in the
    intervals [9,11], [19,21], ...

    :param skip: spacing between insertion anchors (must exceed 2*window)
    :param window: maximum distance of the insertion from its anchor
    :param test: when True the inserted base is always 'N' (deterministic
        content for testing; the position is still random)
    '''
    assert 2 * window < skip
    chars = list(self.seq)
    for anchor in range(len(self) - skip, 0, -skip):
        pos = random.randrange(anchor - window, anchor + window + 1)
        base = random.choice(['A', 'C', 'G', 'T'])
        if test:
            base = 'N'
        chars.insert(pos, base)
    self.seq = ''.join(chars)
[ "def", "add_insertions", "(", "self", ",", "skip", "=", "10", ",", "window", "=", "1", ",", "test", "=", "False", ")", ":", "assert", "2", "*", "window", "<", "skip", "new_seq", "=", "list", "(", "self", ".", "seq", ")", "for", "i", "in", "range"...
48.083333
24.416667
def properties_operator(cls, name):
    """Wrap container operator ``name`` so results keep the ``cls`` type.

    Builds a replacement for ``cls.<name>`` that calls the inherited
    (super-class) implementation and re-wraps its plain-container result
    in ``cls``, preserving the subclass through operations that would
    otherwise return the base container type.

    :param cls: container subclass whose operator is being wrapped
    :param name: operator method name, e.g. ``'__add__'``
    :return: the wrapping function, carrying the original's metadata
    """
    import functools

    wrapped = getattr(cls, name)

    # functools.wraps copies __name__ and __doc__ like the original code
    # did, plus __module__/__qualname__/__dict__ and sets __wrapped__,
    # so introspection and help() see the real operator.
    @functools.wraps(wrapped)
    def wrapper(self, *args, **kwargs):
        """Perform operation and cast to container class"""
        output = getattr(super(cls, self), name)(*args, **kwargs)
        return cls(output)

    return wrapper
[ "def", "properties_operator", "(", "cls", ",", "name", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Perform operation and cast to container class\"\"\"", "output", "=", "getattr", "(", "super", "(", "cls"...
35.5
13.25
def bulk_attachments(self, article, attachments):
    """
    Associate already-uploaded (unassociated) attachments with an
    article after the article has been created.

    :param article: Article id or :class:`Article` object
    :param attachments: a single :class:`ArticleAttachment`, or a list of
        them; the API accepts at most 20 per call.
        `Zendesk documentation.
        <https://developer.zendesk.com/rest_api/docs/help_center/articles#associate-attachments-in-bulk-to-article>`__
    :return: the API response
    """
    request = HelpdeskAttachmentRequest(self)
    return request.post(self.endpoint.bulk_attachments,
                        article=article,
                        attachments=attachments)
[ "def", "bulk_attachments", "(", "self", ",", "article", ",", "attachments", ")", ":", "return", "HelpdeskAttachmentRequest", "(", "self", ")", ".", "post", "(", "self", ".", "endpoint", ".", "bulk_attachments", ",", "article", "=", "article", ",", "attachments...
56.461538
31.692308
def plugins(cls, enabled=True):
    """
    Returns the plugins registered for the given class.

    :param      enabled | <bool> || None
                None returns every plugin; True/False filter on each
                plugin's isEnabled() state.

    :return     [<Plugin>, ..]
    """
    cls.loadPlugins()
    # Materialize to a list so the result is reusable and consistently
    # typed on both Python 2 and 3 (dict.values() is a view and filter()
    # is a single-use lazy iterator on Python 3, which contradicted the
    # documented list return type).
    plugs = list(getattr(cls, '_%s__plugins' % cls.__name__, {}).values())
    if enabled is None:
        return plugs
    return [plug for plug in plugs if plug.isEnabled() == enabled]
[ "def", "plugins", "(", "cls", ",", "enabled", "=", "True", ")", ":", "cls", ".", "loadPlugins", "(", ")", "plugs", "=", "getattr", "(", "cls", ",", "'_%s__plugins'", "%", "cls", ".", "__name__", ",", "{", "}", ")", ".", "values", "(", ")", "if", ...
29.071429
16.5
def check_args(self):
    """
    Validates that a valid number of arguments were received and that all
    arguments were recognised.

    :return: True or False.
    """
    # Parsers are rebuilt here so print_help() below can show the full
    # usage text; the actual argument parsing happened earlier.
    parser = get_base_arguments(get_parser())
    parser = get_tc_arguments(parser)
    # Disable "Do not use len(SEQ) as condition value"
    # pylint: disable=C1801
    if len(sys.argv) < 2:
        # Nothing at all beyond the program name.
        self.logger.error("Icetea called with no arguments! ")
        parser.print_help()
        return False
    elif not self.args.ignore_invalid_params and self.unknown:
        # Unrecognised arguments are fatal unless explicitly ignored.
        self.logger.error("Unknown parameters received, exiting. "
                          "To ignore this add --ignore_invalid_params flag.")
        self.logger.error("Following parameters were unknown: {}".format(self.unknown))
        parser.print_help()
        return False
    return True
[ "def", "check_args", "(", "self", ")", ":", "parser", "=", "get_base_arguments", "(", "get_parser", "(", ")", ")", "parser", "=", "get_tc_arguments", "(", "parser", ")", "# Disable \"Do not use len(SEQ) as condition value\"", "# pylint: disable=C1801", "if", "len", "(...
40.727273
19.727273
def to_internal(self, attribute_profile, external_dict):
    """
    Converts the external data from "type" to internal

    :type attribute_profile: str
    :type external_dict: dict[str, str]
    :rtype: dict[str, str]
    :param attribute_profile: From which external type to convert (ex: oidc, saml, ...)
    :param external_dict: Attributes in the external format
    :return: Attributes in the internal format
    """
    internal_dict = {}

    for internal_attribute_name, mapping in self.from_internal_attributes.items():
        if attribute_profile not in mapping:
            logger.debug("no attribute mapping found for internal attribute '%s' the attribute profile '%s'" % (
                internal_attribute_name, attribute_profile))
            # skip this internal attribute if we have no mapping in the specified profile
            continue

        external_attribute_name = mapping[attribute_profile]
        # Several external names may map to one internal name; values are
        # collected in the mapping's declared priority order.
        attribute_values = self._collate_attribute_values_by_priority_order(external_attribute_name,
                                                                            external_dict)
        if attribute_values:  # Only insert key if it has some values
            logger.debug("backend attribute '%s' mapped to %s" % (external_attribute_name,
                                                                  internal_attribute_name))
            internal_dict[internal_attribute_name] = attribute_values
        else:
            logger.debug("skipped backend attribute '%s': no value found", external_attribute_name)

    # Template attributes (values synthesized from other attributes) are
    # applied after the direct mappings.
    internal_dict = self._handle_template_attributes(attribute_profile, internal_dict)
    return internal_dict
[ "def", "to_internal", "(", "self", ",", "attribute_profile", ",", "external_dict", ")", ":", "internal_dict", "=", "{", "}", "for", "internal_attribute_name", ",", "mapping", "in", "self", ".", "from_internal_attributes", ".", "items", "(", ")", ":", "if", "at...
52.454545
31.121212
def get_minimum_needs(self):
    """Summarise the configured minimum needs.

    Maps each resource to its default amount, labelling the resource
    with its unit abbreviation when one is available.

    :returns: minimum needs, resource label -> default amount
    :rtype: OrderedDict
    """
    needs = OrderedDict()
    for resource in self.minimum_needs['resources']:
        label = tr(resource['Resource name'])
        unit = resource['Unit abbreviation']
        if unit:
            label = '%s [%s]' % (label, unit)
        needs[label] = resource['Default']
    return OrderedDict(needs)
[ "def", "get_minimum_needs", "(", "self", ")", ":", "minimum_needs", "=", "OrderedDict", "(", ")", "for", "resource", "in", "self", ".", "minimum_needs", "[", "'resources'", "]", ":", "if", "resource", "[", "'Unit abbreviation'", "]", ":", "name", "=", "'%s [...
34.55
11.2
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None):
    """Retrieve inputs for a step from existing variables.

    Potentially nests inputs to deal with merging split variables. If
    we split previously and are merging now, then we only nest those
    coming from the split process.

    NOTE(review): ``wf`` is accepted but never used, and the
    ``final_inputs`` loop below returns ``inputs`` (the same mutated
    objects), so ``final_inputs`` itself is dead — confirm intent.
    """
    inputs = []
    skip_inputs = set([])
    for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]:
        inputs.append(orig_input)
    # Only add description and other information for non-record inputs, otherwise batched with records
    if not any(is_cwl_record(x) for x in inputs):
        inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs]
    nested_inputs = []
    if step.parallel in ["single-merge", "batch-merge"]:
        # Merging after a split: only nest the variables that came from
        # the split process.
        if parallel_ids:
            inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs]
            nested_inputs = parallel_ids[:]
            parallel_ids = []
    elif step.parallel in ["multi-combined"]:
        assert len(parallel_ids) == 0
        nested_inputs = [x["id"] for x in inputs]
        inputs = [_nest_variable(x) for x in inputs]
    elif step.parallel in ["multi-batch"]:
        assert len(parallel_ids) == 0
        nested_inputs = [x["id"] for x in inputs]
        # If we're batching,with mixed records/inputs avoid double nesting records
        inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs]
    # avoid inputs/outputs with the same name
    outputs = [_get_string_vid(x["id"]) for x in step.outputs]
    final_inputs = []
    for input in inputs:
        input["wf_duplicate"] = get_base_id(input["id"]) in outputs
        final_inputs.append(input)
    return inputs, parallel_ids, nested_inputs
[ "def", "_get_step_inputs", "(", "step", ",", "file_vs", ",", "std_vs", ",", "parallel_ids", ",", "wf", "=", "None", ")", ":", "inputs", "=", "[", "]", "skip_inputs", "=", "set", "(", "[", "]", ")", "for", "orig_input", "in", "[", "_get_variable", "(", ...
48.666667
19.361111
def list_all_stripe_gateways(cls, **kwargs):
    """List StripeGateways

    Return a page of StripeGateways. By default the request is made
    synchronously; passing a truthy ``async`` keyword (note: a reserved
    word since Python 3.7, so it must be supplied via
    ``**{'async': True}``) returns the request thread instead.

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[StripeGateway], or the request thread when async
    """
    kwargs['_return_http_data_only'] = True
    run_async = kwargs.get('async')
    if run_async:
        return cls._list_all_stripe_gateways_with_http_info(**kwargs)
    data = cls._list_all_stripe_gateways_with_http_info(**kwargs)
    return data
[ "def", "list_all_stripe_gateways", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_stripe_gateways_with_http_info",...
38.173913
15.086957
def _matcher(self, other):
    """
    CGRContainer < CGRContainer

    Build a GraphMatcher testing whether *self* is a subgraph of *other*,
    comparing both nodes and edges by equality.
    """
    if not isinstance(other, CGRContainer):
        raise TypeError('only cgr-cgr possible')

    def _eq(x, y):
        return x == y

    return GraphMatcher(other, self, _eq, _eq)
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ","...
37.142857
10.857143
def update_bin(self, bin_form): """Updates an existing bin. arg: bin_form (osid.resource.BinForm): the form containing the elements to be updated raise: IllegalState - ``bin_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``bin_id`` or ``bin_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``bin_form`` did not originate from ``get_bin_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.update_bin_template if self._catalog_session is not None: return self._catalog_session.update_catalog(catalog_form=bin_form) collection = JSONClientValidated('resource', collection='Bin', runtime=self._runtime) if not isinstance(bin_form, ABCBinForm): raise errors.InvalidArgument('argument type is not an BinForm') if not bin_form.is_for_update(): raise errors.InvalidArgument('the BinForm is for update only, not create') try: if self._forms[bin_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('bin_form already used in an update transaction') except KeyError: raise errors.Unsupported('bin_form did not originate from this session') if not bin_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(bin_form._my_map) # save is deprecated - change to replace_one self._forms[bin_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned return objects.Bin(osid_object_map=bin_form._my_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "update_bin", "(", "self", ",", "bin_form", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.update_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "updat...
52.575
25.025
def download(directory, master_token=None, member=None, access_token=None,
             source=None, project_data=False, max_size='128m', verbose=False,
             debug=False, memberlist=None, excludelist=None,
             id_filename=False):
    """
    Download data from project members to the target directory.

    Unless this is a member-specific download, directories will be
    created for each project member ID. Also, unless a source is
    specified, all shared sources are downloaded and data is sorted into
    subdirectories according to source.

    Projects can optionally return data to Open Humans member accounts.
    If project_data is True (or the "--project-data" flag is used), this
    data (the project's own data files, instead of data from other
    sources) will be downloaded for each member.

    :param directory: target directory for downloaded data.
    :param master_token: master access token for the project
        (default: None). Mutually exclusive with ``access_token``.
    :param member: specific member whose project data is downloaded
        (default: None).
    :param access_token: user-specific OAuth2 access token
        (default: None). Mutually exclusive with ``master_token``.
    :param source: data source to restrict the download to
        (default: None). Mutually exclusive with ``project_data``.
    :param project_data: download the project's own data files instead of
        other shared sources (default: False).
    :param max_size: maximum file size to download (default: '128m').
    :param verbose: verbose logging (default: False).
    :param debug: debug logging (default: False).
    :param memberlist: file of member IDs to download (default: None).
        Only valid for whole-project downloads.
    :param excludelist: file of member IDs to skip (default: None).
        Only valid for whole-project downloads.
    :param id_filename: prefix downloaded filenames with the member ID
        (default: False).
    """
    set_log_level(debug, verbose)
    # Member include/exclude lists only make sense for whole-project runs.
    if (memberlist or excludelist) and (member or access_token):
        raise UsageError('Please do not provide a memberlist or excludelist '
                         'when retrieving data for a single member.')
    memberlist = read_id_list(memberlist)
    excludelist = read_id_list(excludelist)
    # Exactly one kind of credential must be supplied.
    if not (master_token or access_token) or (master_token and access_token):
        raise UsageError('Please specify either a master access token (-T), '
                         'or an OAuth2 user access token (-t).')
    if (source and project_data):
        raise UsageError("It doesn't make sense to use both 'source' and"
                         "'project-data' options!")
    if master_token:
        project = OHProject(master_access_token=master_token)
        if member:
            if project_data:
                project.download_member_project_data(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    max_size=max_size,
                    id_filename=id_filename)
            else:
                project.download_member_shared(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    source=source,
                    max_size=max_size,
                    id_filename=id_filename)
        else:
            project.download_all(target_dir=directory,
                                 source=source,
                                 max_size=max_size,
                                 memberlist=memberlist,
                                 excludelist=excludelist,
                                 project_data=project_data,
                                 id_filename=id_filename)
    else:
        # User-token path: exchange the OAuth2 token for member metadata,
        # then download just that member's data.
        member_data = exchange_oauth2_member(access_token, all_files=True)
        if project_data:
            OHProject.download_member_project_data(member_data=member_data,
                                                   target_member_dir=directory,
                                                   max_size=max_size,
                                                   id_filename=id_filename)
        else:
            OHProject.download_member_shared(member_data=member_data,
                                             target_member_dir=directory,
                                             source=source,
                                             max_size=max_size,
                                             id_filename=id_filename)
[ "def", "download", "(", "directory", ",", "master_token", "=", "None", ",", "member", "=", "None", ",", "access_token", "=", "None", ",", "source", "=", "None", ",", "project_data", "=", "False", ",", "max_size", "=", "'128m'", ",", "verbose", "=", "Fals...
49.853933
22.550562
def make_parallel_helper(parallel_arg, **kwargs):
    """Return a :class:`ParallelHelper` object that can be used for easy
    parallelization of computations. *parallel_arg* is an object that lets
    the caller easily specify the kind of parallelization they are
    interested in. Allowed values are:

    False
      Serial processing only.
    True
      Parallel processing using all available cores.
    1
      Equivalent to ``False``.
    Other positive integer
      Parallel processing using the specified number of cores.
    x, 0 < x < 1
      Parallel processing using about ``x * N`` cores, where N is the total
      number of cores in the system. Note that the meanings of ``0.99`` and
      ``1`` as arguments are very different.
    :class:`ParallelHelper` instance
      Returns the instance.

    The ``**kwargs`` are passed on to the appropriate
    :class:`ParallelHelper` constructor, if the caller wants to do
    something tricky.

    Expected usage is::

        from pwkit.parallel import make_parallel_helper

        def my_parallelizable_function(arg1, arg2, parallel=True):
            phelp = make_parallel_helper(parallel)
            with phelp.get_map() as map:
                op_results = map(sub_operation, args)
            return final_result
    """
    # Check for an existing helper FIRST: helper instances generally do
    # not support ordering comparisons, so the `0 < x < 1` test below
    # would raise TypeError on Python 3 if it ran before this check (the
    # original ordering had exactly that bug).
    if isinstance(parallel_arg, ParallelHelper):
        return parallel_arg

    if parallel_arg is True:  # note: (True == 1) is True
        return MultiprocessingPoolHelper(**kwargs)

    if parallel_arg is False or parallel_arg == 1:
        return SerialHelper(**kwargs)

    # Exact core count. bool was already handled above, so any remaining
    # integer is a genuine worker count.
    if isinstance(parallel_arg, six.integer_types):
        return MultiprocessingPoolHelper(processes=parallel_arg, **kwargs)

    # Fraction of the available cores.
    if 0 < parallel_arg < 1:
        from multiprocessing import cpu_count
        n = int(round(parallel_arg * cpu_count()))
        return MultiprocessingPoolHelper(processes=n, **kwargs)

    raise ValueError('don\'t understand make_parallel_helper() argument %r'
                     % parallel_arg)
[ "def", "make_parallel_helper", "(", "parallel_arg", ",", "*", "*", "kwargs", ")", ":", "if", "parallel_arg", "is", "True", ":", "# note: (True == 1) is True", "return", "MultiprocessingPoolHelper", "(", "*", "*", "kwargs", ")", "if", "parallel_arg", "is", "False",...
37.69863
22.835616
def array_to_hdf5(a, parent, name, **kwargs):
    """Write a Numpy array to an HDF5 dataset.

    Parameters
    ----------
    a : ndarray
        Data to write.
    parent : string or h5py group
        Parent HDF5 file or group. A string is treated as an HDF5 file
        name, opened in append mode and closed again before returning.
    name : string
        Name or path of dataset to write data into.
    kwargs : keyword arguments
        Passed through to h5py require_dataset() function.

    Returns
    -------
    h5d : h5py dataset

    """

    import h5py

    opened_here = None
    if isinstance(parent, str):
        opened_here = h5py.File(parent, mode='a')
        parent = opened_here

    try:
        # Sensible defaults, each overridable by the caller via kwargs.
        kwargs.setdefault('chunks', True)  # auto-chunking
        kwargs.setdefault('dtype', a.dtype)
        kwargs.setdefault('compression', 'gzip')
        dataset = parent.require_dataset(name, shape=a.shape, **kwargs)
        dataset[...] = a
        return dataset

    finally:
        if opened_here is not None:
            opened_here.close()
[ "def", "array_to_hdf5", "(", "a", ",", "parent", ",", "name", ",", "*", "*", "kwargs", ")", ":", "import", "h5py", "h5f", "=", "None", "if", "isinstance", "(", "parent", ",", "str", ")", ":", "h5f", "=", "h5py", ".", "File", "(", "parent", ",", "...
22.682927
22.853659
def _column_resized(self, col, old_width, new_width):
    """Propagate a column resize to the data table and refresh layout.

    :param col: index of the resized column
    :param old_width: previous width in pixels (unused)
    :param new_width: new width in pixels
    """
    self.dataTable.setColumnWidth(col, new_width)
    self._update_layout()
[ "def", "_column_resized", "(", "self", ",", "col", ",", "old_width", ",", "new_width", ")", ":", "self", ".", "dataTable", ".", "setColumnWidth", "(", "col", ",", "new_width", ")", "self", ".", "_update_layout", "(", ")" ]
44
9.75
def read_dictionary_file(dictionary_path):
    """Return all words in dictionary file as set.

    Falls back to an empty set when no path is given or the file does
    not exist.
    """
    try:
        # Fast path: a previously loaded dictionary for this path.
        # NOTE(review): the cache is only read here, never written —
        # presumably other code populates _user_dictionary_cache;
        # confirm, otherwise this lookup always misses.
        return _user_dictionary_cache[dictionary_path]
    except KeyError:
        if dictionary_path and os.path.exists(dictionary_path):
            with open(dictionary_path, "rt") as dict_f:
                # Words are runs of word characters, allowing internal
                # apostrophes (e.g. "don't") but not leading/trailing ones.
                words = set(re.findall(r"(\w[\w']*\w|\w)",
                                       " ".join(dict_f.read().splitlines())))
            return words

        return set()
[ "def", "read_dictionary_file", "(", "dictionary_path", ")", ":", "try", ":", "return", "_user_dictionary_cache", "[", "dictionary_path", "]", "except", "KeyError", ":", "if", "dictionary_path", "and", "os", ".", "path", ".", "exists", "(", "dictionary_path", ")", ...
39.833333
19.416667
def _find_project_config_file(user_config_file):
    """Find path to project-wide config file

    Walks from the current working directory up towards the filesystem
    root, stopping at the first directory containing a versionner RC
    file (a project file identical to the user-wide file is ignored).

    :param user_config_file: instance with user-wide config path
    :type: pathlib.Path
    :rtype: pathlib.Path
    """
    current_dir = pathlib.Path('.').absolute()
    found = None
    root = pathlib.Path('/')

    while current_dir != root:
        candidate = current_dir / defaults.RC_FILENAME
        if candidate.exists():
            found = candidate
            break
        current_dir = current_dir.parent

    if found and found != user_config_file:
        return found
[ "def", "_find_project_config_file", "(", "user_config_file", ")", ":", "proj_cfg_dir", "=", "pathlib", ".", "Path", "(", "'.'", ")", ".", "absolute", "(", ")", "proj_cfg_file", "=", "None", "root", "=", "pathlib", ".", "Path", "(", "'/'", ")", "while", "pr...
33.826087
16.217391
def realpath(self, spec, key):
    """
    Resolve and update the path key in the spec with its realpath,
    based on the working directory.

    Returns the resolved path, or None when the key is absent or has no
    value.
    """
    if key not in spec:
        # nothing to resolve for now
        return

    value = spec[key]
    if not value:
        logger.warning(
            "cannot resolve realpath of '%s' as it is not defined", key)
        return

    resolved = realpath(join(spec.get(WORKING_DIR, ''), value))
    if resolved != value:
        spec[key] = resolved
        logger.warning(
            "realpath of '%s' resolved to '%s', spec is updated",
            key, resolved
        )
    return resolved
[ "def", "realpath", "(", "self", ",", "spec", ",", "key", ")", ":", "if", "key", "not", "in", "spec", ":", "# do nothing for now", "return", "if", "not", "spec", "[", "key", "]", ":", "logger", ".", "warning", "(", "\"cannot resolve realpath of '%s' as it is ...
28.956522
19.217391
def net_transform(transform_func, block=None, **kwargs):
    """ Maps nets to new sets of nets according to a custom function

    :param transform_func:
        Function signature: func(orig_net (logicnet)) -> keep_orig_net (bool)
    :param block: block to transform (defaults to the working block)
    :return: None
    """
    block = working_block(block)
    with set_working_block(block, True):
        # Iterate over a snapshot: transform_func may add nets and nets
        # may be removed from block.logic during the walk.
        for original_net in block.logic.copy():
            if not transform_func(original_net, **kwargs):
                block.logic.remove(original_net)
[ "def", "net_transform", "(", "transform_func", ",", "block", "=", "None", ",", "*", "*", "kwargs", ")", ":", "block", "=", "working_block", "(", "block", ")", "with", "set_working_block", "(", "block", ",", "True", ")", ":", "for", "net", "in", "block", ...
34.857143
13.857143
def separate_reach_logs(log_str):
    """Split REACH reader output out of a combined log.

    Scans for sections delimited by 'Beginning reach' ... 'Reach
    finished' lines. Lines inside a section are stripped of their
    'readers - ' prefix and collected; the delimiter lines themselves
    stay in the surrounding log. A section still open at the end of the
    input is reported as a failure.

    :param log_str: the combined log text
    :return: tuple of (remaining log text, list of (status, reach_log)
        pairs where status is 'SUCCEEDED' or 'FAILURE')
    """
    reach_logs = []
    reach_lines = []
    other_lines = []
    adding_reach_lines = False
    # Build the kept lines as we go instead of calling list.remove(l):
    # remove() deletes the FIRST equal line, which corrupted the output
    # whenever the log contained duplicate lines (and was O(n^2)).
    for line in log_str.splitlines():
        if not adding_reach_lines and 'Beginning reach' in line:
            adding_reach_lines = True
            other_lines.append(line)
        elif adding_reach_lines and 'Reach finished' in line:
            adding_reach_lines = False
            reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines)))
            reach_lines = []
            other_lines.append(line)
        elif adding_reach_lines:
            reach_lines.append(line.split('readers - ')[1])
        else:
            other_lines.append(line)
    if adding_reach_lines:
        reach_logs.append(('FAILURE', '\n'.join(reach_lines)))
    return '\n'.join(other_lines), reach_logs
[ "def", "separate_reach_logs", "(", "log_str", ")", ":", "log_lines", "=", "log_str", ".", "splitlines", "(", ")", "reach_logs", "=", "[", "]", "reach_lines", "=", "[", "]", "adding_reach_lines", "=", "False", "for", "l", "in", "log_lines", "[", ":", "]", ...
40.157895
12.210526
def database_caller_creator(self, number_of_rows, username, password, host, port, name=None, custom=None):
    '''creates a postgresql db
    returns the related connection object
    which will be later used to spawn the cursor

    :param number_of_rows: rows to generate when a custom schema is given
    :param username/password/host/port: PostgreSQL connection settings
    :param name: database name; a random one is generated when omitted
    :param custom: optional custom schema; when given, data is generated
        and the process EXITS (sys.exit(0)) after closing the connection
    '''
    cursor = None
    conn = None

    if name:
        dbname = name
    else:
        dbname = 'postgresql_' + str_generator(self).lower()

    try:
        # createdb
        # CREATE DATABASE cannot run inside a transaction, hence
        # autocommit isolation on the bootstrap connection.
        conn = psycopg2.connect(
            user=username, password=password, host=host, port=port)
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = conn.cursor()
        cur.execute('CREATE DATABASE %s;' % dbname)
        cur.close()
        conn.close()
        # reconnect to the new database
        conn = psycopg2.connect(user=username, password=password,
                                host=host, port=port, database=dbname)
        cursor = conn.cursor()
        logger.warning('Database created and opened succesfully: %s' % dbname, extra=d)
    except Exception as err:
        logger.error(err, extra=d)
        raise

    if custom:
        # NOTE(review): the custom path terminates the whole process via
        # sys.exit(0) after generating data — confirm this is intended
        # CLI behavior rather than a library-level side effect.
        self.custom_db_creator(number_of_rows, cursor, conn, custom)
        cursor.close()
        conn.close()
        sys.exit(0)

    return cursor, conn
[ "def", "database_caller_creator", "(", "self", ",", "number_of_rows", ",", "username", ",", "password", ",", "host", ",", "port", ",", "name", "=", "None", ",", "custom", "=", "None", ")", ":", "cursor", "=", "None", "conn", "=", "None", "if", "name", ...
36
21.621622
def get_last_response_xml(self, pretty_print_if_possible=False):
    """
    Retrieves the raw XML (decrypted) of the last SAML response,
    or the last Logout Response generated or processed.

    :param pretty_print_if_possible: pretty-print the XML when it is
        stored as a parsed element rather than a string
    :returns: SAML response XML
    :rtype: string|None
    """
    last = self.__last_response
    if last is None:
        return None
    if isinstance(last, basestring):
        return last
    return tostring(last, pretty_print=pretty_print_if_possible)
[ "def", "get_last_response_xml", "(", "self", ",", "pretty_print_if_possible", "=", "False", ")", ":", "response", "=", "None", "if", "self", ".", "__last_response", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "__last_response", ",", "basest...
38.933333
18.133333
def close(self, autocommit=True):
    """Close the consumer, waiting indefinitely for any needed cleanup.

    Keyword Arguments:
        autocommit (bool): If auto-commit is configured for this
            consumer, this optional flag causes the consumer to attempt to
            commit any pending consumed offsets prior to close.
            Default: True
    """
    # Closing twice is a no-op.
    if self._closed:
        return
    log.debug("Closing the KafkaConsumer.")
    self._closed = True
    # Leave the consumer group (optionally committing offsets) before
    # tearing down metrics and the network client.
    self._coordinator.close(autocommit=autocommit)
    self._metrics.close()
    self._client.close()
    # Deserializers may hold resources; close them when they support it.
    try:
        self.config['key_deserializer'].close()
    except AttributeError:
        pass
    try:
        self.config['value_deserializer'].close()
    except AttributeError:
        pass
    log.debug("The KafkaConsumer has closed.")
[ "def", "close", "(", "self", ",", "autocommit", "=", "True", ")", ":", "if", "self", ".", "_closed", ":", "return", "log", ".", "debug", "(", "\"Closing the KafkaConsumer.\"", ")", "self", ".", "_closed", "=", "True", "self", ".", "_coordinator", ".", "c...
36.208333
17.541667
def anncluster(c, clus_obj, db, type_ann, feature_id="name"):
    """intersect transcription position with annotation files

    :param c: intersection result iterable exposing .features(), where
        each row mixes cluster columns with annotation columns
    :param clus_obj: cluster container with .clus and .loci maps
    :param db: annotation file name; its extension is stripped and the
        stem used as the annotation database key
    :param type_ann: "bed" or "gtf", selects how annotation columns are
        read
    :param feature_id: GTF attribute used as the feature tag
    :return: clus_obj with per-locus annotations attached
    """
    # Fixed column offsets for the cluster part of each row:
    # start, end, cluster id, locus id, strand.
    id_sa, id_ea, id_id, id_idl, id_sta = 1, 2, 3, 4, 5
    if type_ann == "bed":
        # Offsets of the annotation part when the db was a BED file.
        id_sb = 7
        id_eb = 8
        id_stb = 11
        id_tag = 9
    ida = 0
    clus_id = clus_obj.clus
    loci_id = clus_obj.loci
    db = os.path.splitext(db)[0]
    logger.debug("Type:%s\n" % type_ann)
    for cols in c.features():
        if type_ann == "gtf":
            # NOTE(review): this rebinds ``db`` per row from the GTF line,
            # replacing the file-stem key set above — confirm intended.
            cb, sb, eb, stb, db, tag = read_gtf_line(cols[6:], feature_id)
        else:
            sb = int(cols[id_sb])
            eb = int(cols[id_eb])
            stb = cols[id_stb]
            tag = cols[id_tag]
        id = int(cols[id_id])
        idl = int(cols[id_idl])
        if (id in clus_id):
            clus = clus_id[id]
            sa = int(cols[id_sa])
            ea = int(cols[id_ea])
            ida += 1
            # Relative 5'/3' offsets of the cluster inside the feature.
            lento5, lento3, strd = _position_in_feature([sa, ea, cols[id_sta]], [sb, eb, stb])
            if db in loci_id[idl].db_ann:
                # Extend the existing annotation set for this db.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = loci_id[idl].db_ann[db]
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            else:
                # First annotation from this db for this locus.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = dbannotation(1)
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            clus_id[id] = clus
    clus_obj.clus = clus_id
    clus_obj.loci = loci_id
    return clus_obj
[ "def", "anncluster", "(", "c", ",", "clus_obj", ",", "db", ",", "type_ann", ",", "feature_id", "=", "\"name\"", ")", ":", "id_sa", ",", "id_ea", ",", "id_id", ",", "id_idl", ",", "id_sta", "=", "1", ",", "2", ",", "3", ",", "4", ",", "5", "if", ...
35.302326
13.813953
def render(dson_input, saltenv='base', sls='', **kwargs):
    '''
    Accepts DSON data as a string or as a file object and runs it through the
    JSON parser.

    :rtype: A Python data structure
    '''
    # File-like inputs are read into a string first.
    if not isinstance(dson_input, six.string_types):
        dson_input = dson_input.read()

    log.debug('DSON input = %s', dson_input)

    # Strip a leading shebang line (e.g. '#!dson') when present.
    if dson_input.startswith('#!'):
        dson_input = dson_input[(dson_input.find('\n') + 1):]
    if not dson_input.strip():
        return {}

    return dson.loads(dson_input)
[ "def", "render", "(", "dson_input", ",", "saltenv", "=", "'base'", ",", "sls", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "dson_input", ",", "six", ".", "string_types", ")", ":", "dson_input", "=", "dson_input", ".", ...
29.941176
20.764706
def _initialize_progressbar(self):
    """
    Initialize the progressbar.

    :return: None
    """
    # maxval is scaled (10000 * 100) so progress can be reported with
    # sub-percent granularity using integer updates.
    self._progressbar = progressbar.ProgressBar(widgets=Analysis._PROGRESS_WIDGETS, maxval=10000 * 100).start()
[ "def", "_initialize_progressbar", "(", "self", ")", ":", "self", ".", "_progressbar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "Analysis", ".", "_PROGRESS_WIDGETS", ",", "maxval", "=", "10000", "*", "100", ")", ".", "start", "(", ")" ]
32.428571
20.714286
def extract_alphabet(alphabet, inputdata, fixed_start = False):
    """Return the parts of ``inputdata`` parseable with ``alphabet``.

    Scans every substring (or only prefixes when ``fixed_start`` is True)
    and keeps those the alphabet's lexer tokenizes as exactly one token,
    then removes results fully contained in larger ones.

    :param alphabet: grammar alphabet used to build the lexer
    :param inputdata: sequence to scan
    :param fixed_start: when True, only substrings starting at index 0
        are considered
    :returns: list of PositionToken objects covering the matching spans
    """
    if not inputdata:
        return []
    base_alphabet = alphabet.alphabet
    lexer = lexer_factory(alphabet, base_alphabet)
    totallen = len(inputdata)
    maxl = totallen
    minl = 1
    if fixed_start:
        max_start = 1
    else:
        max_start = totallen
    result = []
    for i in range(max_start):
        for j in range(i+minl, min(i+maxl, totallen) + 1):
            try:
                lexed = lexer(inputdata[i:j])
                if lexed and len(lexed) == 1:
                    result.append((i,j, inputdata[i:j], lexed[0].gd))
                elif lexed:
                    # More than one token: treat like a lexing failure.
                    raise Exception
            except Exception:
                # Bug fix: this was a bare ``except:``, which also
                # swallowed SystemExit and KeyboardInterrupt. Narrowed to
                # Exception; lexing failures are still skipped.
                continue
    result = filter_subsets(result)
    return [PositionToken(content, gd, left, right) for (left, right, content, gd) in result]
[ "def", "extract_alphabet", "(", "alphabet", ",", "inputdata", ",", "fixed_start", "=", "False", ")", ":", "if", "not", "inputdata", ":", "return", "[", "]", "base_alphabet", "=", "alphabet", ".", "alphabet", "lexer", "=", "lexer_factory", "(", "alphabet", ",...
32.354839
17.387097
def get_options(option_type, from_options):
    """Extract the options belonging to one option type from all options.

    For each key declared by ``option_type``, the bare key is looked up
    first, then the prefixed key (``option_type.prefix + key``); a key
    present in neither maps to the empty string.

    :param option_type: object exposing ``keys`` (iterable of option
        names) and ``prefix`` (string prepended to prefixed variants)
    :param from_options: dictionary containing all options
    :return: dict mapping each declared key to its resolved value
    """
    selected = dict()
    for option_name in option_type.keys:
        prefixed_name = f'{option_type.prefix}{option_name}'
        if option_name in from_options:
            selected[option_name] = from_options.get(option_name)
        elif prefixed_name in from_options:
            selected[option_name] = from_options.get(prefixed_name)
        else:
            selected[option_name] = ''
    return selected
[ "def", "get_options", "(", "option_type", ",", "from_options", ")", ":", "_options", "=", "dict", "(", ")", "for", "key", "in", "option_type", ".", "keys", ":", "key_with_prefix", "=", "f'{option_type.prefix}{key}'", "if", "key", "not", "in", "from_options", "...
41.55
23.15
def count_single_dots(self):
    """Return the number of strokes in this recording that are a single
    dot, i.e. consist of exactly one point.
    """
    # A stroke is a list of points; a "dot" is a one-point stroke.
    return sum(1 for stroke in self.get_pointlist() if len(stroke) == 1)
[ "def", "count_single_dots", "(", "self", ")", ":", "pointlist", "=", "self", ".", "get_pointlist", "(", ")", "single_dots", "=", "0", "for", "stroke", "in", "pointlist", ":", "if", "len", "(", "stroke", ")", "==", "1", ":", "single_dots", "+=", "1", "r...
33.222222
7.444444
def Tethering_bind(self, port):
    """
    Function path: Tethering.bind
        Domain: Tethering
        Method name: bind

        Parameters:
            Required arguments:
                'port' (type: integer) -> Port number to bind.
        No return value.

        Description: Request browser port binding.
    """
    # Validate the argument type up front with the protocol's error text.
    type_error = ("Argument 'port' must be of type '['int']'. Received type: '%s'"
        % type(port))
    assert isinstance(port, (int,)), type_error
    return self.synchronous_command('Tethering.bind', port=port)
[ "def", "Tethering_bind", "(", "self", ",", "port", ")", ":", "assert", "isinstance", "(", "port", ",", "(", "int", ",", ")", ")", ",", "\"Argument 'port' must be of type '['int']'. Received type: '%s'\"", "%", "type", "(", "port", ")", "subdom_funcs", "=", "self...
26.833333
18.944444
def validate_args(args):
    '''
    Apply custom validation and actions based on parsed arguments.

    Parameters
    ----------
    args : argparse.Namespace
        Result from ``parse_args`` method of ``argparse.ArgumentParser``
        instance.

    Returns
    -------
    argparse.Namespace
        Reference to input ``args``, which have been validated/updated.
    '''
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))
    if getattr(args, 'command', None) == 'install':
        # NOTE: ``print >>`` is Python 2 syntax; this module targets Python 2.
        if args.requirements_file and not args.requirements_file.isfile():
            print >> sys.stderr, ('Requirements file not available: {}'
                                  .format(args.requirements_file))
            raise SystemExit(-1)
        elif not args.plugin and not args.requirements_file:
            print >> sys.stderr, ('Requirements file or at least one plugin '
                                  'must be specified.')
            raise SystemExit(-2)
    if hasattr(args, 'server_url'):
        logger.debug('Using MicroDrop index server: "%s"', args.server_url)
        args.server_url = SERVER_URL_TEMPLATE % args.server_url
    # Resolve the plugins directory: default location when nothing is
    # given, derived from the config file, or the explicit path.
    if all([args.plugins_directory is None, args.config_file is None]):
        args.plugins_directory = get_plugins_directory()
    elif args.plugins_directory is None:
        args.config_file = args.config_file.realpath()
        args.plugins_directory = get_plugins_directory(config_path=
                                                       args.config_file)
    else:
        args.plugins_directory = args.plugins_directory.realpath()
    return args
[ "def", "validate_args", "(", "args", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "getattr", "(", "logging", ",", "args", ".", "log_level", ".", "upper", "(", ")", ")", ")", "if", "getattr", "(", "args", ",", "'command'", ",", "None", ...
40.769231
23.435897
def _erase_display(self, value):
    """
    Erases display.

    :param value: erase mode — 0 erases from the cursor to the end,
        1 erases from the start to the cursor, any other value erases
        everything (ANSI-style semantics).
    """
    if value == 0:
        # delete end of line
        self._cursor.movePosition(self._cursor.End, self._cursor.KeepAnchor)
    elif value == 1:
        # delete start of line
        self._cursor.movePosition(self._cursor.Start, self._cursor.KeepAnchor)
    else:
        # delete whole line
        self._cursor.movePosition(self._cursor.Start)
        self._cursor.movePosition(self._cursor.End, self._cursor.KeepAnchor)
    self._cursor.removeSelectedText()
    # Remember where the cursor ended up after the deletion.
    self._last_cursor_pos = self._cursor.position()
[ "def", "_erase_display", "(", "self", ",", "value", ")", ":", "if", "value", "==", "0", ":", "# delete end of line", "self", ".", "_cursor", ".", "movePosition", "(", "self", ".", "_cursor", ".", "End", ",", "self", ".", "_cursor", ".", "KeepAnchor", ")"...
39.25
16.5
def last_event(self):
    """
    The mouse event immediately prior to this one.

    This property is None when no mouse buttons are pressed.
    """
    if self.mouse_event.last_event is None:
        return None
    # Return a copy of this event rewired to the previous mouse event so
    # the current instance is left untouched.
    ev = self.copy()
    ev.mouse_event = self.mouse_event.last_event
    return ev
[ "def", "last_event", "(", "self", ")", ":", "if", "self", ".", "mouse_event", ".", "last_event", "is", "None", ":", "return", "None", "ev", "=", "self", ".", "copy", "(", ")", "ev", ".", "mouse_event", "=", "self", ".", "mouse_event", ".", "last_event"...
35.222222
12.555556
def value_to_jam(value, methods=False):
    """Makes a token to refer to a Python value inside Jam language code.

    The token is merely a string that can be passed around in Jam code and
    eventually passed back. For example, we might want to pass PropertySet
    instance to a tag function and it might eventually call back
    to virtual_target.add_suffix_and_prefix, passing the same instance.

    For values that are classes, we'll also make class methods callable
    from Jam.

    Note that this is necessary to make a bit more of existing Jamfiles work.
    This trick should not be used to much, or else the performance benefits
    of Python port will be eaten.
    """
    global __value_id
    # Reuse an existing token if this value was exported before.
    r = __python_to_jam.get(value, None)
    if r:
        return r
    exported_name = '###_' + str(__value_id)
    __value_id = __value_id + 1
    # Maintain the bidirectional token <-> value mapping.
    __python_to_jam[value] = exported_name
    __jam_to_python[exported_name] = value
    # NOTE: types.InstanceType exists only on Python 2 (old-style class
    # instances); on Python 3 this branch can never match.
    if methods and type(value) == types.InstanceType:
        for field_name in dir(value):
            field = getattr(value, field_name)
            # Export public callables so Jam rules can invoke them.
            if callable(field) and not field_name.startswith("__"):
                bjam.import_rule("", exported_name + "." + field_name, field)
    return exported_name
[ "def", "value_to_jam", "(", "value", ",", "methods", "=", "False", ")", ":", "global", "__value_id", "r", "=", "__python_to_jam", ".", "get", "(", "value", ",", "None", ")", "if", "r", ":", "return", "r", "exported_name", "=", "'###_'", "+", "str", "("...
35.882353
22.941176
def gfstep(time):
    """
    Return the time step set by the most recent call to :func:`gfsstp`.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html

    :param time: Ignored ET value.
    :type time: float
    :return: Time step to take.
    :rtype: float
    """
    # Marshal through ctypes; ``step`` is an out-parameter filled by CSPICE.
    time = ctypes.c_double(time)
    step = ctypes.c_double()
    libspice.gfstep_c(time, ctypes.byref(step))
    return step.value
[ "def", "gfstep", "(", "time", ")", ":", "time", "=", "ctypes", ".", "c_double", "(", "time", ")", "step", "=", "ctypes", ".", "c_double", "(", ")", "libspice", ".", "gfstep_c", "(", "time", ",", "ctypes", ".", "byref", "(", "step", ")", ")", "retur...
27.066667
18
def recommendations(self, **kwargs):
    """
    Get a list of recommended movies for a movie.

    Args:
        language: (optional) ISO 639-1 code.
        page: (optional) Minimum value of 1.  Expected value is an integer.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    # Fetch the endpoint and mirror the response onto instance attributes.
    response = self._GET(self._get_id_path('recommendations'), kwargs)
    self._set_attrs_to_values(response)
    return response
[ "def", "recommendations", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'recommendations'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", ...
30.875
18.5
def is_standalone(text, start, end):
    """check if the string text[start:end] is standalone by checking
    forwards and backwards for blankspaces

    :text: the full string being scanned
    :(start, end): span to test for standalone-ness
    :returns: the start of next index after text[start:end]
    """
    left = False
    # Walk backwards over spaces/tabs (but not newlines) before ``start``.
    start -= 1
    while start >= 0 and text[start] in spaces_not_newline:
        start -= 1
    # Standalone on the left if we reached the string start or a newline.
    if start < 0 or text[start] == '\n':
        left = True
    # Standalone on the right if ``re_space`` matches just after ``end``.
    right = re_space.match(text, end)
    return (start+1, right.end()) if left and right else None
[ "def", "is_standalone", "(", "text", ",", "start", ",", "end", ")", ":", "left", "=", "False", "start", "-=", "1", "while", "start", ">=", "0", "and", "text", "[", "start", "]", "in", "spaces_not_newline", ":", "start", "-=", "1", "if", "start", "<",...
28.555556
18.222222
def _bracket_complete_sig(self, symbol, fullsymbol):
    """Returns the call signature and docstring for the executable
    immediately preceding a bracket '(' that was typed.

    :param symbol: last element of the symbol chain (after the final %).
    :param fullsymbol: the full sym%sym%... chain being completed.
    :returns: dict describing the signature/dimension suggestion, or {}
        when the member cannot be resolved.
    """
    if symbol != fullsymbol:
        #We have a sym%sym%... chain and the completion just needs to
        #be the signature of the member method.
        target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol)
        if symbol in target.executables:
            child = target.executables[symbol]
            return self._compile_signature(child.target, child.name)
        elif symbol in target.members:
            #We are dealing with a dimension request on an array that
            #is a member of the type.
            child = target.members[symbol]
            return self._bracket_dim_suggest(child)
        else:
            # Unknown member of the derived type: nothing to suggest.
            return {}
    else:
        #We must be dealing with a regular executable or builtin fxn
        #or a regular variable dimension.
        iexec = self._bracket_exact_exec(symbol)
        if iexec is not None:
            #It is indeed a function we are completing for.
            return self._compile_signature(iexec, iexec.name)
        else:
            #We need to look at local and global variables to find the
            #variable declaration and dimensionality.
            ivar = self._bracket_exact_var(symbol)
            return self._bracket_dim_suggest(ivar)
[ "def", "_bracket_complete_sig", "(", "self", ",", "symbol", ",", "fullsymbol", ")", ":", "if", "symbol", "!=", "fullsymbol", ":", "#We have a sym%sym%... chain and the completion just needs to", "#be the signature of the member method.", "target", ",", "targmod", "=", "self...
49.966667
16.7
def GetNotificationShard(self, queue):
    """Gets a single shard for a given queue.

    Shards are chosen round-robin via a per-queue, class-level counter so
    successive calls spread notifications across the available shards.
    """
    queue_name = str(queue)
    # Round-robin counter shared across all QueueManager instances.
    QueueManager.notification_shard_counters.setdefault(queue_name, 0)
    QueueManager.notification_shard_counters[queue_name] += 1
    notification_shard_index = (
        QueueManager.notification_shard_counters[queue_name] %
        self.num_notification_shards)
    # Shard 0 is the base queue itself; others get a numeric suffix.
    if notification_shard_index > 0:
        return queue.Add(str(notification_shard_index))
    else:
        return queue
[ "def", "GetNotificationShard", "(", "self", ",", "queue", ")", ":", "queue_name", "=", "str", "(", "queue", ")", "QueueManager", ".", "notification_shard_counters", ".", "setdefault", "(", "queue_name", ",", "0", ")", "QueueManager", ".", "notification_shard_count...
40.916667
14.083333
def get_embedded_tweet(tweet):
    """
    Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary

    Args:
        tweet (Tweet): A Tweet object (not simply a dict)

    Returns:
        dict (or None, if the Tweet is neither a quote tweet or a Retweet):
        a dictionary representing the quote Tweet or the Retweet
    """
    # Prefer the Retweet payload; fall back to the quote payload (which
    # may itself be None when neither is present).
    embedded = tweet.retweeted_tweet
    if embedded is None:
        embedded = tweet.quoted_tweet
    return embedded
[ "def", "get_embedded_tweet", "(", "tweet", ")", ":", "if", "tweet", ".", "retweeted_tweet", "is", "not", "None", ":", "return", "tweet", ".", "retweeted_tweet", "elif", "tweet", ".", "quoted_tweet", "is", "not", "None", ":", "return", "tweet", ".", "quoted_t...
30.352941
19.176471
def _CreatePerformanceTarget(client, campaign_group_id): """Creates a performance target for the campaign group. Args: client: an AdWordsClient instance. campaign_group_id: an integer ID for the campaign group. """ # Get the CampaignGroupPerformanceTargetService. cgpt_service = client.GetService('CampaignGroupPerformanceTargetService', version='v201809') # Create the operation. operations = [{ 'operator': 'ADD', # Create the performance target. 'operand': { 'campaignGroupId': campaign_group_id, 'performanceTarget': { # Keep the CPC for the campaigns < $3. 'efficiencyTargetType': 'CPC_LESS_THAN_OR_EQUAL_TO', 'efficiencyTargetValue': 3000000, # Keep the maximum spend under $50. 'spendTargetType': 'MAXIMUM', 'spendTarget': { 'microAmount': 500000000 }, # Aim for at least 3000 clicks. 'volumeGoalType': 'MAXIMIZE_CLICKS', 'volumeTargetValue': 3000, # Start the performance target today, and run it for the next 90 # days. 'startDate': datetime.datetime.now().strftime('%Y%m%d'), 'endDate': (datetime.datetime.now() + datetime.timedelta(90)).strftime('%Y%m%d') } } }] cgpt = cgpt_service.mutate(operations)['value'][0] # Display the results. print ('Campaign performance target with ID "%d" was added for campaign ' 'group ID "%d".' % (cgpt['id'], cgpt['campaignGroupId']))
[ "def", "_CreatePerformanceTarget", "(", "client", ",", "campaign_group_id", ")", ":", "# Get the CampaignGroupPerformanceTargetService.", "cgpt_service", "=", "client", ".", "GetService", "(", "'CampaignGroupPerformanceTargetService'", ",", "version", "=", "'v201809'", ")", ...
37.418605
18.674419
def valuePasses(self, value):
    '''Returns whether this value passes this filter'''
    # Look up the comparator for this filter's operator, then apply it to
    # the candidate value against the filter's reference value.
    compare = self._conditional_cmp[self.op]
    return compare(value, self.value)
[ "def", "valuePasses", "(", "self", ",", "value", ")", ":", "return", "self", ".", "_conditional_cmp", "[", "self", ".", "op", "]", "(", "value", ",", "self", ".", "value", ")" ]
48
15.333333
def update_phase(self, environment, data, prediction, user, item, correct, time, answer_id, **kwargs):
    """
    After the prediction update the environment and persist some
    information for the predictive model.  Default implementation is a
    no-op; subclasses override it.

    Args:
        environment (proso.models.environment.Environment):
            environment where all the important data are persist
        data (object):
            data from the prepare phase
        prediction (float):
            predicted probability for this answer — TODO confirm type
        user (int):
            identifier of the user answering the question
        item (int):
            identifier of the question item
        correct (bool):
            correctness of the answer
        time:
            time of the answer — TODO confirm type
        answer_id (int):
            identifier of the answer
    """
    pass
[ "def", "update_phase", "(", "self", ",", "environment", ",", "data", ",", "prediction", ",", "user", ",", "item", ",", "correct", ",", "time", ",", "answer_id", ",", "*", "*", "kwargs", ")", ":", "pass" ]
37.944444
18.5
def find_link(self, *args, **kwargs):
    """Find and return a link, as a bs4.element.Tag object.

    The search can be refined by specifying any argument that is accepted
    by :func:`links`. If several links match, return the first one found.

    If no link is found, raise :class:`LinkNotFoundError`.
    """
    links = self.links(*args, **kwargs)
    # Idiom fix: test emptiness directly instead of comparing len() to 0.
    if not links:
        raise LinkNotFoundError()
    return links[0]
[ "def", "find_link", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "links", "=", "self", ".", "links", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "links", ")", "==", "0", ":", "raise", "LinkNotFoundError"...
36.461538
18.307692
def _InitApiApprovalFromAff4Object(api_approval, approval_obj):
    """Initializes Api(Client|Hunt|CronJob)Approval from an AFF4 object.

    Copies id/reason/requestor, validity, notified users, and email
    metadata from ``approval_obj`` onto ``api_approval`` and returns it.
    """
    api_approval.id = approval_obj.urn.Basename()
    api_approval.reason = approval_obj.Get(approval_obj.Schema.REASON)
    api_approval.requestor = approval_obj.Get(approval_obj.Schema.REQUESTOR)
    # We should check the approval validity from the standpoint of the user
    # who had requested it.
    test_token = access_control.ACLToken(
        username=approval_obj.Get(approval_obj.Schema.REQUESTOR))
    try:
        approval_obj.CheckAccess(test_token)
        api_approval.is_valid = True
    except access_control.UnauthorizedAccess as e:
        # An invalid approval is still returned, with the failure reason.
        api_approval.is_valid = False
        api_approval.is_valid_message = utils.SmartStr(e)
    notified_users = approval_obj.Get(approval_obj.Schema.NOTIFIED_USERS)
    if notified_users:
        api_approval.notified_users = sorted(
            u.strip() for u in notified_users.split(","))
    api_approval.email_message_id = approval_obj.Get(
        approval_obj.Schema.EMAIL_MSG_ID)
    email_cc = approval_obj.Get(approval_obj.Schema.EMAIL_CC)
    email_cc_addresses = sorted(s.strip() for s in email_cc.split(","))
    # CC list excludes users that were already directly notified.
    api_approval.email_cc_addresses = (
        set(email_cc_addresses) - set(api_approval.notified_users))
    api_approval.approvers = sorted(approval_obj.GetNonExpiredApprovers())
    return api_approval
[ "def", "_InitApiApprovalFromAff4Object", "(", "api_approval", ",", "approval_obj", ")", ":", "api_approval", ".", "id", "=", "approval_obj", ".", "urn", ".", "Basename", "(", ")", "api_approval", ".", "reason", "=", "approval_obj", ".", "Get", "(", "approval_obj...
39.264706
21.117647
def in1d_sorted(ar1, ar2):
    """ Does the same than np.in1d but uses the fact that ar1 and ar2 are
    sorted. Is therefore much faster.
    """
    # Empty input: nothing to test / nothing to match against.
    if ar1.shape[0] == 0 or ar2.shape[0] == 0:
        return []
    positions = ar2.searchsorted(ar1)
    # searchsorted can return len(ar2) for values past the end; clamp such
    # indices to 0 so the fancy indexing below stays in bounds (the
    # equality test then correctly reports a non-match).
    positions[positions == len(ar2)] = 0
    return ar2[positions] == ar1
[ "def", "in1d_sorted", "(", "ar1", ",", "ar2", ")", ":", "if", "ar1", ".", "shape", "[", "0", "]", "==", "0", "or", "ar2", ".", "shape", "[", "0", "]", "==", "0", ":", "# check for empty arrays to avoid crash", "return", "[", "]", "inds", "=", "ar2", ...
33.6
21.8
def get_node(manager, handle_id, legacy=True):
    """
    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: Backwards compatibility — when True return the node's
        plain property dict instead of the driver Node object

    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean

    :raises exceptions.NodeNotFound: if no node has the given handle_id
    :rtype: dict|neo4j.v1.types.Node
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        result = s.run(q, {'handle_id': handle_id}).single()
        if result:
            if legacy:
                return result['n'].properties
            return result['n']
    raise exceptions.NodeNotFound(manager, handle_id)
[ "def", "get_node", "(", "manager", ",", "handle_id", ",", "legacy", "=", "True", ")", ":", "q", "=", "'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'", "with", "manager", ".", "session", "as", "s", ":", "result", "=", "s", ".", "run", "(", "q", ",", "...
32.190476
15.904762
def deserialize_single_input_output(self, transformer, node_path, attributes_map=None): """ :attributes_map: Map of attributes names. For example StandardScaler has `mean_` but is serialized as `mean` :param transformer: Scikit or Pandas transformer :param node: bundle.ml node json file :param model: bundle.ml model json file :return: Transformer """ # Load the model file with open("{}/model.json".format(node_path)) as json_data: model_j = json.load(json_data) # Set Transformer Attributes attributes = model_j['attributes'] for attribute in attributes.keys(): value_key = [key for key in attributes[attribute].keys() if key in ['string', 'boolean','long', 'double', 'data_shape']][0] if attributes_map is not None and attribute in attributes_map.keys(): setattr(transformer, attributes_map[attribute], attributes[attribute][value_key]) else: setattr(transformer, attribute, attributes[attribute][value_key]) transformer.op = model_j['op'] # Load the node file with open("{}/node.json".format(node_path)) as json_data: node_j = json.load(json_data) transformer.name = node_j['name'] transformer.input_features = self._node_features_format(node_j['shape']['inputs'][0]['name']) transformer.output_features = self._node_features_format(node_j['shape']['outputs'][0]['name']) return transformer
[ "def", "deserialize_single_input_output", "(", "self", ",", "transformer", ",", "node_path", ",", "attributes_map", "=", "None", ")", ":", "# Load the model file", "with", "open", "(", "\"{}/model.json\"", ".", "format", "(", "node_path", ")", ")", "as", "json_dat...
46.575758
25.181818
def convert(self, pedalboard_path, system_effect=None):
    """
    :param Path pedalboard_path: Path that the pedalboard has been persisted.
           Generally is in format `path/to/pedalboard/name.pedalboard`
    :param SystemEffect system_effect: Effect that contains the audio interface
           outputs and inputs or None for **auto discover**
    :return Pedalboard: Pedalboard loaded
    """
    info = self.get_pedalboard_info(pedalboard_path)
    # Auto-discover the system effect when none was supplied.
    if system_effect is None:
        system_effect = self.discover_system_effect(info)
    pedalboard = Pedalboard(info['title'])
    effects_instance = {}
    for effect_data in info['plugins']:
        effect = self._generate_effect(effect_data)
        pedalboard.append(effect)
        # Remember each effect by its instance id for connection lookup.
        effects_instance[effect_data['instance']] = effect
    try:
        for connection_data in info['connections']:
            output_port = self._get_port(connection_data['source'], effects_instance, system_effect)
            input_port = self._get_port(connection_data['target'], effects_instance, system_effect)
            pedalboard.connect(output_port, input_port)
    except PortNotFoundError as e:
        # Optionally tolerate dangling connections instead of failing hard.
        if self.ignore_errors:
            print("WARNING:", e)
        else:
            raise e
    return pedalboard
[ "def", "convert", "(", "self", ",", "pedalboard_path", ",", "system_effect", "=", "None", ")", ":", "info", "=", "self", ".", "get_pedalboard_info", "(", "pedalboard_path", ")", "if", "system_effect", "is", "None", ":", "system_effect", "=", "self", ".", "di...
40.028571
24.314286
def scale_cb(self, setting, value):
    """Handle callback related to image scaling."""
    # Translate the raw scale value into a zoom level and apply it.
    zoomlevel = self.zoom.calc_level(value)
    self.t_.set(zoomlevel=zoomlevel)
    # whence=0 forces a full redraw pass.
    self.redraw(whence=0)
[ "def", "scale_cb", "(", "self", ",", "setting", ",", "value", ")", ":", "zoomlevel", "=", "self", ".", "zoom", ".", "calc_level", "(", "value", ")", "self", ".", "t_", ".", "set", "(", "zoomlevel", "=", "zoomlevel", ")", "self", ".", "redraw", "(", ...
34.333333
10.5
def add_redistribution(self, protocol, route_map_name=None):
    """Adds a protocol redistribution to OSPF

    Args:
        protocol (str): protocol to redistribute
        route_map_name (str): route-map to be used to filter the protocols

    Returns:
        bool: True if the command completes successfully

    Exception:
        ValueError: This will be raised if the protocol pass is not one
            of the following: [rip, bgp, static, connected]
    """
    protocols = ['bgp', 'rip', 'static', 'connected']
    if protocol not in protocols:
        # Bug fix: the original implicit string concatenation produced the
        # garbled message "...must bebgp, connected..." (missing space).
        raise ValueError('redistributed protocol must be '
                         'bgp, connected, rip or static')
    if route_map_name is None:
        cmd = 'redistribute {}'.format(protocol)
    else:
        cmd = 'redistribute {} route-map {}'.format(protocol, route_map_name)
    return self.configure_ospf(cmd)
[ "def", "add_redistribution", "(", "self", ",", "protocol", ",", "route_map_name", "=", "None", ")", ":", "protocols", "=", "[", "'bgp'", ",", "'rip'", ",", "'static'", ",", "'connected'", "]", "if", "protocol", "not", "in", "protocols", ":", "raise", "Valu...
45.913043
19.043478
def make_headers(headers):
    """
    Make the cache control headers based on a previous request's
    response headers
    """
    # Map validator headers from the response onto the matching
    # conditional-request headers; absent validators are simply skipped.
    header_map = (('etag', 'if-none-match'),
                  ('last-modified', 'if-modified-since'))
    return {request_key: headers[response_key]
            for response_key, request_key in header_map
            if response_key in headers}
[ "def", "make_headers", "(", "headers", ")", ":", "out", "=", "{", "}", "if", "'etag'", "in", "headers", ":", "out", "[", "'if-none-match'", "]", "=", "headers", "[", "'etag'", "]", "if", "'last-modified'", "in", "headers", ":", "out", "[", "'if-modified-...
31.1
13.4
def load_data(path_dir):
    '''Load data, directory parameters, and accelerometer parameter names

    Args
    ----
    path_dir: str
        Path to the data directory

    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo
    # Directory name encodes the experiment parameters.
    exp_name = os.path.split(path_dir)[1]
    params_tag = pylleo.utils.parse_experiment_params(exp_name)
    # Load the Little Leonardo tag data
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    # NOTE(review): ``sample_f`` is read from enclosing scope — confirm it
    # is defined at module level before this function is called.
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
    # Get and curate the parameter names of the loaded dataframe
    params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
    params_data = [pylleo.utils.posix_string(p) for p in params_data]
    # Keep only accelerometer channels.
    params_data = [p for p in params_data if p.startswith('acc')]
    return data, params_tag, params_data
[ "def", "load_data", "(", "path_dir", ")", ":", "import", "os", "import", "pylleo", "exp_name", "=", "os", ".", "path", ".", "split", "(", "path_dir", ")", "[", "1", "]", "params_tag", "=", "pylleo", ".", "utils", ".", "parse_experiment_params", "(", "exp...
31.588235
24.588235
def getworker():
    ''' return settings dictionnary '''
    # Lazily initialise configuration on first access; the result is
    # memoised in class-level state so later calls are cheap.
    if not Configuration.worker_initialized:
        Configuration._initconf()
        Configuration.worker_settings = Configuration.settings['worker']
        Configuration.worker_initialized = True
    return Configuration.worker_settings
[ "def", "getworker", "(", ")", ":", "if", "not", "Configuration", ".", "worker_initialized", ":", "Configuration", ".", "_initconf", "(", ")", "Configuration", ".", "worker_settings", "=", "Configuration", ".", "settings", "[", "'worker'", "]", "Configuration", "...
36.555556
16.555556
def _reads_in_peaks(bam_file, peaks_file, sample):
    """Calculate number of reads in peaks"""
    # Without a peaks file there is nothing to count.
    if not peaks_file:
        return {}
    # RiP = reads in peaks: mapped reads overlapping the peak regions.
    rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file)
    return {"metrics": {"RiP": rip}}
[ "def", "_reads_in_peaks", "(", "bam_file", ",", "peaks_file", ",", "sample", ")", ":", "if", "not", "peaks_file", ":", "return", "{", "}", "rip", "=", "number_of_mapped_reads", "(", "sample", ",", "bam_file", ",", "bed_file", "=", "peaks_file", ")", "return"...
40.333333
14.666667
def _parse_doc(docs):
    """
    Converts a well-formed docstring into documentation
    to be fed into argparse.

    See signature_parser for details.

    shorts: (-k for --keyword -k, or "from" for "frm/from")
    metavars: (FILE for --input=FILE)
    helps: (docs for --keyword: docs)
    description: the stuff before
    epilog: the stuff after
    """
    name = "(?:[a-zA-Z][a-zA-Z0-9-_]*)"
    # ``re_var`` matches "var/short: help", ``re_opt`` matches
    # "-s, --long=METAVAR: help" style lines.
    re_var = re.compile(r"^ *(%s)(?: */(%s))? *:(.*)$" % (name, name))
    re_opt = re.compile(r"^ *(?:(-[a-zA-Z0-9]),? +)?--(%s)(?: *=(%s))? *:(.*)$" % (name, name))
    shorts, metavars, helps, description, epilog = {}, {}, {}, "", ""
    if docs:
        for line in docs.split("\n"):
            line = line.strip()
            # remove starting ':param'
            if line.startswith(':param'):
                line = line[len(':param'):]
            # skip ':rtype:' row
            if line.startswith(':rtype:'):
                continue
            # "----" terminates the parsed section.
            if line.strip() == "----":
                break
            m = re_var.match(line)
            if m:
                # Accumulated epilog text belongs to the previous entry.
                if epilog:
                    helps[prev] += epilog.strip()
                    epilog = ""
                if m.group(2):
                    shorts[m.group(1)] = m.group(2)
                helps[m.group(1)] = m.group(3).strip()
                prev = m.group(1)
                previndent = len(line) - len(line.lstrip())
                continue
            m = re_opt.match(line)
            if m:
                if epilog:
                    helps[prev] += epilog.strip()
                    epilog = ""
                name = m.group(2).replace("-", "_")
                helps[name] = m.group(4)
                prev = name
                if m.group(1):
                    shorts[name] = m.group(1)
                if m.group(3):
                    metavars[name] = m.group(3)
                previndent = len(line) - len(line.lstrip())
                continue
            # NOTE(review): ``line`` was stripped above, so the
            # indentation-based continuation check below may never
            # trigger — confirm intended behaviour.
            if helps:
                if line.startswith(" " * (previndent + 1)):
                    helps[prev] += "\n" + line.strip()
                else:
                    epilog += "\n" + line.strip()
            else:
                description += "\n" + line.strip()
            if line.strip():
                previndent = len(line) - len(line.lstrip())
    return shorts, metavars, helps, description, epilog
[ "def", "_parse_doc", "(", "docs", ")", ":", "name", "=", "\"(?:[a-zA-Z][a-zA-Z0-9-_]*)\"", "re_var", "=", "re", ".", "compile", "(", "r\"^ *(%s)(?: */(%s))? *:(.*)$\"", "%", "(", "name", ",", "name", ")", ")", "re_opt", "=", "re", ".", "compile", "(", "r\"^ ...
28.853659
18.609756
def run(self, debug=False, reload=None):
    """
    Convenience method for running bots in getUpdates mode

    :param bool debug: Enable debug logging and automatic reloading
    :param bool reload: Automatically reload bot on code change

    :Example:

    >>> if __name__ == '__main__':
    >>>     bot.run()
    """
    loop = asyncio.get_event_loop()
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
    # Reloading defaults to the debug setting unless given explicitly.
    if reload is None:
        reload = debug
    bot_loop = asyncio.ensure_future(self.loop())
    try:
        if reload:
            loop.run_until_complete(run_with_reloader(loop, bot_loop, self.stop))
        else:
            loop.run_until_complete(bot_loop)
    # User cancels
    except KeyboardInterrupt:
        logger.debug("User cancelled")
        bot_loop.cancel()
        self.stop()
    # Stop loop
    finally:
        # Newer aiohttp requires the client session to be closed explicitly.
        if AIOHTTP_23:
            loop.run_until_complete(self.session.close())
        logger.debug("Closing loop")
        loop.stop()
        loop.close()
[ "def", "run", "(", "self", ",", "debug", "=", "False", ",", "reload", "=", "None", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", "if", "debug", "else", "l...
26.285714
22.095238
def fetch_github_token(self):
    """
    Fetch GitHub token. First try to use variable provided
    by --token option, otherwise try to fetch it from git config
    and last CHANGELOG_GITHUB_TOKEN env variable.

    :returns: Nothing
    """
    if not self.options.token:
        try:
            for v in GH_CFG_VARS:
                cmd = ['git', 'config', '--get', '{0}'.format(v)]
                self.options.token = subprocess.Popen(
                    cmd, stdout=subprocess.PIPE).communicate()[0].strip()
                if self.options.token:
                    break
        except (subprocess.CalledProcessError, OSError):
            # Bug fix: the original caught WindowsError, which does not
            # exist on non-Windows platforms and raised a NameError there.
            # OSError covers both cases (WindowsError aliases OSError on
            # Windows); a missing git binary is now handled everywhere.
            pass
    if not self.options.token:
        self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN)
    if not self.options.token:
        print(NO_TOKEN_PROVIDED)
[ "def", "fetch_github_token", "(", "self", ")", ":", "if", "not", "self", ".", "options", ".", "token", ":", "try", ":", "for", "v", "in", "GH_CFG_VARS", ":", "cmd", "=", "[", "'git'", ",", "'config'", ",", "'--get'", ",", "'{0}'", ".", "format", "(",...
38.304348
17
def input_from_cons(constupl, datas):
    ' solve bytes in |datas| based on '
    def make_chr(c):
        # Solver may return a value chr() cannot handle; fall back to the
        # raw value in that case.
        try:
            return chr(c)
        except Exception:
            return c
    newset = constraints_to_constraintset(constupl)
    ret = ''
    for data in datas:
        for c in data:
            # Concretise each symbolic byte under the constraint set.
            ret += make_chr(solver.get_value(newset, c))
    return ret
[ "def", "input_from_cons", "(", "constupl", ",", "datas", ")", ":", "def", "make_chr", "(", "c", ")", ":", "try", ":", "return", "chr", "(", "c", ")", "except", "Exception", ":", "return", "c", "newset", "=", "constraints_to_constraintset", "(", "constupl",...
25.357143
18.5
def run(self, action):
    """
    Runs through the phases defined by :attr:`action`.

    :param str action:  Either ``deploy`` or ``inventory``.
    """
    deployer = self.__class__.__name__
    log.info('Running %s...' % deployer)
    try:
        if action == 'deploy':
            self.deploy()
        elif action == 'inventory':
            self.inventory()
    except BangError as e:
        # Log domain errors before propagating them to the caller.
        log.error(e)
        raise
    log.info('%s complete.' % deployer)
[ "def", "run", "(", "self", ",", "action", ")", ":", "deployer", "=", "self", ".", "__class__", ".", "__name__", "log", ".", "info", "(", "'Running %s...'", "%", "deployer", ")", "try", ":", "if", "action", "==", "'deploy'", ":", "self", ".", "deploy", ...
28.388889
13.944444