text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def sli_run(parameters=object(), fname='microcircuit.sli', verbosity='M_ERROR'): ''' Takes parameter-class and name of main sli-script as input, initiating the simulation. kwargs: :: parameters : object, parameter class instance fname : str, path to sli codes to be executed verbosity : 'str', nest verbosity flag ''' # Load parameters from params file, and pass them to nest # Python -> SLI send_nest_params_to_sli(vars(parameters)) #set SLI verbosity nest.sli_run("%s setverbosity" % verbosity) # Run NEST/SLI simulation nest.sli_run('(%s) run' % fname)
[ "def", "sli_run", "(", "parameters", "=", "object", "(", ")", ",", "fname", "=", "'microcircuit.sli'", ",", "verbosity", "=", "'M_ERROR'", ")", ":", "# Load parameters from params file, and pass them to nest", "# Python -> SLI", "send_nest_params_to_sli", "(", "vars", "...
29.5
19.863636
def _mimetext(self, text, subtype='plain'): """Creates a MIMEText object with the given subtype (default: 'plain') If the text is unicode, the utf-8 charset is used. """ charset = self.charset or 'utf-8' return MIMEText(text, _subtype=subtype, _charset=charset)
[ "def", "_mimetext", "(", "self", ",", "text", ",", "subtype", "=", "'plain'", ")", ":", "charset", "=", "self", ".", "charset", "or", "'utf-8'", "return", "MIMEText", "(", "text", ",", "_subtype", "=", "subtype", ",", "_charset", "=", "charset", ")" ]
49.333333
7.833333
def refuse_transfer(transfer, comment=None): '''Refuse an incoming a transfer request''' TransferResponsePermission(transfer).test() transfer.responded = datetime.now() transfer.responder = current_user._get_current_object() transfer.status = 'refused' transfer.response_comment = comment transfer.save() return transfer
[ "def", "refuse_transfer", "(", "transfer", ",", "comment", "=", "None", ")", ":", "TransferResponsePermission", "(", "transfer", ")", ".", "test", "(", ")", "transfer", ".", "responded", "=", "datetime", ".", "now", "(", ")", "transfer", ".", "responder", ...
31.272727
15.454545
def _refresh_state(self): """ Refresh the job info. """ # DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves # as a workaround. # TODO(Change this to use runner_results.state once it refreshes itself) dataflow_internal_job = ( self._runner_results._runner.dataflow_client.get_job(self._runner_results.job_id())) self._is_complete = str(dataflow_internal_job.currentState) in ['JOB_STATE_STOPPED', 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED'] self._fatal_error = getattr(self._runner_results._runner, 'last_error_msg', None) # Sometimes Dataflow does not populate runner.last_error_msg even if the job fails. if self._fatal_error is None and self._runner_results.state == 'FAILED': self._fatal_error = 'FAILED'
[ "def", "_refresh_state", "(", "self", ")", ":", "# DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves", "# as a workaround.", "# TODO(Change this to use runner_results.state once it refreshes itself)", "dataflow_internal_job", "=", "(", "self", ".", "...
62.625
34.3125
def _check_fill_title_column(self, column_index): ''' Same as _check_fill_title_row but for columns. ''' # Determine if the whole column is titles table_column = TableTranspose(self.table)[column_index] prior_column = TableTranspose(self.table)[column_index-1] if column_index > 0 else table_column for row_index in range(self.start[0], self.end[0]): if is_num_cell(table_column[row_index]) or is_num_cell(prior_column[row_index]): return # Since we're a title row, stringify the column self._stringify_column(column_index)
[ "def", "_check_fill_title_column", "(", "self", ",", "column_index", ")", ":", "# Determine if the whole column is titles", "table_column", "=", "TableTranspose", "(", "self", ".", "table", ")", "[", "column_index", "]", "prior_column", "=", "TableTranspose", "(", "se...
51
23.666667
def get_compressed_filename(self, filename): """If the given filename should be compressed, returns the compressed filename. A file can be compressed if: - It is a whitelisted extension - The compressed file does not exist - The compressed file exists by is older than the file itself Otherwise, it returns False. """ if not os.path.splitext(filename)[1][1:] in self.suffixes_to_compress: return False file_stats = None compressed_stats = None compressed_filename = '{}.{}'.format(filename, self.suffix) try: file_stats = os.stat(filename) compressed_stats = os.stat(compressed_filename) except OSError: # FileNotFoundError is for Python3 only pass if file_stats and compressed_stats: return (compressed_filename if file_stats.st_mtime > compressed_stats.st_mtime else False) else: return compressed_filename
[ "def", "get_compressed_filename", "(", "self", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "[", "1", ":", "]", "in", "self", ".", "suffixes_to_compress", ":", "return", "False", "file...
33.064516
18.451613
def validiate_webhook_signature(self, webhook, signature): """Validates a webhook signature from a webhook body + client secret Parameters webhook (string) The request body of the webhook. signature (string) The webhook signature specified in X-Uber-Signature header. """ digester = hmac.new(self.session.oauth2credential.client_secret, webhook, hashlib.sha256 ) return (signature == digester.hexdigest())
[ "def", "validiate_webhook_signature", "(", "self", ",", "webhook", ",", "signature", ")", ":", "digester", "=", "hmac", ".", "new", "(", "self", ".", "session", ".", "oauth2credential", ".", "client_secret", ",", "webhook", ",", "hashlib", ".", "sha256", ")"...
40.928571
14.571429
def resolve(input, representation, resolvers=None, get3d=False, **kwargs): """Resolve input to the specified output representation. :param string input: Chemical identifier to resolve :param string representation: Desired output representation :param list(string) resolvers: (Optional) Ordered list of resolvers to use :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable) :returns: Output representation or None :rtype: string or None :raises HTTPError: if CIR returns an error code :raises ParseError: if CIR response is uninterpretable """ # Take first result from XML query results = query(input, representation, resolvers, False, get3d, **kwargs) result = results[0].value if results else None return result
[ "def", "resolve", "(", "input", ",", "representation", ",", "resolvers", "=", "None", ",", "get3d", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Take first result from XML query", "results", "=", "query", "(", "input", ",", "representation", ",", "res...
48.875
19.5625
def load_joke(service_num=1): """Pulls the joke from the service based on the argument. It is expected that all services used will return a string when successful or None otherwise. """ result = { 1 : ronswanson.get_joke(), 2 : chucknorris.get_joke(), 3 : catfacts.get_joke(), 4 : dadjokes.get_joke(), }.get(service_num) return result
[ "def", "load_joke", "(", "service_num", "=", "1", ")", ":", "result", "=", "{", "1", ":", "ronswanson", ".", "get_joke", "(", ")", ",", "2", ":", "chucknorris", ".", "get_joke", "(", ")", ",", "3", ":", "catfacts", ".", "get_joke", "(", ")", ",", ...
27.071429
15.214286
def masked_middle_mfcc(self): """ Return the MFCC speech frames in the MIDDLE portion of the wave. :rtype: :class:`numpy.ndarray` (2D) """ begin, end = self._masked_middle_begin_end() return (self.masked_mfcc)[:, begin:end]
[ "def", "masked_middle_mfcc", "(", "self", ")", ":", "begin", ",", "end", "=", "self", ".", "_masked_middle_begin_end", "(", ")", "return", "(", "self", ".", "masked_mfcc", ")", "[", ":", ",", "begin", ":", "end", "]" ]
30.222222
8.666667
def _initialize(cls): """ Internal method to initialize the class by creating all the operator metadata to be used afterwards. """ op_symbols = """ + add radd pos - sub rsub neg * mul rmul / truediv rtruediv // floordiv rfloordiv % mod rmod ** pow rpow >> rshift rrshift << lshift rlshift ~ invert & and rand | or ror ^ xor rxor < lt <= le == eq != ne > gt >= ge """.strip().splitlines() if HAS_MATMUL: op_symbols.append("@ matmul rmatmul") for op_line in op_symbols: symbol, names = op_line.split(None, 1) for name in names.split(): cls._insert(name, symbol)
[ "def", "_initialize", "(", "cls", ")", ":", "op_symbols", "=", "\"\"\"\n + add radd pos\n - sub rsub neg\n * mul rmul\n / truediv rtruediv\n // floordiv rfloordiv\n % mod rmod\n ** pow rpow\n >> rshift rrshift\n << lshift rlshift\n ~ invert\n & ...
21.875
17.1875
def _get_token_from_headers(self, request, refresh_token): """ Extract the token if present inside the headers of a request. """ header = request.headers.get(self.config.authorization_header(), None) if header is None: return None else: header_prefix_key = "authorization_header_prefix" header_prefix = getattr(self.config, header_prefix_key) if header_prefix(): try: prefix, token = header.split(" ") if prefix != header_prefix(): raise Exception except Exception: raise exceptions.InvalidAuthorizationHeader() else: token = header if refresh_token: token = request.json.get(self.config.refresh_token_name()) return token
[ "def", "_get_token_from_headers", "(", "self", ",", "request", ",", "refresh_token", ")", ":", "header", "=", "request", ".", "headers", ".", "get", "(", "self", ".", "config", ".", "authorization_header", "(", ")", ",", "None", ")", "if", "header", "is", ...
31.464286
21.75
def attrib(self): """ An dict of XML element attributes for this MFD. """ return dict([ ('aValue', str(self.a_val)), ('bValue', str(self.b_val)), ('minMag', str(self.min_mag)), ('maxMag', str(self.max_mag)), ])
[ "def", "attrib", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "'aValue'", ",", "str", "(", "self", ".", "a_val", ")", ")", ",", "(", "'bValue'", ",", "str", "(", "self", ".", "b_val", ")", ")", ",", "(", "'minMag'", ",", "str", "(", ...
28.9
9.1
def get_token(self): """sets client token from Cerberus""" auth_resp = self.get_auth() if auth_resp['status'] == 'mfa_req': token_resp = self.get_mfa(auth_resp) else: token_resp = auth_resp token = token_resp['data']['client_token']['client_token'] return token
[ "def", "get_token", "(", "self", ")", ":", "auth_resp", "=", "self", ".", "get_auth", "(", ")", "if", "auth_resp", "[", "'status'", "]", "==", "'mfa_req'", ":", "token_resp", "=", "self", ".", "get_mfa", "(", "auth_resp", ")", "else", ":", "token_resp", ...
29.545455
17.818182
def add_join_conditions(self, conditions: Dict[str, Any]) -> None: """Adds an extra condition to an existing JOIN. This allows you to for example do: INNER JOIN othertable ON (mytable.id = othertable.other_id AND [extra conditions]) This does not work if nothing else in your query doesn't already generate the initial join in the first place. """ alias = self.get_initial_alias() opts = self.get_meta() for name, value in conditions.items(): parts = name.split(LOOKUP_SEP) join_info = self.setup_joins(parts, opts, alias, allow_many=True) self.trim_joins(join_info[1], join_info[3], join_info[4]) target_table = join_info[3][-1] field = join_info[1][-1] join = self.alias_map.get(target_table) if not join: raise SuspiciousOperation(( 'Cannot add an extra join condition for "%s", there\'s no' ' existing join to add it to.' ) % target_table) # convert the Join object into a ConditionalJoin object, which # allows us to add the extra condition if not isinstance(join, ConditionalJoin): self.alias_map[target_table] = ConditionalJoin.from_join(join) join = self.alias_map[target_table] join.add_condition(field, value)
[ "def", "add_join_conditions", "(", "self", ",", "conditions", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "None", ":", "alias", "=", "self", ".", "get_initial_alias", "(", ")", "opts", "=", "self", ".", "get_meta", "(", ")", "for", "name", "...
39.027778
21.472222
def domain(random=random, *args, **kwargs): """ Return a domain >>> mock_random.seed(0) >>> domain(random=mock_random) 'onion.net' >>> domain(random=mock_random) 'bag-of-heroic-chimps.sexy' """ words = random.choice([ noun(random=random), thing(random=random), adjective(random=random)+noun(random=random), ]) return _slugify(words)+tld(random=random)
[ "def", "domain", "(", "random", "=", "random", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "words", "=", "random", ".", "choice", "(", "[", "noun", "(", "random", "=", "random", ")", ",", "thing", "(", "random", "=", "random", ")", ",", ...
23.823529
14.764706
def save(self, request, resource=None, **kwargs): """Create a resource.""" resources = resource if isinstance(resource, list) else [resource] for obj in resources: obj.save() return resource
[ "def", "save", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "resources", "=", "resource", "if", "isinstance", "(", "resource", ",", "list", ")", "else", "[", "resource", "]", "for", "obj", "in", "resou...
38.166667
14.833333
def type(self): """The type of the functional.""" if self.xc in self.defined_aliases: return self.defined_aliases[self.xc].type xc = (self.x, self.c) if xc in self.defined_aliases: return self.defined_aliases[xc].type # If self is not in defined_aliases, use LibxcFunc family if self.xc is not None: return self.xc.family return "+".join([self.x.family, self.c.family])
[ "def", "type", "(", "self", ")", ":", "if", "self", ".", "xc", "in", "self", ".", "defined_aliases", ":", "return", "self", ".", "defined_aliases", "[", "self", ".", "xc", "]", ".", "type", "xc", "=", "(", "self", ".", "x", ",", "self", ".", "c",...
46.444444
23.222222
def _parse_doc_ref(self): """Parse the document handle. Sets the ``_series``, ``_serial``, and ``_handle`` attributes. """ command = LatexCommand( 'setDocRef', {'name': 'handle', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no setDocRef') self._handle = None self._series = None self._serial = None return self._handle = parsed['handle'] try: self._series, self._serial = self._handle.split('-', 1) except ValueError: self._logger.warning('lsstdoc handle cannot be parsed into ' 'series and serial: %r', self._handle) self._series = None self._serial = None
[ "def", "_parse_doc_ref", "(", "self", ")", ":", "command", "=", "LatexCommand", "(", "'setDocRef'", ",", "{", "'name'", ":", "'handle'", ",", "'required'", ":", "True", ",", "'bracket'", ":", "'{'", "}", ")", "try", ":", "parsed", "=", "next", "(", "co...
35
17.8
def file_exists_on_unit(self, sentry_unit, file_name): """Check if a file exists on a unit.""" try: sentry_unit.file_stat(file_name) return True except IOError: return False except Exception as e: msg = 'Error checking file {}: {}'.format(file_name, e) amulet.raise_status(amulet.FAIL, msg=msg)
[ "def", "file_exists_on_unit", "(", "self", ",", "sentry_unit", ",", "file_name", ")", ":", "try", ":", "sentry_unit", ".", "file_stat", "(", "file_name", ")", "return", "True", "except", "IOError", ":", "return", "False", "except", "Exception", "as", "e", ":...
37.7
14.6
def set_proxy_win(server, port, types=None, bypass_hosts=None): ''' Sets the http proxy settings, only works with Windows. server The proxy server to use password The password to use if required by the server types The types of proxy connections should be setup with this server. Valid types are: - ``http`` - ``https`` - ``ftp`` bypass_hosts The hosts that are allowed to by pass the proxy. CLI Example: .. code-block:: bash salt '*' proxy.set_http_proxy example.com 1080 types="['http', 'https']" ''' if __grains__['os'] == 'Windows': return _set_proxy_windows(server=server, port=port, types=types, bypass_hosts=bypass_hosts)
[ "def", "set_proxy_win", "(", "server", ",", "port", ",", "types", "=", "None", ",", "bypass_hosts", "=", "None", ")", ":", "if", "__grains__", "[", "'os'", "]", "==", "'Windows'", ":", "return", "_set_proxy_windows", "(", "server", "=", "server", ",", "p...
26.21875
25.40625
def _validate_states(states, topology): '''Validate states to avoid ignoring states during initialization''' states = states or [] if isinstance(states, dict): for x in states: assert x in topology.node else: assert len(states) <= len(topology) return states
[ "def", "_validate_states", "(", "states", ",", "topology", ")", ":", "states", "=", "states", "or", "[", "]", "if", "isinstance", "(", "states", ",", "dict", ")", ":", "for", "x", "in", "states", ":", "assert", "x", "in", "topology", ".", "node", "el...
33.111111
14.666667
def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names): """ Convert reshape(view). Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers """ print('Converting flatten ...') if names == 'short': tf_name = 'R' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) reshape = keras.layers.Reshape([-1], name=tf_name) layers[scope_name] = reshape(layers[inputs[0]])
[ "def", "convert_flatten", "(", "params", ",", "w_name", ",", "scope_name", ",", "inputs", ",", "layers", ",", "weights", ",", "names", ")", ":", "print", "(", "'Converting flatten ...'", ")", "if", "names", "==", "'short'", ":", "tf_name", "=", "'R'", "+",...
30.291667
14.708333
def get_mapped_filenames(self, memoryMap = None): """ Retrieves the filenames for memory mapped files in the debugee. @type memoryMap: list( L{win32.MemoryBasicInformation} ) @param memoryMap: (Optional) Memory map returned by L{get_memory_map}. If not given, the current memory map is used. @rtype: dict( int S{->} str ) @return: Dictionary mapping memory addresses to file names. Native filenames are converted to Win32 filenames when possible. """ hProcess = self.get_handle( win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION ) if not memoryMap: memoryMap = self.get_memory_map() mappedFilenames = dict() for mbi in memoryMap: if mbi.Type not in (win32.MEM_IMAGE, win32.MEM_MAPPED): continue baseAddress = mbi.BaseAddress fileName = "" try: fileName = win32.GetMappedFileName(hProcess, baseAddress) fileName = PathOperations.native_to_win32_pathname(fileName) except WindowsError: #e = sys.exc_info()[1] #try: # msg = "Can't get mapped file name at address %s in process " \ # "%d, reason: %s" % (HexDump.address(baseAddress), # self.get_pid(), # e.strerror) # warnings.warn(msg, Warning) #except Exception: pass mappedFilenames[baseAddress] = fileName return mappedFilenames
[ "def", "get_mapped_filenames", "(", "self", ",", "memoryMap", "=", "None", ")", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_VM_READ", "|", "win32", ".", "PROCESS_QUERY_INFORMATION", ")", "if", "not", "memoryMap", ":", "memoryMa...
44.864865
19.027027
def routes_register(app, handler, *paths, methods=None, router=None, name=None): """Register routes.""" if router is None: router = app.router handler = to_coroutine(handler) resources = [] for path in paths: # Register any exception to app if isinstance(path, type) and issubclass(path, BaseException): app._error_handlers[path] = handler continue # Ensure that names are unique name = str(name or '') rname, rnum = name, 2 while rname in router: rname = "%s%d" % (name, rnum) rnum += 1 path = parse(path) if isinstance(path, RETYPE): resource = RawReResource(path, name=rname) router.register_resource(resource) else: resource = router.add_resource(path, name=rname) for method in methods or [METH_ANY]: method = method.upper() resource.add_route(method, handler) resources.append(resource) return resources
[ "def", "routes_register", "(", "app", ",", "handler", ",", "*", "paths", ",", "methods", "=", "None", ",", "router", "=", "None", ",", "name", "=", "None", ")", ":", "if", "router", "is", "None", ":", "router", "=", "app", ".", "router", "handler", ...
26.578947
19.842105
def list_trials(experiment_path, sort, output, filter_op, columns, result_columns): """Lists trials in the directory subtree starting at the given path.""" if columns: columns = columns.split(",") if result_columns: result_columns = result_columns.split(",") commands.list_trials(experiment_path, sort, output, filter_op, columns, result_columns)
[ "def", "list_trials", "(", "experiment_path", ",", "sort", ",", "output", ",", "filter_op", ",", "columns", ",", "result_columns", ")", ":", "if", "columns", ":", "columns", "=", "columns", ".", "split", "(", "\",\"", ")", "if", "result_columns", ":", "res...
45.666667
14
def element_scroll_behavior(self, value): """ Sets the options Element Scroll Behavior :Args: - value: 0 - Top, 1 - Bottom """ if value not in [ElementScrollBehavior.TOP, ElementScrollBehavior.BOTTOM]: raise ValueError('Element Scroll Behavior out of range.') self._options[self.ELEMENT_SCROLL_BEHAVIOR] = value
[ "def", "element_scroll_behavior", "(", "self", ",", "value", ")", ":", "if", "value", "not", "in", "[", "ElementScrollBehavior", ".", "TOP", ",", "ElementScrollBehavior", ".", "BOTTOM", "]", ":", "raise", "ValueError", "(", "'Element Scroll Behavior out of range.'",...
33.818182
18.909091
def csvd(A): """SVD decomposition using numpy.linalg.svd :param A: a M by N matrix :return: * U, a M by M matrix * S the N eigen values * V a N by N matrix See :func:`numpy.linalg.svd` for a detailed documentation. Should return the same as in [Marple]_ , CSVD routine. :: U, S, V = numpy.linalg.svd(A) U, S, V = cvsd(A) """ U, S, V = numpy.linalg.svd(A) return U, S, V
[ "def", "csvd", "(", "A", ")", ":", "U", ",", "S", ",", "V", "=", "numpy", ".", "linalg", ".", "svd", "(", "A", ")", "return", "U", ",", "S", ",", "V" ]
19.590909
22.863636
def obfn_dfd(self): r"""Compute data fidelity term :math:`(1/2) \| \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`. """ Ef = self.eval_Rf(self.Xf) return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0
[ "def", "obfn_dfd", "(", "self", ")", ":", "Ef", "=", "self", ".", "eval_Rf", "(", "self", ".", "Xf", ")", "return", "sl", ".", "rfl2norm2", "(", "Ef", ",", "self", ".", "S", ".", "shape", ",", "axis", "=", "self", ".", "cri", ".", "axisN", ")",...
36.285714
16.571429
def cli(env, network, quantity, vlan_id, ipv6, test): """Add a new subnet to your account. Valid quantities vary by type. \b Type - Valid Quantities (IPv4) public - 4, 8, 16, 32 private - 4, 8, 16, 32, 64 \b Type - Valid Quantities (IPv6) public - 64 """ mgr = SoftLayer.NetworkManager(env.client) if not (test or env.skip_confirmations): if not formatting.confirm("This action will incur charges on your " "account. Continue?"): raise exceptions.CLIAbort('Cancelling order.') version = 4 if ipv6: version = 6 try: result = mgr.add_subnet(network, quantity=quantity, vlan_id=vlan_id, version=version, test_order=test) except SoftLayer.SoftLayerAPIError: raise exceptions.CLIAbort('There is no price id for {} {} ipv{}'.format(quantity, network, version)) table = formatting.Table(['Item', 'cost']) table.align['Item'] = 'r' table.align['cost'] = 'r' total = 0.0 if 'prices' in result: for price in result['prices']: total += float(price.get('recurringFee', 0.0)) rate = "%.2f" % float(price['recurringFee']) table.add_row([price['item']['description'], rate]) table.add_row(['Total monthly cost', "%.2f" % total]) env.fout(table)
[ "def", "cli", "(", "env", ",", "network", ",", "quantity", ",", "vlan_id", ",", "ipv6", ",", "test", ")", ":", "mgr", "=", "SoftLayer", ".", "NetworkManager", "(", "env", ".", "client", ")", "if", "not", "(", "test", "or", "env", ".", "skip_confirmat...
30.488372
23.790698
def _date_to_json(value): """Coerce 'value' to an JSON-compatible representation.""" if isinstance(value, datetime.date): value = value.isoformat() return value
[ "def", "_date_to_json", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "date", ")", ":", "value", "=", "value", ".", "isoformat", "(", ")", "return", "value" ]
35.2
9.2
def cmd_usercheck(username, no_cache, verbose, wopen): """Check if the given username exists on various social networks and other popular sites. \b $ habu.usercheck portantier { "aboutme": "https://about.me/portantier", "disqus": "https://disqus.com/by/portantier/", "github": "https://github.com/portantier/", "ifttt": "https://ifttt.com/p/portantier", "lastfm": "https://www.last.fm/user/portantier", "medium": "https://medium.com/@portantier", "pastebin": "https://pastebin.com/u/portantier", "pinterest": "https://in.pinterest.com/portantier/", "twitter": "https://twitter.com/portantier", "vimeo": "https://vimeo.com/portantier" } """ habucfg = loadcfg() if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') if not no_cache: homedir = pwd.getpwuid(os.getuid()).pw_dir requests_cache.install_cache(homedir + '/.habu_requests_cache') logging.info('using cache on ' + homedir + '/.habu_requests_cache') existent = {} for site, url in urls.items(): u = url.format(username) logging.info(u) try: r = requests.head(u, allow_redirects=False) except Exception: continue if r.status_code == 200: if requests.head(url.format('zei4fee3q9'), allow_redirects=False).status_code == 200: logging.error('Received status 200 for user zei4fee3q9, maybe, the check needs to be fixed') else: existent[site] = u if wopen: webbrowser.open_new_tab(u) print(json.dumps(existent, indent=4))
[ "def", "cmd_usercheck", "(", "username", ",", "no_cache", ",", "verbose", ",", "wopen", ")", ":", "habucfg", "=", "loadcfg", "(", ")", "if", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "...
35.489362
21.617021
def reserved(self, value): """Reserved setter.""" self.bytearray[self._get_slicers(2)] = bytearray(c_uint16(value or 0))
[ "def", "reserved", "(", "self", ",", "value", ")", ":", "self", ".", "bytearray", "[", "self", ".", "_get_slicers", "(", "2", ")", "]", "=", "bytearray", "(", "c_uint16", "(", "value", "or", "0", ")", ")" ]
44.666667
17.333333
def flatten_dict_vals(dict_): """ Flattens only values in a heirarchical dictionary, keys are nested. """ if isinstance(dict_, dict): return dict([ ((key, augkey), augval) for key, val in dict_.items() for augkey, augval in flatten_dict_vals(val).items() ]) else: return {None: dict_}
[ "def", "flatten_dict_vals", "(", "dict_", ")", ":", "if", "isinstance", "(", "dict_", ",", "dict", ")", ":", "return", "dict", "(", "[", "(", "(", "key", ",", "augkey", ")", ",", "augval", ")", "for", "key", ",", "val", "in", "dict_", ".", "items",...
29.416667
14.416667
def on_message(self, message): """Listens for a "websocket client ready" message. Once that message is received an asynchronous job is stated that yields messages to the client. These messages make up salt's "real time" event stream. """ log.debug('Got websocket message %s', message) if message == 'websocket client ready': if self.connected: # TBD: Add ability to run commands in this branch log.debug('Websocket already connected, returning') return self.connected = True evt_processor = event_processor.SaltInfo(self) client = salt.netapi.NetapiClient(self.application.opts) client.run({ 'fun': 'grains.items', 'tgt': '*', 'token': self.token, 'mode': 'client', 'asynchronous': 'local_async', 'client': 'local' }) while True: try: event = yield self.application.event_listener.get_event(self) evt_processor.process(event, self.token, self.application.opts) # self.write_message('data: {0}\n\n'.format(salt.utils.json.dumps(event, _json_module=_json))) except Exception as err: log.debug('Error! Ending server side websocket connection. Reason = %s', err) break self.close() else: # TBD: Add logic to run salt commands here pass
[ "def", "on_message", "(", "self", ",", "message", ")", ":", "log", ".", "debug", "(", "'Got websocket message %s'", ",", "message", ")", "if", "message", "==", "'websocket client ready'", ":", "if", "self", ".", "connected", ":", "# TBD: Add ability to run command...
40.205128
19.205128
def cat_files(infiles, outfile): '''Cats all files in list infiles into outfile''' f_out = pyfastaq.utils.open_file_write(outfile) for filename in infiles: if os.path.exists(filename): f_in = pyfastaq.utils.open_file_read(filename) for line in f_in: print(line, end='', file=f_out) pyfastaq.utils.close(f_in) pyfastaq.utils.close(f_out)
[ "def", "cat_files", "(", "infiles", ",", "outfile", ")", ":", "f_out", "=", "pyfastaq", ".", "utils", ".", "open_file_write", "(", "outfile", ")", "for", "filename", "in", "infiles", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":...
33.583333
14.583333
def compact(self, include=None): """ Return compact views - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/views#list-views---compact>`__ """ return self._get(self._build_url(self.endpoint.compact(include=include)))
[ "def", "compact", "(", "self", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "compact", "(", "include", "=", "include", ")", ")", ")" ]
46.833333
19.166667
def as_value(ele): """ convert the specified element as a python value """ dtype = ele.datatype() # print '%s = %s' % (ele.name(), dtype) if dtype in (1, 2, 3, 4, 5, 6, 7, 9, 12): # BOOL, CHAR, BYTE, INT32, INT64, FLOAT32, FLOAT64, BYTEARRAY, DECIMAL) return ele.getValue() elif dtype == 8: # String val = ele.getValue() """ if val: # us centric :) val = val.encode('ascii', 'replace') """ return str(val) elif dtype == 10: # Date if ele.isNull(): return pd.NaT else: v = ele.getValue() return datetime(year=v.year, month=v.month, day=v.day) if v else pd.NaT elif dtype == 11: # Time if ele.isNull(): return pd.NaT else: v = ele.getValue() now = pd.datetime.now() return datetime(year=now.year, month=now.month, day=now.day, hour=v.hour, minute=v.minute, second=v.second).time() if v else np.nan elif dtype == 13: # Datetime if ele.isNull(): return pd.NaT else: v = ele.getValue() return v elif dtype == 14: # Enumeration # raise NotImplementedError('ENUMERATION data type needs implemented') return str(ele.getValue()) elif dtype == 16: # Choice raise NotImplementedError('CHOICE data type needs implemented') elif dtype == 15: # SEQUENCE return XmlHelper.get_sequence_value(ele) else: raise NotImplementedError('Unexpected data type %s. Check documentation' % dtype)
[ "def", "as_value", "(", "ele", ")", ":", "dtype", "=", "ele", ".", "datatype", "(", ")", "# print '%s = %s' % (ele.name(), dtype)", "if", "dtype", "in", "(", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "9", ",", "12", ")...
40.395349
16.046512
def ltcube_moon(self, **kwargs): """ return the name of a livetime cube file """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ltcubemoon_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
[ "def", "ltcube_moon", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "self", ".", "base_dict", ".", "copy", "(", ")", "kwargs_copy", ".", "update", "(", "*", "*", "kwargs", ")", "kwargs_copy", "[", "'dataset'", "]", "=", "kwargs", ...
44.272727
11
def stop(self, now=False): """Stop the server gracefully. Do not take any new transfers, but complete the existing ones. If force is True, drop everything and stop. Note, immediately will not interrupt the select loop, it will happen when the server returns on ready data, or a timeout. ie. SOCK_TIMEOUT""" if now: self.shutdown_immediately = True else: self.shutdown_gracefully = True
[ "def", "stop", "(", "self", ",", "now", "=", "False", ")", ":", "if", "now", ":", "self", ".", "shutdown_immediately", "=", "True", "else", ":", "self", ".", "shutdown_gracefully", "=", "True" ]
45.6
17.2
def persist_upstream_structure(self): """Persist json file with the upstream steps structure, that is step names and their connections.""" persist_dir = os.path.join(self.experiment_directory, '{}_upstream_structure.json'.format(self.name)) logger.info('Step {}, saving upstream pipeline structure to {}'.format(self.name, persist_dir)) joblib.dump(self.upstream_structure, persist_dir)
[ "def", "persist_upstream_structure", "(", "self", ")", ":", "persist_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "experiment_directory", ",", "'{}_upstream_structure.json'", ".", "format", "(", "self", ".", "name", ")", ")", "logger", ".", "...
82.8
30.4
def K(self, X, X2=None): # model : -a d^2y/dx^2 + b dy/dt + c * y = U # kernel Kyy rbf spatiol temporal # vyt Y temporal variance vyx Y spatiol variance lyt Y temporal lengthscale lyx Y spatiol lengthscale # kernel Kuu doper( doper(Kyy)) # a b c lyt lyx vyx*vyt """Compute the covariance matrix between X and X2.""" X,slices = X[:,:-1],index_to_slices(X[:,-1]) if X2 is None: X2,slices2 = X,slices K = np.zeros((X.shape[0], X.shape[0])) else: X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1]) K = np.zeros((X.shape[0], X2.shape[0])) tdist = (X[:,0][:,None] - X2[:,0][None,:])**2 xdist = (X[:,1][:,None] - X2[:,1][None,:])**2 ttdist = (X[:,0][:,None] - X2[:,0][None,:]) #rdist = [tdist,xdist] #dist = np.abs(X - X2.T) vyt = self.variance_Yt vyx = self.variance_Yx lyt=1/(2*self.lengthscale_Yt) lyx=1/(2*self.lengthscale_Yx) a = self.a ## -a is used in the model, negtive diffusion b = self.b c = self.c kyy = lambda tdist,xdist: np.exp(-lyt*(tdist) -lyx*(xdist)) k1 = lambda tdist: (2*lyt - 4*lyt**2 * (tdist) ) k2 = lambda xdist: ( 4*lyx**2 * (xdist) - 2*lyx ) k3 = lambda xdist: ( 3*4*lyx**2 - 6*8*xdist*lyx**3 + 16*xdist**2*lyx**4 ) k4 = lambda ttdist: 2*lyt*(ttdist) for i, s1 in enumerate(slices): for j, s2 in enumerate(slices2): for ss1 in s1: for ss2 in s2: if i==0 and j==0: K[ss1,ss2] = vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2]) elif i==0 and j==1: K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) + b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2]) #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[ss1,ss2]) ) ) #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , kuyp(rdist[ss1,ss2]), kuyn(rdist[ss1,ss2] ) ) elif i==1 and j==1: K[ss1,ss2] = ( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 )* vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2]) else: K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) - b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2]) #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , 
kyup(np.abs(rdist[ss1,ss2])), kyun(np.abs(rdist[ss1,ss2]) ) ) #K[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyup(rdist[ss1,ss2]), kyun(rdist[ss1,ss2] ) ) #stop return K
[ "def", "K", "(", "self", ",", "X", ",", "X2", "=", "None", ")", ":", "# model : -a d^2y/dx^2 + b dy/dt + c * y = U", "# kernel Kyy rbf spatiol temporal", "# vyt Y temporal variance vyx Y spatiol variance lyt Y temporal lengthscale lyx Y spatiol lengthscale", "# kernel Kuu doper...
45.786885
28.229508
def determine_opening_indent(indent_texts): '''Determine the opening indent level for a docstring. The opening indent level is the indent level is the first non-zero indent level of a non-empty line in the docstring. Args: indent_texts: The lines of the docstring as an iterable over 2-tuples each containing an integer indent level as the first element and the text as the second element. Returns: The opening indent level as an integer. ''' num_lines = len(indent_texts) if num_lines < 1: return 0 assert num_lines >= 1 first_line_indent = indent_texts[0][0] if num_lines == 1: return first_line_indent assert num_lines >= 2 second_line_indent = indent_texts[1][0] second_line_text = indent_texts[1][1] if len(second_line_text) == 0: return first_line_indent return second_line_indent
[ "def", "determine_opening_indent", "(", "indent_texts", ")", ":", "num_lines", "=", "len", "(", "indent_texts", ")", "if", "num_lines", "<", "1", ":", "return", "0", "assert", "num_lines", ">=", "1", "first_line_indent", "=", "indent_texts", "[", "0", "]", "...
25.542857
23.428571
def run(command): ''' Run command in shell, accepts command construction from list Return (return_code, stdout, stderr) stdout and stderr - as list of strings ''' if isinstance(command, list): command = ' '.join(command) out = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return (out.returncode, _prepare(out.stdout), _prepare(out.stderr))
[ "def", "run", "(", "command", ")", ":", "if", "isinstance", "(", "command", ",", "list", ")", ":", "command", "=", "' '", ".", "join", "(", "command", ")", "out", "=", "subprocess", ".", "run", "(", "command", ",", "shell", "=", "True", ",", "stdou...
40.9
21.1
def _run_pants_with_retry(self, pantsd_handle, retries=3): """Runs pants remotely with retry and recovery for nascent executions. :param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to. """ attempt = 1 while 1: logger.debug( 'connecting to pantsd on port {} (attempt {}/{})' .format(pantsd_handle.port, attempt, retries) ) try: return self._connect_and_execute(pantsd_handle) except self.RECOVERABLE_EXCEPTIONS as e: if attempt > retries: raise self.Fallback(e) self._backoff(attempt) logger.warn( 'pantsd was unresponsive on port {}, retrying ({}/{})' .format(pantsd_handle.port, attempt, retries) ) # One possible cause of the daemon being non-responsive during an attempt might be if a # another lifecycle operation is happening concurrently (incl teardown). To account for # this, we won't begin attempting restarts until at least 1 second has passed (1 attempt). if attempt > 1: pantsd_handle = self._restart_pantsd() attempt += 1 except NailgunClient.NailgunError as e: # Ensure a newline. logger.fatal('') logger.fatal('lost active connection to pantsd!') raise_with_traceback(self._extract_remote_exception(pantsd_handle.pid, e))
[ "def", "_run_pants_with_retry", "(", "self", ",", "pantsd_handle", ",", "retries", "=", "3", ")", ":", "attempt", "=", "1", "while", "1", ":", "logger", ".", "debug", "(", "'connecting to pantsd on port {} (attempt {}/{})'", ".", "format", "(", "pantsd_handle", ...
39.676471
23.352941
def get_dependency_element(self, symbol): """Checks if the specified symbol is the name of one of the methods that this module depends on. If it is, search for the actual code element and return it.""" for depend in self.dependencies: if "." in depend: #We know the module name and the executable name, easy if depend.split(".")[1] == symbol.lower(): found = self.parent.get_executable(depend) break else: #We only have the name of an executable or interface, we have to search #the whole module for the element. fullname = "{}.{}".format(depend, symbol) if self.parent.get_executable(fullname) is not None: found = self.parent.get_executable(fullname) break if self.parent.get_interface(fullname) is not None: found = self.parent.get_interface(fullname) break else: return None return found
[ "def", "get_dependency_element", "(", "self", ",", "symbol", ")", ":", "for", "depend", "in", "self", ".", "dependencies", ":", "if", "\".\"", "in", "depend", ":", "#We know the module name and the executable name, easy", "if", "depend", ".", "split", "(", "\".\""...
45.208333
19.291667
def setTargets(self, value): """ Sets all targets the the value of the argument. This value must be in the range [min,max]. """ # Removed this because both propagate and backprop (via compute_error) set targets #if self.verify and not self.targetSet == 0: # if not self.warningIssued: # print 'Warning! Targets have already been set and no intervening backprop() was called.', \ # (self.name, self.targetSet) # print "(Warning will not be issued again)" # self.warningIssued = 1 if value > self.maxActivation or value < self.minActivation: raise LayerError('Targets for this layer are out of the proper interval.', (self.name, value)) Numeric.put(self.target, Numeric.arange(len(self.target)), value) self.targetSet = 1
[ "def", "setTargets", "(", "self", ",", "value", ")", ":", "# Removed this because both propagate and backprop (via compute_error) set targets", "#if self.verify and not self.targetSet == 0:", "# if not self.warningIssued:", "# print 'Warning! Targets have already been set and no inte...
57.2
24.8
def open(self, vendor_id: int = 0x16c0, product_id: int = 0x5dc, bus: int = None, address: int = None) -> bool: """ Open the first device that matches the search criteria. Th default parameters are set up for the likely most common case of a single uDMX interface. However, for the case of multiple uDMX interfaces, you can use the bus and address paramters to further specifiy the uDMX interface to be opened. :param vendor_id: :param product_id: :param bus: USB bus number 1-n :param address: USB device address 1-n :return: Returns true if a device was opened. Otherwise, returns false. """ kwargs = {} if vendor_id: kwargs["idVendor"] = vendor_id if product_id: kwargs["idProduct"] = product_id if bus: kwargs["bus"] = bus if address: kwargs["address"] = address # Find the uDMX interface self._dev = usb.core.find(**kwargs) return self._dev is not None
[ "def", "open", "(", "self", ",", "vendor_id", ":", "int", "=", "0x16c0", ",", "product_id", ":", "int", "=", "0x5dc", ",", "bus", ":", "int", "=", "None", ",", "address", ":", "int", "=", "None", ")", "->", "bool", ":", "kwargs", "=", "{", "}", ...
41.64
17.96
def list(self): """Lists the jobs on the server. """ queue = self._get_queue() if queue is None: raise QueueDoesntExist output = self.check_output('%s' % shell_escape(queue / 'commands/list')) job_id, info = None, None for line in output.splitlines(): line = line.decode('utf-8') if line.startswith(' '): key, value = line[4:].split(': ', 1) info[key] = value else: if job_id is not None: yield job_id, info job_id = line info = {} if job_id is not None: yield job_id, info
[ "def", "list", "(", "self", ")", ":", "queue", "=", "self", ".", "_get_queue", "(", ")", "if", "queue", "is", "None", ":", "raise", "QueueDoesntExist", "output", "=", "self", ".", "check_output", "(", "'%s'", "%", "shell_escape", "(", "queue", "/", "'c...
31.086957
11.652174
def _translate(self): """ Convert the DataFrame in `self.data` and the attrs from `_build_styles` into a dictionary of {head, body, uuid, cellstyle}. """ table_styles = self.table_styles or [] caption = self.caption ctx = self.ctx precision = self.precision hidden_index = self.hidden_index hidden_columns = self.hidden_columns uuid = self.uuid or str(uuid1()).replace("-", "_") ROW_HEADING_CLASS = "row_heading" COL_HEADING_CLASS = "col_heading" INDEX_NAME_CLASS = "index_name" DATA_CLASS = "data" BLANK_CLASS = "blank" BLANK_VALUE = "" def format_attr(pair): return "{key}={value}".format(**pair) # for sparsifying a MultiIndex idx_lengths = _get_level_lengths(self.index) col_lengths = _get_level_lengths(self.columns, hidden_columns) cell_context = dict() n_rlvls = self.data.index.nlevels n_clvls = self.data.columns.nlevels rlabels = self.data.index.tolist() clabels = self.data.columns.tolist() if n_rlvls == 1: rlabels = [[x] for x in rlabels] if n_clvls == 1: clabels = [[x] for x in clabels] clabels = list(zip(*clabels)) cellstyle = [] head = [] for r in range(n_clvls): # Blank for Index columns... row_es = [{"type": "th", "value": BLANK_VALUE, "display_value": BLANK_VALUE, "is_visible": not hidden_index, "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1) # ... 
except maybe the last for columns.names name = self.data.columns.names[r] cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS, "level{lvl}".format(lvl=r)] name = BLANK_VALUE if name is None else name row_es.append({"type": "th", "value": name, "display_value": name, "class": " ".join(cs), "is_visible": not hidden_index}) if clabels: for c, value in enumerate(clabels[r]): cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r), "col{col}".format(col=c)] cs.extend(cell_context.get( "col_headings", {}).get(r, {}).get(c, [])) es = { "type": "th", "value": value, "display_value": value, "class": " ".join(cs), "is_visible": _is_visible(c, r, col_lengths), } colspan = col_lengths.get((r, c), 0) if colspan > 1: es["attributes"] = [ format_attr({"key": "colspan", "value": colspan}) ] row_es.append(es) head.append(row_es) if (self.data.index.names and com._any_not_none(*self.data.index.names) and not hidden_index): index_header_row = [] for c, name in enumerate(self.data.index.names): cs = [INDEX_NAME_CLASS, "level{lvl}".format(lvl=c)] name = '' if name is None else name index_header_row.append({"type": "th", "value": name, "class": " ".join(cs)}) index_header_row.extend( [{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS]) }] * (len(clabels[0]) - len(hidden_columns))) head.append(index_header_row) body = [] for r, idx in enumerate(self.data.index): row_es = [] for c, value in enumerate(rlabels[r]): rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c), "row{row}".format(row=r)] es = { "type": "th", "is_visible": (_is_visible(r, c, idx_lengths) and not hidden_index), "value": value, "display_value": value, "id": "_".join(rid[1:]), "class": " ".join(rid) } rowspan = idx_lengths.get((c, r), 0) if rowspan > 1: es["attributes"] = [ format_attr({"key": "rowspan", "value": rowspan}) ] row_es.append(es) for c, col in enumerate(self.data.columns): cs = [DATA_CLASS, "row{row}".format(row=r), "col{col}".format(col=c)] cs.extend(cell_context.get("data", {}).get(r, 
{}).get(c, [])) formatter = self._display_funcs[(r, c)] value = self.data.iloc[r, c] row_dict = {"type": "td", "value": value, "class": " ".join(cs), "display_value": formatter(value), "is_visible": (c not in hidden_columns)} # only add an id if the cell has a style if (self.cell_ids or not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')): row_dict["id"] = "_".join(cs[1:]) row_es.append(row_dict) props = [] for x in ctx[r, c]: # have to handle empty styles like [''] if x.count(":"): props.append(x.split(":")) else: props.append(['', '']) cellstyle.append({'props': props, 'selector': "row{row}_col{col}" .format(row=r, col=c)}) body.append(row_es) table_attr = self.table_attributes use_mathjax = get_option("display.html.use_mathjax") if not use_mathjax: table_attr = table_attr or '' if 'class="' in table_attr: table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ') else: table_attr += ' class="tex2jax_ignore"' return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid, precision=precision, table_styles=table_styles, caption=caption, table_attributes=table_attr)
[ "def", "_translate", "(", "self", ")", ":", "table_styles", "=", "self", ".", "table_styles", "or", "[", "]", "caption", "=", "self", ".", "caption", "ctx", "=", "self", ".", "ctx", "precision", "=", "self", ".", "precision", "hidden_index", "=", "self",...
39.927711
15.39759
def crypto_hash_sha512(message): """ Hashes and returns the message ``message``. :param message: bytes :rtype: bytes """ digest = ffi.new("unsigned char[]", crypto_hash_sha512_BYTES) rc = lib.crypto_hash_sha512(digest, message, len(message)) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(digest, crypto_hash_sha512_BYTES)[:]
[ "def", "crypto_hash_sha512", "(", "message", ")", ":", "digest", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "crypto_hash_sha512_BYTES", ")", "rc", "=", "lib", ".", "crypto_hash_sha512", "(", "digest", ",", "message", ",", "len", "(", "message", ...
31.769231
14.230769
def process_item(self, item, spider): # pylint: disable=unused-argument "Check for DRF urls." url = URLObject(item["url"]) if url.path.startswith("/api/"): raise DropItem(u"Dropping DRF url {url}".format(url=url)) else: return item
[ "def", "process_item", "(", "self", ",", "item", ",", "spider", ")", ":", "# pylint: disable=unused-argument", "url", "=", "URLObject", "(", "item", "[", "\"url\"", "]", ")", "if", "url", ".", "path", ".", "startswith", "(", "\"/api/\"", ")", ":", "raise",...
40.285714
17.142857
def _process_service_check(self, name, nb_procs, bounds, tags): """ Report a service check, for each process in search_string. Report as OK if the process is in the warning thresholds CRITICAL out of the critical thresholds WARNING out of the warning thresholds """ # FIXME 8.x remove the `process:name` tag service_check_tags = tags + ["process:{}".format(name)] status = AgentCheck.OK status_str = {AgentCheck.OK: "OK", AgentCheck.WARNING: "WARNING", AgentCheck.CRITICAL: "CRITICAL"} if not bounds and nb_procs < 1: status = AgentCheck.CRITICAL elif bounds: warning = bounds.get('warning', [1, float('inf')]) critical = bounds.get('critical', [1, float('inf')]) if warning[1] < nb_procs or nb_procs < warning[0]: status = AgentCheck.WARNING if critical[1] < nb_procs or nb_procs < critical[0]: status = AgentCheck.CRITICAL self.service_check( "process.up", status, tags=service_check_tags, message="PROCS {}: {} processes found for {}".format(status_str[status], nb_procs, name), )
[ "def", "_process_service_check", "(", "self", ",", "name", ",", "nb_procs", ",", "bounds", ",", "tags", ")", ":", "# FIXME 8.x remove the `process:name` tag", "service_check_tags", "=", "tags", "+", "[", "\"process:{}\"", ".", "format", "(", "name", ")", "]", "s...
43.344828
21.551724
def get_docs(self, arg=None): ''' Return a dictionary of functions and the inline documentation for each ''' if arg: if '*' in arg: target_mod = arg _use_fnmatch = True else: target_mod = arg + '.' if not arg.endswith('.') else arg _use_fnmatch = False if _use_fnmatch: docs = [(fun, self.functions[fun].__doc__) for fun in fnmatch.filter(self.functions, target_mod)] else: docs = [(fun, self.functions[fun].__doc__) for fun in sorted(self.functions) if fun == arg or fun.startswith(target_mod)] else: docs = [(fun, self.functions[fun].__doc__) for fun in sorted(self.functions)] docs = dict(docs) return salt.utils.doc.strip_rst(docs)
[ "def", "get_docs", "(", "self", ",", "arg", "=", "None", ")", ":", "if", "arg", ":", "if", "'*'", "in", "arg", ":", "target_mod", "=", "arg", "_use_fnmatch", "=", "True", "else", ":", "target_mod", "=", "arg", "+", "'.'", "if", "not", "arg", ".", ...
39.869565
19.434783
def create_zones(self, add_zones): """Create instances of additional zones for the receiver.""" for zone, zname in add_zones.items(): # Name either set explicitly or name of Main Zone with suffix zonename = "{} {}".format(self._name, zone) if ( zname is None) else zname zone_inst = DenonAVRZones(self, zone, zonename) self._zones[zone] = zone_inst
[ "def", "create_zones", "(", "self", ",", "add_zones", ")", ":", "for", "zone", ",", "zname", "in", "add_zones", ".", "items", "(", ")", ":", "# Name either set explicitly or name of Main Zone with suffix", "zonename", "=", "\"{} {}\"", ".", "format", "(", "self", ...
52.625
10.625
def find_bridges(prior_bridges=None): """ Confirm or locate IP addresses of Philips Hue bridges. `prior_bridges` -- optional list of bridge serial numbers * omitted - all discovered bridges returned as dictionary * single string - returns IP as string or None * dictionary - validate provided ip's before attempting discovery * collection or sequence - return dictionary of filtered sn:ip pairs * if mutable then found bridges are removed from argument """ found_bridges = {} # Validate caller's provided list try: prior_bridges_list = prior_bridges.items() except AttributeError: # if caller didnt provide dict then assume single SN or None # in either case, the discovery must be executed run_discovery = True else: for prior_sn, prior_ip in prior_bridges_list: if prior_ip: serial, baseip = parse_description_xml(_build_from(prior_ip)) if serial: # there is a bridge at provided IP, add to found found_bridges[serial] = baseip else: # nothing usable at that ip logger.info('%s not found at %s', prior_sn, prior_ip) run_discovery = found_bridges.keys() != prior_bridges.keys() # prior_bridges is None, unknown, dict of unfound SNs, or empty dict # found_bridges is dict of found SNs from prior, or empty dict if run_discovery: # do the discovery, not all IPs were confirmed try: found_bridges.update(via_upnp()) except DiscoveryError: try: found_bridges.update(via_nupnp()) except DiscoveryError: try: found_bridges.update(via_scan()) except DiscoveryError: logger.warning("All discovery methods returned nothing") if prior_bridges: # prior_bridges is either single SN or dict of unfound SNs # first assume single Serial SN string try: ip_address = found_bridges[prior_bridges] except TypeError: # user passed an invalid type for key # presumably it's a dict meant for alternate mode logger.debug('Assuming alternate mode, prior_bridges is type %s.', type(prior_bridges)) except KeyError: # user provided Serial Number was not found # TODO: dropping tuples here if return none executed # return 
None pass # let it turn the string into a set, eww else: # user provided Serial Number found return ip_address # Filter the found list to subset of prior prior_bridges_keys = set(prior_bridges) keys_to_remove = prior_bridges_keys ^ found_bridges.keys() logger.debug('Removing %s from found_bridges', keys_to_remove) for key in keys_to_remove: found_bridges.pop(key, None) # Filter the prior dict to unfound only keys_to_remove = prior_bridges_keys & found_bridges.keys() logger.debug('Removing %s from prior_bridges', keys_to_remove) for key in keys_to_remove: try: prior_bridges.pop(key, None) except TypeError: # not a dict, try as set or list prior_bridges.remove(key) except AttributeError: # likely not mutable break keys_to_report = prior_bridges_keys - found_bridges.keys() for serial in keys_to_report: logger.warning('Could not locate bridge with Serial ID %s', serial) else: # prior_bridges is None or empty dict, return all found pass return found_bridges
[ "def", "find_bridges", "(", "prior_bridges", "=", "None", ")", ":", "found_bridges", "=", "{", "}", "# Validate caller's provided list", "try", ":", "prior_bridges_list", "=", "prior_bridges", ".", "items", "(", ")", "except", "AttributeError", ":", "# if caller did...
39.691489
19.297872
def buff(self, target, buff, **kwargs): """ Summon \a buff and apply it to \a target If keyword arguments are given, attempt to set the given values to the buff. Example: player.buff(target, health=random.randint(1, 5)) NOTE: Any Card can buff any other Card. The controller of the Card that buffs the target becomes the controller of the buff. """ ret = self.controller.card(buff, self) ret.source = self ret.apply(target) for k, v in kwargs.items(): setattr(ret, k, v) return ret
[ "def", "buff", "(", "self", ",", "target", ",", "buff", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "self", ".", "controller", ".", "card", "(", "buff", ",", "self", ")", "ret", ".", "source", "=", "self", "ret", ".", "apply", "(", "target", ...
33.066667
12.533333
def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx): """Internal compressed getMore message helper.""" return _compress( 2005, _get_more(collection_name, num_to_return, cursor_id), ctx)
[ "def", "_get_more_compressed", "(", "collection_name", ",", "num_to_return", ",", "cursor_id", ",", "ctx", ")", ":", "return", "_compress", "(", "2005", ",", "_get_more", "(", "collection_name", ",", "num_to_return", ",", "cursor_id", ")", ",", "ctx", ")" ]
54.75
21
def mask_roi_unique(self): """ Assemble a set of unique magnitude tuples for the ROI """ # There is no good inherent way in numpy to do this... # http://stackoverflow.com/q/16970982/ # Also possible and simple: #return np.unique(zip(self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse)) A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T B = A[np.lexsort(A.T[::-1])] return B[np.concatenate(([True],np.any(B[1:]!=B[:-1],axis=1)))]
[ "def", "mask_roi_unique", "(", "self", ")", ":", "# There is no good inherent way in numpy to do this...", "# http://stackoverflow.com/q/16970982/", "# Also possible and simple:", "#return np.unique(zip(self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse))", "A", "=", "np", ".", "vst...
40.615385
20.923077
def is_transcript_available(video_id, language_code=None): """ Returns whether the transcripts are available for a video. Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: it will the language code of the requested transcript. """ filter_attrs = {'video__edx_video_id': video_id} if language_code: filter_attrs['language_code'] = language_code transcript_set = VideoTranscript.objects.filter(**filter_attrs) return transcript_set.exists()
[ "def", "is_transcript_available", "(", "video_id", ",", "language_code", "=", "None", ")", ":", "filter_attrs", "=", "{", "'video__edx_video_id'", ":", "video_id", "}", "if", "language_code", ":", "filter_attrs", "[", "'language_code'", "]", "=", "language_code", ...
40.5
23.928571
def get_default_cache(): """Get the default cache location. Adheres to the XDG Base Directory specification, as described in https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html :return: the default cache directory absolute path """ xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \ os.path.join(os.path.expanduser('~'), '.cache') return os.path.join(xdg_cache_home, 'pokebase')
[ "def", "get_default_cache", "(", ")", ":", "xdg_cache_home", "=", "os", ".", "environ", ".", "get", "(", "'XDG_CACHE_HOME'", ")", "or", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.cache'", ")", ...
33.076923
22.846154
def insert_cluster(self, cluster): """! @brief Insert cluster that is represented as list of points where each point is represented by list of coordinates. @details Clustering feature is created for that cluster and inserted to the tree. @param[in] cluster (list): Cluster that is represented by list of points that should be inserted to the tree. """ entry = cfentry(len(cluster), linear_sum(cluster), square_sum(cluster)); self.insert(entry);
[ "def", "insert_cluster", "(", "self", ",", "cluster", ")", ":", "entry", "=", "cfentry", "(", "len", "(", "cluster", ")", ",", "linear_sum", "(", "cluster", ")", ",", "square_sum", "(", "cluster", ")", ")", "self", ".", "insert", "(", "entry", ")" ]
47.909091
33.090909
def joint_plot(x, y, marginalBins=50, gridsize=50, plotlimits=None, logscale_cmap=False, logscale_marginals=False, alpha_hexbin=0.75, alpha_marginals=0.75, cmap="inferno_r", marginalCol=None, figsize=(8, 8), fontsize=8, *args, **kwargs): """ Plots some x and y data using hexbins along with a colorbar and marginal distributions (X and Y histograms). Parameters ---------- x : ndarray The x data y : ndarray The y data marginalBins : int, optional The number of bins to use in calculating the marginal histograms of x and y gridsize : int, optional The grid size to be passed to matplotlib.pyplot.hexbins which sets the gridsize in calculating the hexbins plotlimits : float, optional The limit of the plot in x and y (it produces a square area centred on zero. Defaults to max range of data. logscale_cmap : bool, optional Sets whether to use a logscale for the colormap. Defaults to False. logscale_marginals : bool, optional Sets whether to use a logscale for the marignals. Defaults to False. alpha_hexbin : float Alpha value to use for hexbins and color map alpha_marginals : float Alpha value to use for marginal histograms cmap : string, optional Specifies the colormap to use, see https://matplotlib.org/users/colormaps.html for options. Defaults to 'inferno_r' marginalCol : string, optional Specifies color to use for marginals, defaults to middle color of colormap for a linear colormap and 70% for a logarithmic colormap. figsize : tuple of 2 values, optional Sets the figsize, defaults to (8, 8) fontsize : int, optional Sets the fontsize for all text and axis ticks. Defaults to 8. 
*args, **kwargs : optional args and kwargs passed to matplotlib.pyplot.hexbins Returns ------- fig : matplotlib.figure.Figure object The figure object created to house the joint_plot axHexBin : matplotlib.axes.Axes object The axis for the hexbin plot axHistx : matplotlib.axes.Axes object The axis for the x marginal plot axHisty : matplotlib.axes.Axes object The axis for the y marginal plot cbar : matplotlib.colorbar.Colorbar The color bar object """ with _plt.rc_context({'font.size': fontsize,}): # definitions for the axes hexbin_marginal_seperation = 0.01 left, width = 0.2, 0.65-0.1 # left = left side of hexbin and hist_x bottom, height = 0.1, 0.65-0.1 # bottom = bottom of hexbin and hist_y bottom_h = height + bottom + hexbin_marginal_seperation left_h = width + left + hexbin_marginal_seperation cbar_pos = [0.03, bottom, 0.05, 0.02+width] rect_hexbin = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] # start with a rectangular Figure fig = _plt.figure(figsize=figsize) axHexBin = _plt.axes(rect_hexbin) axHistx = _plt.axes(rect_histx) axHisty = _plt.axes(rect_histy) axHisty.set_xticklabels(axHisty.xaxis.get_ticklabels(), y=0, rotation=-90) # scale specific settings if logscale_cmap == True: hexbinscale = 'log' else: hexbinscale = None if logscale_marginals == True: scale='log' else: scale='linear' # set up colors cmapOb = _mpl.cm.get_cmap(cmap) cmapOb.set_under(color='white') if marginalCol == None: if logscale_cmap == True: marginalCol = cmapOb(0.7) cbarlabel = 'log10(N)' else: marginalCol = cmapOb(0.5) cbarlabel = 'N' # set up limits if plotlimits == None: xmin = x.min() xmax = x.max() ymin = y.min() ymax = y.max() if xmax > ymax: plotlimits = xmax * 1.1 else: plotlimits = ymax * 1.1 # the hexbin plot: hb = axHexBin.hexbin(x, y, gridsize=gridsize, bins=hexbinscale, cmap=cmap, alpha=alpha_hexbin, extent=(-plotlimits, plotlimits, -plotlimits, plotlimits), *args, **kwargs) axHexBin.axis([-plotlimits, 
plotlimits, -plotlimits, plotlimits]) cbaraxes = fig.add_axes(cbar_pos) # This is the position for the colorbar #cbar = _plt.colorbar(axp, cax = cbaraxes) cbar = fig.colorbar(hb, cax = cbaraxes, drawedges=False) #, orientation="horizontal" cbar.solids.set_edgecolor("face") cbar.solids.set_rasterized(True) cbar.solids.set_alpha(alpha_hexbin) cbar.ax.set_yticklabels(cbar.ax.yaxis.get_ticklabels(), y=0, rotation=45) cbar.set_label(cbarlabel, labelpad=-25, y=1.05, rotation=0) axHexBin.set_xlim((-plotlimits, plotlimits)) axHexBin.set_ylim((-plotlimits, plotlimits)) # now determine bin size binwidth = (2*plotlimits)/marginalBins xymax = _np.max([_np.max(_np.fabs(x)), _np.max(_np.fabs(y))]) lim = plotlimits #(int(xymax/binwidth) + 1) * binwidth bins = _np.arange(-lim, lim + binwidth, binwidth) axHistx.hist(x, bins=bins, color=marginalCol, alpha=alpha_marginals, linewidth=0) axHistx.set_yscale(value=scale) axHisty.hist(y, bins=bins, orientation='horizontal', color=marginalCol, alpha=alpha_marginals, linewidth=0) axHisty.set_xscale(value=scale) _plt.setp(axHistx.get_xticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines _plt.setp(axHisty.get_yticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines axHistx.set_xlim(axHexBin.get_xlim()) axHisty.set_ylim(axHexBin.get_ylim()) return fig, axHexBin, axHistx, axHisty, cbar
[ "def", "joint_plot", "(", "x", ",", "y", ",", "marginalBins", "=", "50", ",", "gridsize", "=", "50", ",", "plotlimits", "=", "None", ",", "logscale_cmap", "=", "False", ",", "logscale_marginals", "=", "False", ",", "alpha_hexbin", "=", "0.75", ",", "alph...
39.993243
18.912162
def factory(cfg, login, pswd, request_type):
    """
    Instantiate ExportRequest

    :param cfg: request configuration, should consist of request description (url and optional parameters)
    :param login:
    :param pswd:
    :param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
    :return: ExportRequest instance
    """
    # Dispatch table mapping each request-type constant to its concrete class.
    request_classes = {
        ExportRequest.TYPE_SET_FIELD_VALUE: SetFieldValueRequest,
        ExportRequest.TYPE_CREATE_ENTITY: CreateEntityRequest,
        ExportRequest.TYPE_DELETE_ENTITY: DeleteEntityRequest,
        ExportRequest.TYPE_CREATE_RELATION: CreateRelationRequest,
    }
    try:
        request_class = request_classes[request_type]
    except KeyError:
        raise NotImplementedError('Not supported request type - {}'.format(request_type))
    return request_class(cfg, login, pswd)
[ "def", "factory", "(", "cfg", ",", "login", ",", "pswd", ",", "request_type", ")", ":", "if", "request_type", "==", "ExportRequest", ".", "TYPE_SET_FIELD_VALUE", ":", "return", "SetFieldValueRequest", "(", "cfg", ",", "login", ",", "pswd", ")", "elif", "requ...
52.052632
22.894737
def calculate_chunks(self):
    """
    Work out the number of chunks the data is in, for cases
    where the meta data doesn't change at all so there is no
    lead in.
    Also increments the number of values for objects in this
    segment, based on the number of chunks.
    """
    if self.toc['kTocDAQmxRawData']:
        # chunks defined differently for DAQmxRawData format:
        # use the first object with a non-zero raw data size.
        try:
            data_size = next(
                o.number_values * o.raw_data_width
                for o in self.ordered_objects
                if o.has_data and
                o.number_values * o.raw_data_width > 0)
        except StopIteration:
            # No object in this segment has any raw data.
            data_size = 0
    else:
        data_size = sum([
            o.data_size
            for o in self.ordered_objects if o.has_data])
    # Total bytes of raw data in this segment, from the segment offsets.
    total_data_size = self.next_segment_offset - self.raw_data_offset
    if data_size < 0 or total_data_size < 0:
        raise ValueError("Negative data size")
    elif data_size == 0:
        # Sometimes kTocRawData is set, but there isn't actually any data
        if total_data_size != data_size:
            raise ValueError(
                "Zero channel data size but data length based on "
                "segment offset is %d." % total_data_size)
        self.num_chunks = 0
        return
    chunk_remainder = total_data_size % data_size
    if chunk_remainder == 0:
        self.num_chunks = int(total_data_size // data_size)
        # Update data count for the overall tdms object
        # using the data count for this segment.
        for obj in self.ordered_objects:
            if obj.has_data:
                obj.tdms_object.number_values += (
                    obj.number_values * self.num_chunks)
    else:
        # Data doesn't divide evenly into chunks: assume the final chunk is
        # truncated and remember what fraction of a full chunk it holds.
        log.warning(
            "Data size %d is not a multiple of the "
            "chunk size %d. Will attempt to read last chunk" %
            (total_data_size, data_size))
        self.num_chunks = 1 + int(total_data_size // data_size)
        self.final_chunk_proportion = (
            float(chunk_remainder) / float(data_size))
        for obj in self.ordered_objects:
            if obj.has_data:
                # Full chunks contribute whole counts; the last chunk only a
                # proportional (truncated) count.
                obj.tdms_object.number_values += (
                    obj.number_values * (self.num_chunks - 1) +
                    int(obj.number_values * self.final_chunk_proportion))
[ "def", "calculate_chunks", "(", "self", ")", ":", "if", "self", ".", "toc", "[", "'kTocDAQmxRawData'", "]", ":", "# chunks defined differently for DAQmxRawData format", "try", ":", "data_size", "=", "next", "(", "o", ".", "number_values", "*", "o", ".", "raw_dat...
40.360656
18.819672
def forward(self, to_recipients, forward_comment=None):
    # type: (Union[List[Contact], List[str]], str) -> None
    """Forward Message to recipients with an optional comment.

    Args:
        to_recipients: A list of :class:`Contacts <pyOutlook.core.contact.Contact>` to send the email to.
        forward_comment: String comment to append to forwarded email.

    Examples:
        >>> john = Contact('john.doe@domain.com')
        >>> betsy = Contact('betsy.donalds@domain.com')
        >>> email = Message()
        >>> email.forward([john, betsy])
        >>> email.forward([john], 'Hey John')
    """
    payload = dict()

    if forward_comment is not None:
        payload.update(Comment=forward_comment)

    # A list of strings can also be provided for convenience. Wrap only the
    # string entries in Contact objects: the previous implementation wrapped
    # *every* entry as soon as any string was present, which broke mixed
    # Contact/str lists by passing Contact instances to Contact(email=...).
    to_recipients = [Contact(email=recipient) if isinstance(recipient, str) else recipient
                     for recipient in to_recipients]

    # Contact() will handle turning itself into the proper JSON format for the API
    to_recipients = [contact.api_representation() for contact in to_recipients]

    payload.update(ToRecipients=to_recipients)

    endpoint = 'https://outlook.office.com/api/v2.0/me/messages/{}/forward'.format(self.message_id)

    self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload))
[ "def", "forward", "(", "self", ",", "to_recipients", ",", "forward_comment", "=", "None", ")", ":", "# type: (Union[List[Contact], List[str]], str) -> None", "payload", "=", "dict", "(", ")", "if", "forward_comment", "is", "not", "None", ":", "payload", ".", "upda...
44.25
28.96875
def append_string(t, string):
    """Append a string to a node, as text or tail of last child."""
    if not string:
        # Nothing to append.
        return
    node = t.tree
    if len(node):
        # Node has children: the string belongs in the last child's tail.
        last_child = node[-1]
        if last_child.tail is None:
            last_child.tail = string
        else:
            last_child.tail += string
    else:
        # No children: the string becomes (part of) the node's text.
        if node.text is None:
            node.text = string
        else:
            node.text += string
[ "def", "append_string", "(", "t", ",", "string", ")", ":", "node", "=", "t", ".", "tree", "if", "string", ":", "if", "len", "(", "node", ")", "==", "0", ":", "if", "node", ".", "text", "is", "not", "None", ":", "node", ".", "text", "+=", "strin...
31.133333
10.666667
def image_mutual_information(image1, image2):
    """
    Compute mutual information between two ANTsImage types

    ANTsR function: `antsImageMutualInformation`

    Arguments
    ---------
    image1 : ANTsImage
        image 1

    image2 : ANTsImage
        image 2

    Returns
    -------
    scalar

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read( ants.get_ants_data('r16') ).clone('float')
    >>> mi = ants.image_read( ants.get_ants_data('r64') ).clone('float')
    >>> mival = ants.image_mutual_information(fi, mi) # -0.1796141
    """
    # The underlying C++ routine is only compiled for float pixels.
    if not (image1.pixeltype == 'float' and image2.pixeltype == 'float'):
        raise ValueError('Both images must have float pixeltype')
    if image1.dimension != image2.dimension:
        raise ValueError('Both images must have same dimension')
    # Dispatch to the dimension-specific compiled implementation.
    libfn = utils.get_lib_fn('antsImageMutualInformation%iD' % image1.dimension)
    return libfn(image1.pointer, image2.pointer)
[ "def", "image_mutual_information", "(", "image1", ",", "image2", ")", ":", "if", "(", "image1", ".", "pixeltype", "!=", "'float'", ")", "or", "(", "image2", ".", "pixeltype", "!=", "'float'", ")", ":", "raise", "ValueError", "(", "'Both images must have float ...
27.969697
25.30303
async def close(self):
    '''Close the listening socket. This does not close any ServerSession
    objects created to handle incoming connections.
    '''
    if not self.server:
        # Already closed (or never started): nothing to do.
        return
    self.server.close()
    # Wait until the socket is fully torn down before forgetting it.
    await self.server.wait_closed()
    self.server = None
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "server", ":", "self", ".", "server", ".", "close", "(", ")", "await", "self", ".", "server", ".", "wait_closed", "(", ")", "self", ".", "server", "=", "None" ]
36.5
17.25
def create_new_migration_record(self):
    """Create and persist a new migration record for this migration set."""
    # Build the record from this set's name and its latest known version.
    record = self.migration_model(
        name=self.name,
        version=self.latest_migration)
    # Stage and commit in one go; callers rely on the commit happening here.
    self.session.add(record)
    self.session.commit()
[ "def", "create_new_migration_record", "(", "self", ")", ":", "migration_record", "=", "self", ".", "migration_model", "(", "name", "=", "self", ".", "name", ",", "version", "=", "self", ".", "latest_migration", ")", "self", ".", "session", ".", "add", "(", ...
34.222222
6.444444
def list_default_storage_policy_of_datastore(datastore, service_instance=None):
    '''
    Returns a list of datastores assign the the storage policies.

    datastore
        Name of the datastore to assign.
        The datastore needs to be visible to the VMware entity the proxy
        points to.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
    '''
    log.trace('Listing the default storage policy of datastore \'%s\'',
              datastore)
    # Resolve the datastore name to a managed-object reference, scoped to
    # whatever entity the proxy targets.
    proxy_target = _get_proxy_target(service_instance)
    datastores = salt.utils.vmware.get_datastores(
        service_instance, proxy_target, datastore_names=[datastore])
    if not datastores:
        raise VMwareObjectRetrievalError(
            'Datastore \'{0}\' was not found'.format(datastore))
    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
    default_policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
        profile_manager, datastores[0])
    return _get_policy_dict(default_policy)
[ "def", "list_default_storage_policy_of_datastore", "(", "datastore", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Listing the default storage policy of datastore \\'%s\\''", ",", "datastore", ")", "# Find datastore", "target_ref", "=", "_get_p...
40.793103
26.793103
def toListString(value):
    """
    Convert a value to list of strings, if possible.
    """
    if TypeConverters._can_convert_to_list(value):
        converted = TypeConverters.toList(value)
        # Only succeed when every element is individually string-convertible.
        if all(TypeConverters._can_convert_to_string(item) for item in converted):
            return [TypeConverters.toString(item) for item in converted]
    raise TypeError("Could not convert %s to list of strings" % value)
[ "def", "toListString", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters"...
47.444444
17.444444
def orbit_fit_residuals(discoveries, blockID='O13AE'):
    """Brett: What are relevant and stunning are the actual orbit fit residuals for orbits
    with d(a)/a<1%. I would be happy with a simple histogram of all those numbers, although
    the 'tail' (probably blended and not yet completely flagged) should perhaps be excluded.
    There will be a pretty clear gaussian I would think, peaking near 0.07".
    """
    # NOTE(review): blockID is unused in the body; presumably kept for
    # interface symmetry with sibling plotting helpers -- confirm.
    ra_residuals = []
    dec_residuals = []
    ressum = []
    for i, orbit in enumerate(discoveries):
        # Only well-determined orbits: fractional semimajor-axis error < 1%.
        if (orbit.da / orbit.a) < 0.01:
            # Python 2 print statement: this module predates Python 3.
            print i, len(discoveries), orbit.observations[0].provisional_name, orbit.da / orbit.a
            res = orbit.residuals.split('\n')
            for r in res:
                rr = r.split(' ')
                # Skip empty lines and observations flagged with a leading '!'.
                if (len(rr) > 1) and not (rr[0].startswith('!')):
                    # Fields 4 and 5 of the residual line are the RA and Dec
                    # residuals (assumed arcsec -- TODO confirm against format).
                    ra_residuals.append(float(r.split(' ')[4]))
                    dec_residuals.append(float(r.split(' ')[5]))
                    # Squared combined residual, scaled by 1000 for the
                    # milliarcsec axis label below.
                    ressum.append(1000 * (float(r.split(' ')[4]) ** 2 + float(r.split(' ')[5]) ** 2))
    plt.figure(figsize=(6, 6))
    bins = [r for r in range(-25, 150, 25)]
    n, bins, patches = plt.hist(ressum, histtype='step', color='b', bins=bins,
                                label='$dra^{2}+ddec^{2}$')
    # Disabled gaussian fit of the histogram (kept for reference):
    # midpoints = [r/1000. for r in range(-350,350,50)]
    # popt, pcov = curve_fit(gaussian, midpoints, n)
    # print 'a', popt[0], 'mu', popt[1], 'sigma', popt[2]
    # xm = np.linspace(-.375, .375, 100)  # 100 evenly spaced points
    # plt.plot(xm, gaussian(xm, popt[0], popt[1], popt[2]), ls='--', color='#E47833', linewidth=2)
    # sigma = r'$\sigma = %.2f \pm %.2f$' % (popt[2], np.sqrt(pcov[2, 2]))
    # plt.annotate(sigma, (0.12, 50), color='#E47833')
    plt.xlim((-25, 150))  # sufficient for 13AE data
    plt.ylabel('observations of orbits with d(a)/a < 1% (i.e 3-5 month arcs)')
    plt.xlabel('orbit fit residuals (milliarcsec)')
    plt.legend()
    # clean_legend()
    plt.draw()
    outfile = 'orbit_fit_residuals_13AE_corrected'
    plt.savefig('/ossos_sequence/' + outfile + '.pdf', transparent=True)
    return
[ "def", "orbit_fit_residuals", "(", "discoveries", ",", "blockID", "=", "'O13AE'", ")", ":", "ra_residuals", "=", "[", "]", "dec_residuals", "=", "[", "]", "ressum", "=", "[", "]", "for", "i", ",", "orbit", "in", "enumerate", "(", "discoveries", ")", ":",...
51.075
24.675
def process(self, instance, force=False):
    """Processing is triggered by field's pre_save method. It will be executed
    if field's value has been changed (known through descriptor and stashing
    logic) or if model instance has never been saved before, i.e. no pk set,
    because there is a chance that field was initialized through model's
    `__init__`, hence default value was stashed with pre_init handler.
    """
    if self.should_process and (force or self.has_stashed_value):
        # Mark the instance busy before any dependency work begins.
        self.set_status(instance, {'state': 'busy'})
        # Stash current values of all processable dependencies so they can be
        # restored or compared after processing.
        for d in filter(lambda d: d.has_processor(), self.dependencies):
            d.stash_previous_value(instance, d.get_value(instance))
        try:
            if self.has_async:
                # Run the synchronous dependencies first, then hand the
                # remaining (async) ones to a background handler, which is
                # responsible for calling finished_processing itself.
                # NOTE: `d.async` is a plain attribute name; `async` became a
                # reserved word in Python 3.7, so this code targets older
                # interpreters.
                for d in filter(lambda d: not d.async and d.should_process(),
                                self.dependencies):
                    self._process(d, instance)
                async_handler = AsyncHandler(self, instance)
                async_handler.start()
            else:
                for d in filter(lambda d: d.should_process(),
                                self.dependencies):
                    self._process(d, instance)
                # Everything ran synchronously: finish immediately.
                self.finished_processing(instance)
        except BaseException as e:
            self.failed_processing(instance, e)
            # Domain-level ProcessingErrors are considered handled; anything
            # else is re-raised for the caller.
            if not isinstance(e, ProcessingError):
                raise
    elif self.has_stashed_value:
        # Nothing to process, but a stale stashed value remains: drop it.
        self.cleanup_stash()
[ "def", "process", "(", "self", ",", "instance", ",", "force", "=", "False", ")", ":", "if", "self", ".", "should_process", "and", "(", "force", "or", "self", ".", "has_stashed_value", ")", ":", "self", ".", "set_status", "(", "instance", ",", "{", "'st...
50.333333
19.066667
def findReference(self, name, cls=QtGui.QWidget):
    """
    Looks up a reference from the widget based on its object name.

    :param      name | <str>
                cls  | <subclass of QtGui.QObject>

    :return     <QtGui.QObject> || None
    """
    widget = self._referenceWidget
    # No reference widget configured yet.
    if not widget:
        return None
    # The reference widget itself may be the one being asked for.
    if widget.objectName() == name:
        return widget
    # Otherwise search its descendants of the requested class.
    return widget.findChild(cls, name)
[ "def", "findReference", "(", "self", ",", "name", ",", "cls", "=", "QtGui", ".", "QWidget", ")", ":", "ref_widget", "=", "self", ".", "_referenceWidget", "if", "not", "ref_widget", ":", "return", "None", "if", "ref_widget", ".", "objectName", "(", ")", "...
32.375
13.25
def createproject():
    """ compile for the platform we are running on """
    os.chdir(build_dir)
    if not windows_build:
        # Non-Windows: a single cmake invocation; bail out on failure.
        rv = os.system("cmake -DPYTHON_EXECUTABLE:FILEPATH={} ../..".format(sys.executable))
        if int(rv) > 0:
            sys.exit(1)
        return
    # Windows: pick the generator architecture from the requested bitness.
    arch = "win32" if bitness == 32 else "x64"
    os.system('cmake -A {} -DPYTHON_EXECUTABLE:FILEPATH="{}" ../..'.format(arch, sys.executable))
    if bitness == 64:
        # cmake still emits WIN32 defines; patch the generated projects in place.
        for project_file in ("_rhino3dm.vcxproj", "opennurbs_static.vcxproj"):
            for line in fileinput.input(project_file, inplace=1):
                print(line.replace("WIN32;", "WIN64;"))
[ "def", "createproject", "(", ")", ":", "os", ".", "chdir", "(", "build_dir", ")", "if", "windows_build", ":", "command", "=", "'cmake -A {} -DPYTHON_EXECUTABLE:FILEPATH=\"{}\" ../..'", ".", "format", "(", "\"win32\"", "if", "bitness", "==", "32", "else", "\"x64\""...
51.066667
26.533333
def _umis_cmd(data):
    """Return umis command line argument, with correct python and locale.
    """
    locale_prefix = utils.locale_export()
    python_bin = utils.get_program_python("umis")
    umis_bin = config_utils.get_program("umis", data["config"], default="umis")
    return "{} {} {}".format(locale_prefix, python_bin, umis_bin)
[ "def", "_umis_cmd", "(", "data", ")", ":", "return", "\"%s %s %s\"", "%", "(", "utils", ".", "locale_export", "(", ")", ",", "utils", ".", "get_program_python", "(", "\"umis\"", ")", ",", "config_utils", ".", "get_program", "(", "\"umis\"", ",", "data", "[...
49.166667
15.833333
def if_none_match(self):
    """
    Get the if-none-match option of a request.

    :return: True, if if-none-match is present
    :rtype : bool
    """
    target = defines.OptionRegistry.IF_NONE_MATCH.number
    return any(option.number == target for option in self.options)
[ "def", "if_none_match", "(", "self", ")", ":", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "number", "==", "defines", ".", "OptionRegistry", ".", "IF_NONE_MATCH", ".", "number", ":", "return", "True", "return", "False" ]
29.545455
15.363636
def saveCertPem(self, cert, path):
    '''
    Save a certificate in PEM format to a file outside the certdir.
    '''
    # Serialize first so a dump failure never leaves a truncated file open.
    pem_bytes = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
    with s_common.genfile(path) as fd:
        fd.write(pem_bytes)
[ "def", "saveCertPem", "(", "self", ",", "cert", ",", "path", ")", ":", "with", "s_common", ".", "genfile", "(", "path", ")", "as", "fd", ":", "fd", ".", "write", "(", "crypto", ".", "dump_certificate", "(", "crypto", ".", "FILETYPE_PEM", ",", "cert", ...
40.166667
21.5
def _in_class(self, node): """Find if callable is function or method.""" # Move left one indentation level and check if that callable is a class indent = self._get_indent(node) for indent_dict in reversed(self._indent_stack): # pragma: no branch if (indent_dict["level"] < indent) or (indent_dict["type"] == "module"): return indent_dict["type"] == "class"
[ "def", "_in_class", "(", "self", ",", "node", ")", ":", "# Move left one indentation level and check if that callable is a class", "indent", "=", "self", ".", "_get_indent", "(", "node", ")", "for", "indent_dict", "in", "reversed", "(", "self", ".", "_indent_stack", ...
58.714286
21.142857
def upload_theme():
    """ upload and/or update the theme with the current git state"""
    get_vars()
    with fab.settings():
        # Absolute path of the theme checkout on the local machine.
        theme_dir = path.abspath(
            path.join(fab.env['config_base'],
                      fab.env.instance.config['local_theme_path']))
        destination = '{{host_string}}:{themes_dir}/{ploy_theme_name}'.format(**AV)
        # Mirror the local theme directory to the host, removing stale files.
        rsync('-av', '--delete', '%s/' % theme_dir, destination)
        briefkasten_ctl('restart')
[ "def", "upload_theme", "(", ")", ":", "get_vars", "(", ")", "with", "fab", ".", "settings", "(", ")", ":", "local_theme_path", "=", "path", ".", "abspath", "(", "path", ".", "join", "(", "fab", ".", "env", "[", "'config_base'", "]", ",", "fab", ".", ...
34.214286
16.214286
def _get_base_command(self): """Returns the base command plus command-line options. Does not include input file, output file, and training set. """ cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) jvm_command = "java" jvm_arguments = self._commandline_join( [self.Parameters[k] for k in self._jvm_parameters]) jar_arguments = '-jar "%s"' % self._get_jar_fp() rdp_arguments = self._commandline_join( [self.Parameters[k] for k in self._options]) command_parts = [ cd_command, jvm_command, jvm_arguments, jar_arguments, rdp_arguments, '-q'] return self._commandline_join(command_parts).strip()
[ "def", "_get_base_command", "(", "self", ")", ":", "cd_command", "=", "''", ".", "join", "(", "[", "'cd '", ",", "str", "(", "self", ".", "WorkingDir", ")", ",", "';'", "]", ")", "jvm_command", "=", "\"java\"", "jvm_arguments", "=", "self", ".", "_comm...
41.882353
17.235294
def _soup_strings(soup):
  """Return text strings in soup."""
  # Tags whose whole subtree is emitted as a single normalized string.
  paragraph_tags = set([
      "caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p",
      "td", "div", "span"
  ])

  skip_children = None
  for descendant in soup.descendants:
    # If we've treated a tag as a contiguous paragraph, don't re-emit the
    # children (see below).
    if skip_children is not None:
      try:
        in_skip = descendant in skip_children  # pylint: disable=unsupported-membership-test
      except RecursionError:  # pylint: disable=undefined-variable
        # Possible for this check to hit a nasty infinite recursion because of
        # BeautifulSoup __eq__ checks.
        in_skip = True
      if in_skip:
        continue
      else:
        skip_children = None

    # Treat some tags as contiguous paragraphs, regardless of other tags nested
    # inside (like <a> or <b>).
    if isinstance(descendant, bs4.Tag):
      if descendant.name in paragraph_tags:
        if descendant.find_all(paragraph_tags):
          # If there are nested paragraph tags, don't treat it as a single
          # contiguous tag.
          continue
        # Remember this subtree so its nodes are skipped on later iterations.
        skip_children = list(descendant.descendants)
        # Collapse all whitespace runs inside the paragraph to single spaces.
        text = " ".join(descendant.get_text(" ", strip=True).split())
        if text:
          yield text
        continue

    # Only plain text nodes remain interesting; skip comments and other tags.
    if (isinstance(descendant, bs4.Comment) or
        not isinstance(descendant, bs4.NavigableString)):
      continue
    text = " ".join(descendant.strip().split())
    if text:
      yield text
[ "def", "_soup_strings", "(", "soup", ")", ":", "paragraph_tags", "=", "set", "(", "[", "\"caption\"", ",", "\"details\"", ",", "\"h1\"", ",", "\"h2\"", ",", "\"h3\"", ",", "\"h4\"", ",", "\"h5\"", ",", "\"h6\"", ",", "\"li\"", ",", "\"p\"", ",", "\"td\""...
33.636364
21.863636
def find_split_node(mst_wood, i):
    """
    Parameters
    ----------
    mst_wood : list of dictionarys
    i : int
        Number of the stroke where one mst gets split

    Returns
    -------
    tuple :
        (mst index, node index)
    """
    for tree_index, tree in enumerate(mst_wood):
        strokes = tree['strokes']
        if i in strokes:
            # Node index is the position of stroke i within this MST.
            return (tree_index, strokes.index(i))
    raise ValueError('%i was not found as stroke index.' % i)
[ "def", "find_split_node", "(", "mst_wood", ",", "i", ")", ":", "for", "mst_index", ",", "mst", "in", "enumerate", "(", "mst_wood", ")", ":", "if", "i", "in", "mst", "[", "'strokes'", "]", ":", "return", "(", "mst_index", ",", "mst", "[", "'strokes'", ...
25.294118
17.294118
def _match_vcs_scheme(url):
    # type: (str) -> Optional[str]
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    from pipenv.patched.notpip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        # Slice instead of indexing url[len(scheme)]: indexing raised
        # IndexError when the URL was exactly the scheme with no separator.
        # An empty slice matches neither '+' nor ':' in the tuple test below.
        separator = url[len(scheme):len(scheme) + 1]
        if url.lower().startswith(scheme) and separator in ('+', ':'):
            return scheme
    return None
[ "def", "_match_vcs_scheme", "(", "url", ")", ":", "# type: (str) -> Optional[str]", "from", "pipenv", ".", "patched", ".", "notpip", ".", "_internal", ".", "vcs", "import", "VcsSupport", "for", "scheme", "in", "VcsSupport", ".", "schemes", ":", "if", "url", "....
34.636364
16.272727
def compress_pdf(filepath, output_path, ghostscript_binary):
    """Compress a single PDF file.

    Args:
        filepath (str): Path to the PDF file.
        output_path (str): Output path.
        ghostscript_binary (str): Name/alias of the Ghostscript binary.

    Raises:
        ValueError: if `filepath` does not end with .pdf.
        FileNotFoundError: if the input file does not exist, or if the
            Ghostscript binary (or `cp`) cannot be found.
    """
    if not filepath.endswith(PDF_EXTENSION):
        raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)

    # Stat the input *outside* the try block: previously a missing input file
    # raised FileNotFoundError inside it and was misreported as a missing
    # Ghostscript installation.
    file_size = os.stat(filepath).st_size

    try:
        if file_size < FILE_SIZE_LOWER_LIMIT:
            # Too small to benefit from compression: copy as-is.
            LOGGER.info(NOT_COMPRESSING.format(filepath, file_size,
                                               FILE_SIZE_LOWER_LIMIT))
            process = subprocess.Popen(['cp', filepath, output_path])
        else:
            LOGGER.info(COMPRESSING.format(filepath))
            process = subprocess.Popen(
                [ghostscript_binary,
                 "-sDEVICE=pdfwrite",
                 # NOTE(review): "-dCompatabilityLevel" looks like a typo for
                 # "-dCompatibilityLevel"; kept as-is to preserve behavior.
                 "-dCompatabilityLevel=1.4",
                 "-dPDFSETTINGS=/ebook",
                 "-dNOPAUSE",
                 "-dQUIET",
                 "-dBATCH",
                 "-sOutputFile=%s" % output_path,
                 filepath]
            )
    except FileNotFoundError as err:
        msg = GS_NOT_INSTALLED.format(ghostscript_binary)
        raise FileNotFoundError(msg) from err

    process.communicate()
    LOGGER.info(FILE_DONE.format(output_path))
[ "def", "compress_pdf", "(", "filepath", ",", "output_path", ",", "ghostscript_binary", ")", ":", "if", "not", "filepath", ".", "endswith", "(", "PDF_EXTENSION", ")", ":", "raise", "ValueError", "(", "\"Filename must end with .pdf!\\n%s does not.\"", "%", "filepath", ...
39.125
18.53125
def add_attachment(message, attachment, rfc2231=True):
    '''Attach an attachment to a message as a side effect.

    Arguments:
    message: MIMEMultipart instance.
    attachment: Attachment instance.
    '''
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(attachment.read())
    # Base64-encode the payload and set the transfer-encoding header.
    encoders.encode_base64(part)
    # RFC 2231 filenames are passed through raw; otherwise encode via Header.
    if rfc2231:
        filename = attachment.name
    else:
        filename = Header(attachment.name).encode()
    part.add_header('Content-Disposition', 'attachment', filename=filename)
    message.attach(part)
[ "def", "add_attachment", "(", "message", ",", "attachment", ",", "rfc2231", "=", "True", ")", ":", "data", "=", "attachment", ".", "read", "(", ")", "part", "=", "MIMEBase", "(", "'application'", ",", "'octet-stream'", ")", "part", ".", "set_payload", "(",...
32.117647
19.294118
def get(company='', company_uri=''):
    """Performs a HTTP GET for a glassdoor page and returns json"""
    if not (company or company_uri):
        raise Exception("glassdoor.gd.get(company='', company_uri=''): "
                        " company or company_uri required")
    payload = {}
    if company_uri:
        # Direct company page: no search parameters needed.
        uri = '%s%s' % (GLASSDOOR_API, company_uri)
    else:
        # Fall back to a keyword search against the reviews endpoint.
        payload.update({'clickSource': 'searchBtn',
                        'sc.keyword': company})
        uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
    response = requests.get(uri, params=payload)
    soup = BeautifulSoup(response.content)
    return parse(soup)
[ "def", "get", "(", "company", "=", "''", ",", "company_uri", "=", "''", ")", ":", "if", "not", "company", "and", "not", "company_uri", ":", "raise", "Exception", "(", "\"glassdoor.gd.get(company='', company_uri=''): \"", "\" company or company_uri required\"", ")", ...
39.411765
13.470588
def _read_py(self, fin_py, get_goids_only, exclude_ungrouped):
    """Read Python sections file. Store: section2goids sections_seen. Return goids_fin."""
    goids_sec = []
    with open(fin_py) as istrm:
        section_name = None
        for line in istrm:
            mgo = self.srch_py_goids.search(line)  # Matches GO IDs in sections
            if mgo:
                goids_sec.append(mgo.group(2))
            elif not get_goids_only and "[" in line:
                msec = self.srch_py_section.search(line)  # Matches sections
                if msec:
                    secstr = msec.group(3)
                    # Flush GO IDs collected under the previous section before
                    # starting to collect for the new one.
                    if section_name is not None and goids_sec:
                        self.section2goids[section_name] = goids_sec
                    if not exclude_ungrouped or secstr != HdrgosSections.secdflt:
                        section_name = secstr
                        self.sections_seen.append(section_name)
                    else:
                        # Default (ungrouped) section excluded: still collect
                        # its GO IDs, but do not record the section itself.
                        section_name = None
                    goids_sec = []
        # Flush the final section after the file is exhausted.
        if section_name is not None and goids_sec:
            self.section2goids[section_name] = goids_sec
    return goids_sec
[ "def", "_read_py", "(", "self", ",", "fin_py", ",", "get_goids_only", ",", "exclude_ungrouped", ")", ":", "goids_sec", "=", "[", "]", "with", "open", "(", "fin_py", ")", "as", "istrm", ":", "section_name", "=", "None", "for", "line", "in", "istrm", ":", ...
51.416667
17.5
def add_clause(self, clause, no_return=True):
    """
    Add a new clause to solver's internal formula.
    """
    # Silently ignore the clause if the solver was never initialized.
    if not self.lingeling:
        return
    pysolvers.lingeling_add_cl(self.lingeling, clause)
[ "def", "add_clause", "(", "self", ",", "clause", ",", "no_return", "=", "True", ")", ":", "if", "self", ".", "lingeling", ":", "pysolvers", ".", "lingeling_add_cl", "(", "self", ".", "lingeling", ",", "clause", ")" ]
30.428571
14.142857
def MergeAttributeContainers(
    self, callback=None, maximum_number_of_containers=0):
  """Reads attribute containers from a task storage file into the writer.

  Args:
    callback (function[StorageWriter, AttributeContainer]): function to call
        after each attribute container is deserialized.
    maximum_number_of_containers (Optional[int]): maximum number of
        containers to merge, where 0 represent no limit.

  Returns:
    bool: True if the entire task storage file has been merged.

  Raises:
    RuntimeError: if the add method for the active attribute container
        type is missing.
    OSError: if the task storage file cannot be deleted.
    ValueError: if the maximum number of containers is a negative value.
  """
  if maximum_number_of_containers < 0:
    raise ValueError('Invalid maximum number of containers')

  if not self._cursor:
    # First call: open the task storage file and enumerate its tables.
    self._Open()
    self._ReadStorageMetadata()
    self._container_types = self._GetContainerTypes()

  number_of_containers = 0
  while self._active_cursor or self._container_types:
    if not self._active_cursor:
      self._PrepareForNextContainerType()

    if maximum_number_of_containers == 0:
      rows = self._active_cursor.fetchall()
    else:
      # Only fetch up to the remaining budget for this merge call.
      number_of_rows = maximum_number_of_containers - number_of_containers
      rows = self._active_cursor.fetchmany(size=number_of_rows)

    if not rows:
      # Current container type exhausted; advance to the next one.
      self._active_cursor = None
      continue

    for row in rows:
      identifier = identifiers.SQLTableIdentifier(
          self._active_container_type, row[0])

      if self._compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
        serialized_data = zlib.decompress(row[1])
      else:
        serialized_data = row[1]

      attribute_container = self._DeserializeAttributeContainer(
          self._active_container_type, serialized_data)
      attribute_container.SetIdentifier(identifier)

      if self._active_container_type == self._CONTAINER_TYPE_EVENT_TAG:
        # Rewrite the serialized event row reference into a proper event
        # identifier on the tag, then drop the raw row attribute.
        event_identifier = identifiers.SQLTableIdentifier(
            self._CONTAINER_TYPE_EVENT,
            attribute_container.event_row_identifier)
        attribute_container.SetEventIdentifier(event_identifier)

        del attribute_container.event_row_identifier

      if callback:
        callback(self._storage_writer, attribute_container)

      self._add_active_container_method(attribute_container)

      number_of_containers += 1

      if (maximum_number_of_containers != 0 and
          number_of_containers >= maximum_number_of_containers):
        # Budget exhausted before the file was fully merged.
        return False

  # Everything merged: close and delete the task storage file.
  self._Close()

  os.remove(self._path)

  return True
[ "def", "MergeAttributeContainers", "(", "self", ",", "callback", "=", "None", ",", "maximum_number_of_containers", "=", "0", ")", ":", "if", "maximum_number_of_containers", "<", "0", ":", "raise", "ValueError", "(", "'Invalid maximum number of containers'", ")", "if",...
33.443038
23.265823
def markers():
    ''' Prints a list of valid marker types for scatter()

    Returns:
        None
    '''
    print("Available markers: \n\n - " + "\n - ".join(list(MarkerType)))
    print()
    shortcut_lines = (" %r: %s" % item for item in _MARKER_SHORTCUTS.items())
    print("Shortcuts: \n\n" + "\n".join(shortcut_lines))
[ "def", "markers", "(", ")", ":", "print", "(", "\"Available markers: \\n\\n - \"", "+", "\"\\n - \"", ".", "join", "(", "list", "(", "MarkerType", ")", ")", ")", "print", "(", ")", "print", "(", "\"Shortcuts: \\n\\n\"", "+", "\"\\n\"", ".", "join", "(", "\...
31.222222
32.111111
def clear_reply_handlers_by_message_id(self, message_id):
    """
    Clears all callback functions registered by register_for_reply() and
    register_for_reply_by_message_id().

    :param message_id: The message id for which we want to clear reply handlers
    """
    # Reset (or create) the handler list for this message id.
    self.reply_handlers[message_id] = []
    saver = self.reply_saver
    if saver is not None:
        # Persist the cleared state via the configured saver.
        saver.start_save_timer()
[ "def", "clear_reply_handlers_by_message_id", "(", "self", ",", "message_id", ")", ":", "self", ".", "reply_handlers", "[", "message_id", "]", "=", "[", "]", "if", "self", ".", "reply_saver", "is", "not", "None", ":", "self", ".", "reply_saver", ".", "start_s...
40.5
22.3
def setvcpus(vm_, vcpus):
    '''
    Changes the amount of vcpus allocated to VM. vcpus is an int representing
    the number to be assigned

    CLI Example:

    .. code-block:: bash

        salt '*' virt.setvcpus myvm 2
    '''
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
        if vm_uuid is False:
            # VM with that label could not be resolved.
            return False
        try:
            xapi.VM.set_VCPUs_number_live(vm_uuid, vcpus)
        except Exception:
            # Any xapi failure is reported as a simple False to the caller.
            return False
        return True
[ "def", "setvcpus", "(", "vm_", ",", "vcpus", ")", ":", "with", "_get_xapi_session", "(", ")", "as", "xapi", ":", "vm_uuid", "=", "_get_label_uuid", "(", "xapi", ",", "'VM'", ",", "vm_", ")", "if", "vm_uuid", "is", "False", ":", "return", "False", "try"...
23.904762
21.142857
def build_docx(path_jinja2, template_name, path_outfile, template_kwargs=None):
    '''Helper function for building a docx file from a latex jinja2 template

    :param path_jinja2: the root directory for latex jinja2 templates
    :param template_name: the relative path, to path_jinja2, to the desired
        jinja2 Latex template
    :param path_outfile: the full path to the desired final output file
        Must contain the same file extension as files generated by
        cmd_wo_infile, otherwise the process will fail
    :param template_kwargs: a dictionary of key/values for jinja2 variables
    '''
    builder = LatexBuild(path_jinja2, template_name, template_kwargs)
    return builder.build_docx(path_outfile)
[ "def", "build_docx", "(", "path_jinja2", ",", "template_name", ",", "path_outfile", ",", "template_kwargs", "=", "None", ")", ":", "latex_template_object", "=", "LatexBuild", "(", "path_jinja2", ",", "template_name", ",", "template_kwargs", ",", ")", "return", "la...
46.352941
24.470588
def SearchFileNameTable(self, fileName):
    """
    Look up a file name in the FileName table and return its show id.

    Parameters
    ----------
    fileName : string
      File name to look up in table.

    Returns
    ----------
    int or None
      Show id for the matching entry, or None when no match exists.
      A corrupted table (multiple matches) triggers a fatal log.
    """
    goodlogging.Log.Info("DB", "Looking up filename string '{0}' in database".format(fileName), verbosity=self.logVerbosity)

    sqlQuery = "SELECT ShowID FROM FileName WHERE FileName=?"
    sqlArgs = (fileName, )
    lookup = self._ActionDatabase(sqlQuery, sqlArgs, error=False)

    # Query failed (error suppressed): treat as "no match".
    if lookup is None:
        goodlogging.Log.Info("DB", "No match found in database for '{0}'".format(fileName), verbosity=self.logVerbosity)
        return None

    matchCount = len(lookup)
    if matchCount == 0:
        return None
    if matchCount == 1:
        goodlogging.Log.Info("DB", "Found file name match: {0}".format(lookup), verbosity=self.logVerbosity)
        return lookup[0][0]
    # FileName should be unique; more than one row means a corrupted table.
    goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(lookup))
[ "def", "SearchFileNameTable", "(", "self", ",", "fileName", ")", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"DB\"", ",", "\"Looking up filename string '{0}' in database\"", ".", "format", "(", "fileName", ")", ",", "verbosity", "=", "self", ".", "logVer...
34.5
27.970588
def _validate_non_edges_do_not_have_edge_like_properties(class_name, properties):
    """Validate that a non-edge class defines neither in/out nor Link-typed properties."""
    # Non-edge classes must not carry the edge endpoint properties at all.
    if EDGE_SOURCE_PROPERTY_NAME in properties or EDGE_DESTINATION_PROPERTY_NAME in properties:
        raise IllegalSchemaStateError(u'Found a non-edge class that defines edge-like "in" or '
                                      u'"out" properties: {} {}'.format(class_name, properties))

    # Link-typed properties are likewise reserved for edge classes.
    for prop_name, descriptor in six.iteritems(properties):
        if descriptor.type_id == PROPERTY_TYPE_LINK_ID:
            raise IllegalSchemaStateError(u'Non-edge class "{}" has a property of type Link, this '
                                          u'is not allowed: {}'.format(class_name, prop_name))
[ "def", "_validate_non_edges_do_not_have_edge_like_properties", "(", "class_name", ",", "properties", ")", ":", "has_source", "=", "EDGE_SOURCE_PROPERTY_NAME", "in", "properties", "has_destination", "=", "EDGE_DESTINATION_PROPERTY_NAME", "in", "properties", "if", "has_source", ...
64.615385
34.615385
def remove_part_images(filename):
    """Delete the sibling '<name>_files' image directory for *filename*, if present."""
    root, _ext = os.path.splitext(filename)
    images_dir = '{0}_files'.format(root)
    # Nothing to do when the directory was never created.
    if os.path.exists(images_dir):
        shutil.rmtree(images_dir)
[ "def", "remove_part_images", "(", "filename", ")", ":", "dirname", "=", "'{0}_files'", ".", "format", "(", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":",...
45.2
9.8
def download_file_insecure(url, target):
    """Use Python to download the file, without connection authentication."""
    response = urlopen(url)
    try:
        # Pull the whole payload at once rather than streaming chunks.
        payload = response.read()
    finally:
        response.close()
    # Single write avoids ever leaving a partially-written target on disk.
    with open(target, "wb") as out_file:
        out_file.write(payload)
[ "def", "download_file_insecure", "(", "url", ",", "target", ")", ":", "src", "=", "urlopen", "(", "url", ")", "try", ":", "# Read all the data in one block.", "data", "=", "src", ".", "read", "(", ")", "finally", ":", "src", ".", "close", "(", ")", "# Wr...
31.083333
17.333333
def gen_params(raw_params):
    u"""
    Yield a (name, default_value) tuple for each keyword-only parameter
    in the list.

    A parameter with no default yields default_value None (the Python
    None, not a Leaf(token.NAME, 'None') node).
    """
    assert raw_params[0].type == token.STAR and len(raw_params) > 2
    limit = len(raw_params)
    idx = 2  # the first possible position of a keyword-only parameter name
    while idx < limit:
        node = raw_params[idx]
        before = node.prev_sibling
        if node.type != token.NAME:
            # Commas, defaults, etc. — not a parameter name; move on.
            idx += 1
            continue
        if before is not None and before.type == token.DOUBLESTAR:
            # Reached the **kwargs parameter: no keyword-only names follow.
            break
        after = node.next_sibling
        if after is not None and after.type == token.EQUAL:
            # "name = default": the default node follows the EQUAL sign.
            yield (node.value, after.next_sibling)
            idx += 3
        else:
            yield (node.value, None)
            idx += 1
[ "def", "gen_params", "(", "raw_params", ")", ":", "assert", "raw_params", "[", "0", "]", ".", "type", "==", "token", ".", "STAR", "and", "len", "(", "raw_params", ")", ">", "2", "curr_idx", "=", "2", "# the first place a keyword-only parameter name can be is ind...
39
16.08