code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
return sum([self.counters[key] for key in self.messages if key.startswith(prefix)])
def get_count(self, prefix='')
Return the total count of errors and warnings.
8.974235
6.570931
1.365748
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) for key in sorted(self.messages) if key.startswith(prefix)]
def get_statistics(self, prefix='')
Get statistics for message codes that start with the prefix. prefix='' matches all errors and warnings prefix='E' matches all errors prefix='W' matches all warnings prefix='E4' matches all errors that have to do with imports
5.436575
4.814425
1.129226
print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) if self.elapsed: for key in self._benchmark_keys: print('%-7d %s per second (%d total)' % (self.counters[key] / self.elapsed, key, self.counters[key]))
def print_benchmark(self)
Print benchmark numbers.
5.267221
4.771887
1.103803
self._deferred_print = [] return super(StandardReport, self).init_file( filename, lines, expected, line_offset)
def init_file(self, filename, lines, expected, line_offset)
Signal a new file.
6.60777
6.540308
1.010315
code = super(StandardReport, self).error(line_number, offset, text, check) if code and (self.counters[code] == 1 or self._repeat): self._deferred_print.append( (line_number, offset, code, text[5:], check.__doc__)) ...
def error(self, line_number, offset, text, check)
Report an error, according to options.
6.77175
6.323921
1.070815
self._deferred_print.sort() for line_number, offset, code, text, doc in self._deferred_print: print(self._fmt % { 'path': self.filename, 'row': self.line_offset + line_number, 'col': offset + 1, 'code': code, 'text': text, ...
def get_file_results(self)
Print the result and return the overall count for this file.
6.173759
5.906509
1.045247
self.options.report = (reporter or self.options.reporter)(self.options) return self.options.report
def init_report(self, reporter=None)
Initialize the report instance.
5.68323
4.977983
1.141673
if paths is None: paths = self.paths report = self.options.report runner = self.runner report.start() try: for path in paths: if os.path.isdir(path): self.input_dir(path) elif not self.excluded(p...
def check_files(self, paths=None)
Run all checks on the paths.
3.921873
3.736043
1.04974
if self.options.verbose: print('checking %s' % filename) fchecker = self.checker_class( filename, lines=lines, options=self.options) return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_file(self, filename, lines=None, expected=None, line_offset=0)
Run all checks on a Python source file.
3.784238
3.37305
1.121904
dirname = dirname.rstrip('/') if self.excluded(dirname): return 0 counters = self.options.report.counters verbose = self.options.verbose filepatterns = self.options.filename runner = self.runner for root, dirs, files in os.walk(dirname): ...
def input_dir(self, dirname)
Check all files in this directory and all subdirectories.
4.52662
4.449329
1.017371
if not self.options.exclude: return False basename = os.path.basename(filename) if filename_match(basename, self.options.exclude): return True if parent: filename = os.path.join(parent, filename) filename = os.path.abspath(filename) ...
def excluded(self, filename, parent=None)
Check if the file should be excluded. Check if 'options.exclude' contains a pattern that matches filename.
2.408444
2.241351
1.07455
if len(code) < 4 and any(s.startswith(code) for s in self.options.select): return False return (code.startswith(self.options.ignore) and not code.startswith(self.options.select))
def ignore_code(self, code)
Check if the error code should be ignored. If 'options.select' contains a prefix of the error code, return False. Else, if 'options.ignore' contains a prefix of the error code, return True.
5.303097
3.476019
1.525624
checks = [] for check, attrs in _checks[argument_name].items(): (codes, args) = attrs if any(not (code and self.ignore_code(code)) for code in codes): checks.append((check.__name__, check, args)) return sorted(checks)
def get_checks(self, argument_name)
Get all the checks for this category. Find all globally visible functions where the first argument name starts with argument_name and which contain selected tests.
6.282911
6.620252
0.949044
if use_datetime and not datetime: raise ValueError("the datetime module is not available") if FastParser and FastUnmarshaller: if use_datetime: mkdatetime = _datetime_type else: mkdatetime = _datetime target = FastUnmarshaller(True, False, _binary, mk...
def getparser(use_datetime=0)
getparser() -> parser, unmarshaller Create an instance of the fastest available parser, and attach it to an unmarshalling object. Return both objects.
4.568836
4.028685
1.134076
p, u = getparser(use_datetime=use_datetime) p.feed(data) p.close() return u.close(), u.getmethodname()
def loads(data, use_datetime=0)
data -> unmarshalled data, method name Convert an XML-RPC packet to unmarshalled data plus a method name (None if not present). If the XML-RPC packet represents a fault condition, this function raises a Fault exception.
6.906059
6.317891
1.093096
import_mod = results.get("module") pref = import_mod.prefix names = [] # create a Node list of the replacement modules for name in MAPPING[import_mod.value][:-1]: names.extend([Name(name[0], prefix=pref), Comma()]) names.append(Name(MAPPING[import_m...
def transform_import(self, node, results)
Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements.
5.91541
5.643104
1.048255
mod_member = results.get("mod_member") pref = mod_member.prefix member = results.get("member") # Simple case with only a single member being imported if member: # this may be a list of length one, or just a node if isinstance(member, list): ...
def transform_member(self, node, results)
Transform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module.
3.068382
2.982442
1.028815
module_dot = results.get("bare_with_attr") member = results.get("member") new_name = None if isinstance(member, list): member = member[0] for change in MAPPING[module_dot.value]: if member.value in change[1]: new_name = change[0] ...
def transform_dot(self, node, results)
Transform for calls to module members in code.
5.218267
4.649691
1.122283
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select', 'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer', 'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver'] for name in patched: try: __imp...
def protect_libraries_from_patching()
In this function we delete some modules from `sys.modules` dictionary and import them again inside `_pydev_saved_modules` in order to save their original copies there. After that we can use these saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
3.171526
2.961223
1.071019
''' Note: the difference from get_current_thread_id to get_thread_id is that for the current thread we can get the thread id while the thread.ident is still not set in the Thread instance. ''' try: # Fast path without getting lock. tid = thread.__pydevd_id__ if tid is Non...
def get_current_thread_id(thread)
Note: the difference from get_current_thread_id to get_thread_id is that for the current thread we can get the thread id while the thread.ident is still not set in the Thread instance.
9.103081
5.566024
1.635473
''' To be used as a decorator @call_only_once def func(): print 'Calling func only this time' Actually, in PyDev it must be called as: func = call_only_once(func) to support older versions of Python. ''' def new_func(*args, **kwargs): if not new_func._called: ...
def call_only_once(func)
To be used as a decorator @call_only_once def func(): print 'Calling func only this time' Actually, in PyDev it must be called as: func = call_only_once(func) to support older versions of Python.
4.187099
1.850786
2.262335
pkg = __import__(fixer_pkg, [], [], ["*"]) fixer_dir = os.path.dirname(pkg.__file__) fix_names = [] for name in sorted(os.listdir(fixer_dir)): if name.startswith("fix_") and name.endswith(".py"): if remove_prefix: name = name[4:] fix_names.append(name...
def get_all_fix_names(fixer_pkg, remove_prefix=True)
Return a sorted list of all available fix names in the given package.
2.09791
1.971956
1.063873
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)): # NodePatters must either have no type and no content # or a type and content -- so they don't get any farther # Always return leafs if pat.type is None: raise _EveryNode return set([pat.type]) ...
def _get_head_types(pat)
Accepts a pytree Pattern Node and returns a set of the pattern types which will match first.
5.529945
5.243286
1.054672
head_nodes = collections.defaultdict(list) every = [] for fixer in fixer_list: if fixer.pattern: try: heads = _get_head_types(fixer.pattern) except _EveryNode: every.append(fixer) else: for node_type in heads: ...
def _get_headnode_dict(fixer_list)
Accepts a list of fixers and returns a dictionary of head node type --> fixer list.
2.928421
2.820871
1.038126
pre_order_fixers = [] post_order_fixers = [] for fix_mod_path in self.fixers: mod = __import__(fix_mod_path, {}, {}, ["*"]) fix_name = fix_mod_path.rsplit(".", 1)[-1] if fix_name.startswith(self.FILE_PREFIX): fix_name = fix_name[len(se...
def get_fixers(self)
Inspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal.
2.321453
2.22692
1.04245
if args: msg = msg % args self.logger.info(msg)
def log_message(self, msg, *args)
Hook to log a message.
3.746071
3.704981
1.011091
for dir_or_file in items: if os.path.isdir(dir_or_file): self.refactor_dir(dir_or_file, write, doctests_only) else: self.refactor_file(dir_or_file, write, doctests_only)
def refactor(self, items, write=False, doctests_only=False)
Refactor a list of files and directories.
1.822135
1.71957
1.059646
py_ext = os.extsep + "py" for dirpath, dirnames, filenames in os.walk(dir_name): self.log_debug("Descending into %s", dirpath) dirnames.sort() filenames.sort() for name in filenames: if (not name.startswith(".") and ...
def refactor_dir(self, dir_name, write=False, doctests_only=False)
Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped.
2.724752
2.67408
1.018949
try: f = open(filename, "rb") except IOError as err: self.log_error("Can't open %s: %s", filename, err) return None, None try: encoding = tokenize.detect_encoding(f.readline)[0] finally: f.close() with _open_wit...
def _read_python_source(self, filename)
Do our best to decode a Python source file correctly.
3.203938
3.029844
1.05746
input, encoding = self._read_python_source(filename) if input is None: # Reading the file failed. return input += u"\n" # Silence certain parse errors if doctests_only: self.log_debug("Refactoring doctests in %s", filename) output ...
def refactor_file(self, filename, write=False, doctests_only=False)
Refactors a file.
3.743681
3.738926
1.001272
features = _detect_future_features(data) if "print_function" in features: self.driver.grammar = pygram.python_grammar_no_print_statement try: tree = self.driver.parse_string(data) except Exception as err: self.log_error("Can't parse %s: %s: %s...
def refactor_string(self, data, name)
Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse.
4.507891
3.856506
1.168905
for fixer in chain(self.pre_order, self.post_order): fixer.start_tree(tree, name) #use traditional matching for the incompatible fixers self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) self.traverse_by(self.bmi_post_order_heads, tree.post_order()) ...
def refactor_tree(self, tree, name)
Refactors a parse tree (modifying the tree in place). For compatible patterns the bottom matcher module is used. Otherwise the tree is traversed node-to-node for matches. Args: tree: a pytree.Node instance representing the root of the tree to be refactored...
4.154745
4.076144
1.019283
if not fixers: return for node in traversal: for fixer in fixers[node.type]: results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: n...
def traverse_by(self, fixers, traversal)
Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None
3.376168
3.807328
0.886755
self.files.append(filename) if old_text is None: old_text = self._read_python_source(filename)[0] if old_text is None: return equal = old_text == new_text self.print_output(old_text, new_text, filename, equal) if equal: ...
def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None)
Called when a file has been refactored and there may be changes.
2.969044
2.861391
1.037623
try: f = _open_with_encoding(filename, "w", encoding=encoding) except os.error as err: self.log_error("Can't create %s: %s", filename, err) return try: f.write(_to_system_newlines(new_text)) except os.error as err: self...
def write_file(self, new_text, filename, old_text, encoding=None)
Writes a string to a file. It first shows a unified diff between the old text and the new text, and then rewrites the file; the latter is only done if the write option is set.
2.552113
2.736359
0.932668
result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refac...
def refactor_docstring(self, input, filename)
Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't us...
2.10301
2.085226
1.008528
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) tree.future_features = frozenset() return tree
def parse_block(self, block, lineno, indent)
Parses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree.
13.485756
13.407404
1.005844
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next) for type, value, (line0, col0), (line1, col1), line_text in tokens: line0 += lineno - 1 line1 += lineno - 1 # Don't bother updating the columns; this is too complicated # since ...
def wrap_toks(self, block, lineno, indent)
Wraps a tokenize stream to systematically modify start/end.
6.614462
6.361721
1.039728
prefix1 = indent + self.PS1 prefix2 = indent + self.PS2 prefix = prefix1 for line in block: if line.startswith(prefix): yield line[len(prefix):] elif line == prefix.rstrip() + u"\n": yield u"\n" else: ...
def gen_lines(self, block, indent)
Generates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line.
3.69033
2.893183
1.275526
xml = "" keys = dict_keys(frame_f_locals) if hasattr(keys, 'sort'): keys.sort() # Python 3.0 does not have it else: keys = sorted(keys) # Jython 2.1 does not have it return_values_xml = '' for k in keys: try: v = frame_f_locals[k] eval_fu...
def frame_vars_to_xml(frame_f_locals, hidden_ns=None)
dumps frame variables to XML <var name="var_name" scope="local" type="type" value="value"/>
3.958286
3.934541
1.006035
type_name, type_qualifier, is_exception_on_eval, resolver, value = get_variable_details( val, evaluate_full_value) try: name = quote(name, '/>_= ') # TODO: Fix PY-5834 without using quote except: pass xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_...
def var_to_xml(val, name, trim_if_too_big=True, additional_in_xml='', evaluate_full_value=True)
single variable or dictionary to xml representation
3.542504
3.533328
1.002597
".exploitable - Determine the approximate exploitability rating" from winappdbg import Crash event = self.debug.lastEvent crash = Crash(event) crash.fetch_extra_data(event) status, rule, description = crash.isExploitable() print "-" * 79 print "Exploitability: %s" % status print ...
def do(self, arg)
.exploitable - Determine the approximate exploitability rating
9.202699
6.166508
1.492368
self.device = None self.doc = None self.parser = None self.resmgr = None self.interpreter = None
def _cleanup(self)
Frees lots of non-textual information, such as the fonts and images and the objects that were needed to parse the PDF.
9.33401
6.698015
1.393549
if clean: return utils.normalise_whitespace(''.join(self).replace('\n', ' ')) else: return ''.join(self)
def text(self, clean=True)
Returns the text of the PDF as a single string. Options: :clean: Removes misc cruft, like lots of whitespace.
4.321243
5.168847
0.836017
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)') _underscorer2 = re.compile('([a-z0-9])([A-Z])') subbed = _underscorer1.sub(r'\1_\2', s) return _underscorer2.sub(r'\1_\2', subbed).lower()
def camelToSnake(s)
https://gist.github.com/jaytaylor/3660565 Is it ironic that this function is written in camel case, yet it converts to snake case? hmm..
1.600899
1.583867
1.010753
type_ = locate(app_name) if inspect.isclass(type_): return type_.name return app_name
def get_app_name(app_name)
Returns a app name from new app config if is a class or the same app name if is not a class.
5.324897
5.273578
1.009731
if user and user.is_superuser: return True if not isinstance(roles, list): roles = [roles] normalized_roles = [] for role in roles: if not inspect.isclass(role): role = RolesManager.retrieve_role(role) normalized_roles.append(role) user_roles = ge...
def has_role(user, roles)
Check if a user has any of the given roles.
2.660582
2.631712
1.01097
if user and user.is_superuser: return True return permission_name in available_perm_names(user)
def has_permission(user, permission_name)
Check if a user has a given permission.
6.507121
6.426465
1.012551
if user and user.is_superuser: return True checker = PermissionsManager.retrieve_checker(checker_name) user_roles = get_user_roles(user) if not user_roles: user_roles = [None] return any([checker(user_role, user, obj) for user_role in user_roles])
def has_object_permission(checker_name, user, obj)
Check if a user has permission to perform an action on an object.
3.308292
3.141878
1.052966
user_ct = ContentType.objects.get_for_model(get_user_model()) return Permission.objects.get_or_create(content_type=user_ct, codename=codename, defaults={'name': name(codename) if callable(name) else name})
def get_or_create_permission(codename, name=camel_or_snake_to_title)
Get a Permission object from a permission name. @:param codename: permission code name @:param name: human-readable permissions name (str) or callable that takes codename as argument and returns str
2.35345
2.452985
0.959423
if user: groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related. roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names()) return sorted(roles, key=lambda r: r.get_name() ) else: ...
def get_user_roles(user)
Get a list of a users's roles.
6.04478
6.179418
0.978212
roles = get_user_roles(user) for role in roles: role.remove_role_from_user(user) return roles
def clear_roles(user)
Remove all roles from a user.
3.684204
3.758567
0.980215
roles = get_user_roles(user) permission_hash = {} for role in roles: permission_names = role.permission_names_list() for permission_name in permission_names: permission_hash[permission_name] = get_permission( permission_name) in user.user_permissions.all() ...
def available_perm_status(user)
Get a boolean map of the permissions available to a user based on that user's roles.
3.331395
3.054186
1.090764
roles = get_user_roles(user) perm_names = set(p for role in roles for p in role.permission_names_list()) return [p.codename for p in user.user_permissions.all() if p.codename in perm_names] \ if roles else []
def available_perm_names(user)
Return a list of permissions codenames available to a user, based on that user's roles. i.e., keys for all "True" permissions from available_perm_status(user): Assert: set(available_perm_names(user)) == set(perm for perm,has_perm in available_perm_status(user) if has_perm) Query efficient; especiall...
4.359984
4.256854
1.024227
roles = get_user_roles(user) for role in roles: if permission_name in role.permission_names_list(): permission = get_permission(permission_name) user.user_permissions.add(permission) return raise RolePermissionScopeException( "This permission isn't ...
def grant_permission(user, permission_name)
Grant a user a specified permission. Permissions are only granted if they are in the scope any of the user's roles. If the permission is out of scope, a RolePermissionScopeException is raised.
4.754414
3.404034
1.3967
roles = get_user_roles(user) for role in roles: if permission_name in role.permission_names_list(): permission = get_permission(permission_name) user.user_permissions.remove(permission) return raise RolePermissionScopeException( "This permission isn...
def revoke_permission(user, permission_name)
Revoke a specified permission from a user. Permissions are only revoked if they are in the scope any of the user's roles. If the permission is out of scope, a RolePermissionScopeException is raised.
4.74712
3.511772
1.351774
if dataset_name in classification_dataset_names: data_type = 'classification' elif dataset_name in regression_dataset_names: data_type = 'regression' else: raise ValueError('Dataset not found in PMLB.') dataset_url = '{GITHUB_URL}/{DATA_TYPE}/{DATASET_NAME}/{DATASET_NAME}{S...
def fetch_data(dataset_name, return_X_y=False, local_cache_dir=None)
Download a data set from the PMLB, (optionally) store it locally, and return the data set. You must be connected to the internet if you are fetching a data set that is not cached locally. Parameters ---------- dataset_name: str The name of the data set to load from PMLB. return_X_y: bool (...
1.953696
1.95656
0.998536
if not data: return 0 #imb - shows measure of inbalance within a dataset imb = 0 num_classes=float(len(Counter(data))) for x in Counter(data).values(): p_x = float(x)/len(data) if p_x > 0: imb += (p_x - 1/num_classes)*(p_x - 1/num_classes) #worst case sc...
def imbalance_metrics(data)
Computes imbalance metric for a given dataset. Imbalance metric is equal to 0 when a dataset is perfectly balanced (i.e. number of in each class is exact). :param data : pandas.DataFrame A dataset in a panda's data frame :returns int A value of imbalance metric, where zero means that the ...
4.814025
4.718541
1.020236
counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes).groups.items()} if(len(features.groupby('class').apply(list))==2): return('binary') if ('float64' in counter): return ('float') return ('integer')
def determine_endpoint_type(features)
Determines the type of an endpoint :param features: pandas.DataFrame A dataset in a panda's data frame :returns string string with a name of a dataset
6.480418
7.198895
0.900196
counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes)} binary=0 if ('int64' in counter): binary=len(set(features.loc[:, (features<=1).all(axis=0)].columns.values) & set(features.loc[:, (features>=0).all(axis=0)].columns.values) ...
def count_features_type(features)
Counts three different types of features (float, integer, binary). :param features: pandas.DataFrame A dataset in a panda's data frame :returns a tuple (binary, integer, float)
3.85414
3.87371
0.994948
assert (local_cache_dir!=None) readme_file = open(os.path.join(local_cache_dir,'datasets',dataset_name,'README.md'), 'wt') try: df = fetch_data(dataset_name) fnames = [col for col in df.columns if col!='class'] #determine all required values types = get_types(df.ix[...
def generate_description(dataset_name, local_cache_dir=None)
Generates desription for a given dataset in its README.md file in a dataset local_cache_dir file. :param dataset_name: str The name of the data set to load from PMLB. :param local_cache_dir: str (required) The directory on your local machine to store the data files. If None, then th...
3.187881
3.162606
1.007992
report_filename = open(os.path.join(local_cache_dir, 'report.csv'), 'wt') assert (local_cache_dir!=None) try: writer = csv.writer(report_filename, delimiter='\t') writer.writerow(['Dataset','#instances','#features','#binary_features','#integer_features','#float_features',\ ...
def generate_pmlb_summary(local_cache_dir=None)
Generates a summary report for all dataset in PMLB :param local_cache_dir: str (required) The directory on your local machine to store the data files.
3.815614
3.835926
0.994705
response = requests.post( url=target, data=json.dumps(payload, cls=DjangoJSONEncoder), headers={'Content-Type': 'application/json'} ) if response.status_code == 410 and hook_id: HookModel = get_hook_model() hook = HookModel.ob...
def run(self, target, payload, instance=None, hook_id=None, **kwargs)
target: the url to receive the payload. payload: a python primitive data structure instance: a possibly null "trigger" instance hook: the defining Hook object (useful for removing)
2.732375
2.735111
0.999
opts = get_opts(instance) model = '.'.join([opts.app_label, opts.object_name]) action = 'created' if created else 'updated' distill_model_event(instance, model, action)
def model_saved(sender, instance, created, raw, using, **kwargs)
Automatically triggers "created" and "updated" actions.
4.475577
4.230914
1.057828
opts = get_opts(instance) model = '.'.join([opts.app_label, opts.object_name]) distill_model_event(instance, model, 'deleted')
def model_deleted(sender, instance, using, **kwargs)
Automatically triggers "deleted" actions.
5.973723
5.712378
1.045751
opts = get_opts(instance) model = '.'.join([opts.app_label, opts.object_name]) distill_model_event(instance, model, action, user_override=user)
def custom_action(sender, action, instance, user=None, **kwargs)
Manually trigger a custom action (or even a standard action).
6.766157
6.632455
1.020159
HookModel = get_hook_model() hooks = HookModel.objects.filter(user=user, event=event_name) for hook in hooks: new_payload = payload if send_hook_meta: new_payload = { 'hook': hook.dict(), 'data': payload } hook.deliver_ho...
def raw_custom_event(sender, event_name, payload, user, send_hook_meta=True, instance=None, **kwargs)
Give a full payload
3.53952
3.630169
0.975029
if self.event not in HOOK_EVENTS.keys(): raise ValidationError( "Invalid hook event {evt}.".format(evt=self.event) )
def clean(self)
Validation for events.
8.275844
6.313243
1.31087
if getattr(instance, 'serialize_hook', None) and callable(instance.serialize_hook): return instance.serialize_hook(hook=self) if getattr(settings, 'HOOK_SERIALIZER', None): serializer = get_module(settings.HOOK_SERIALIZER) return serializer(instance, hook=sel...
def serialize_hook(self, instance)
Serialize the object down to Python primitives. By default it uses Django's built in serializer.
3.313737
3.248226
1.020168
payload = payload_override or self.serialize_hook(instance) if getattr(settings, 'HOOK_DELIVERER', None): deliverer = get_module(settings.HOOK_DELIVERER) deliverer(self.target, payload, instance=instance, hook=self) else: client.post( ...
def deliver_hook(self, instance, payload_override=None)
Deliver the payload to the target URL. By default it serializes to JSON and POSTs.
2.84285
2.676115
1.062305
try: from importlib import import_module except ImportError as e: from django.utils.importlib import import_module try: mod_name, func_name = path.rsplit('.', 1) mod = import_module(mod_name) except ImportError as e: raise ImportError( 'Error imp...
def get_module(path)
A modified duplicate from Django's built in backend retriever. slugify = get_module('django.template.defaultfilters.slugify')
2.101686
2.052624
1.023902
from rest_hooks.models import Hook HookModel = Hook if getattr(settings, 'HOOK_CUSTOM_MODEL', None): HookModel = get_module(settings.HOOK_CUSTOM_MODEL) return HookModel
def get_hook_model()
Returns the Custom Hook model if defined in settings, otherwise the default Hook model.
3.758198
3.300079
1.13882
try: from django.contrib.auth import get_user_model User = get_user_model() except ImportError: from django.contrib.auth.models import User from rest_hooks.models import HOOK_EVENTS if not event_name in HOOK_EVENTS.keys(): raise Exception( '"{}" does not...
def find_and_fire_hook(event_name, instance, user_override=None)
Look up Hooks that apply
3.873348
3.906233
0.991581
from rest_hooks.models import HOOK_EVENTS event_name = None for maybe_event_name, auto in HOOK_EVENTS.items(): if auto: # break auto into App.Model, Action maybe_model, maybe_action = auto.rsplit('.', 1) maybe_action = maybe_action.rsplit('+', 1) ...
def distill_model_event(instance, model, action, user_override=None)
Take created, updated and deleted actions for built-in app/model mappings, convert to the defined event.name and let hooks fly. If that model isn't represented, we just quit silenty.
4.059921
3.789904
1.071246
out = data if isinstance(data, PHATE): out = data.transform() else: try: if isinstance(data, anndata.AnnData): try: out = data.obsm['X_phate'] except KeyError: raise RuntimeError( ...
def _get_plot_data(data, ndim=None)
Get plot data out of an input object Parameters ---------- data : array-like, `phate.PHATE` or `scanpy.AnnData` ndim : int, optional (default: None) Minimum number of dimensions
3.112899
2.917491
1.066978
warnings.warn("`phate.plot.rotate_scatter3d` is deprecated. " "Use `scprep.plot.rotate_scatter3d` instead.", FutureWarning) return scprep.plot.rotate_scatter3d(data, filename=filename, elev=e...
def rotate_scatter3d(data, filename=None, elev=30, rotation_speed=30, fps=10, ax=None, figsize=None, dpi=None, ipython_html="jshtml", ...
Create a rotating 3D scatter plot Builds upon `matplotlib.pyplot.scatter` with nice defaults and handles categorical colors / legends better. Parameters ---------- data : array-like, `phate.PHATE` or `scanpy.AnnData` Input data. Only the first three dimensions are used. filename : str,...
1.825353
2.215875
0.823762
if phate_op.graph is not None: diff_potential = phate_op.calculate_potential() if isinstance(phate_op.graph, graphtools.graphs.LandmarkGraph): diff_potential = phate_op.graph.interpolate(diff_potential) return cluster.KMeans(k, random_state=random_state).fit_predict(diff_pot...
def kmeans(phate_op, k=8, random_state=None)
KMeans on the PHATE potential Clustering on the PHATE operator as introduced in Moon et al. This is similar to spectral clustering. Parameters ---------- phate_op : phate.PHATE Fitted PHATE operator k : int, optional (default: 8) Number of clusters random_state : int or No...
3.659527
3.765407
0.971881
tasklogger.log_debug("Performing classic MDS on {} of shape {}...".format( type(D).__name__, D.shape)) D = D**2 D = D - D.mean(axis=0)[None, :] D = D - D.mean(axis=1)[:, None] pca = PCA(n_components=ndim, svd_solver='randomized') Y = pca.fit_transform(D) return Y
def cmdscale_fast(D, ndim)
Fast CMDS using random SVD Parameters ---------- D : array-like, input data [n_samples, n_dimensions] ndim : int, number of dimensions in which to embed `D` Returns ------- Y : array-like, embedded data [n_sample, ndim]
3.726873
3.805647
0.979301
if how not in ['classic', 'metric', 'nonmetric']: raise ValueError("Allowable 'how' values for MDS: 'classic', " "'metric', or 'nonmetric'. " "'{}' was passed.".format(how)) # MDS embeddings, each gives a different output. X_dist = squareform(p...
def embed_MDS(X, ndim=2, how='metric', distance_metric='euclidean', n_jobs=1, seed=None, verbose=0)
Performs classic, metric, and non-metric MDS Metric MDS is initialized using classic MDS, non-metric MDS is initialized using metric MDS. Parameters ---------- X: ndarray [n_samples, n_samples] 2 dimensional input data array with n_samples embed_MDS does not check for matrix square...
3.208234
3.231682
0.992744
_, eigenvalues, _ = svd(data) entropy = [] eigenvalues_t = np.copy(eigenvalues) for _ in range(t_max): prob = eigenvalues_t / np.sum(eigenvalues_t) prob = prob + np.finfo(float).eps entropy.append(-np.sum(prob * np.log(prob))) eigenvalues_t = eigenvalues_t * eigenval...
def compute_von_neumann_entropy(data, t_max=100)
Determines the Von Neumann entropy of data at varying matrix powers. The user should select a value of t around the "knee" of the entropy curve. Parameters ---------- t_max : int, default: 100 Maximum value of t to test Returns ------- entropy : array, shape=[t_max] The...
2.652746
3.511005
0.755552
try: y.shape except AttributeError: y = np.array(y) if len(y) < 3: raise ValueError("Cannot find knee point on vector of length 3") elif len(y.shape) > 1: raise ValueError("y must be 1-dimensional") if x is None: x = np.arange(len(y)) else: ...
def find_knee_point(y, x=None)
Returns the x-location of a (single) knee of curve y=f(x) Parameters ---------- y : array, shape=[n] data for which to find the knee point x : array, optional, shape=[n], default=np.arange(len(y)) indices of the data points of y, if these are not in order and evenly spaced ...
2.230846
2.285104
0.976256
def check_positive(**params):
    """Check that parameters are positive as expected

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        # non-numeric values are rejected together with non-positive ones
        if not isinstance(value, numbers.Number) or value <= 0:
            raise ValueError(
                "Expected {} > 0, got {}".format(name, value))
2.568053
3.027161
0.848337
def check_int(**params):
    """Check that parameters are integers as expected

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        # numbers.Integral also admits bools and numpy integer types
        if not isinstance(value, numbers.Integral):
            raise ValueError(
                "Expected {} integer, got {}".format(name, value))
3.716935
4.035227
0.921122
def check_if_not(x, *checks, **params):
    """Run checks only if parameters are not equal to a specified value

    Parameters
    ----------
    x : excepted value
        Checks not run if parameters equal x

    checks : function
        Unnamed arguments, check functions to be run

    params : object
        Named arguments, parameters to be checked

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        # `is not` short-circuits for sentinel values (e.g. NaN, where
        # `!=` is unreliable) before falling back to equality
        if value is not x and value != x:
            # plain loop instead of a throwaway list comprehension:
            # the checks are run for their side effects (raising)
            for check in checks:
                check(**{name: value})
5.241136
4.973336
1.053847
def check_in(choices, **params):
    """Checks parameters are in a list of allowed parameters

    Parameters
    ----------
    choices : array-like, accepted values

    params : object
        Named arguments, parameters to be checked

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        if value not in choices:
            raise ValueError(
                "{} value {} not recognized. Choose from {}".format(
                    name, value, choices))
3.709836
4.045786
0.916963
def check_between(v_min, v_max, **params):
    """Checks parameters are in a specified range

    Parameters
    ----------
    v_min : float, minimum allowed value (inclusive)

    v_max : float, maximum allowed value (inclusive)

    params : object
        Named arguments, parameters to be checked

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        # kept as two explicit comparisons (not a chained `<=` test) so
        # values for which comparisons are always False behave the same
        if value < v_min or value > v_max:
            raise ValueError("Expected {} between {} and {}, "
                             "got {}".format(name, v_min, v_max, value))
2.366982
2.626124
0.901321
def matrix_is_equivalent(X, Y):
    """Checks matrix equivalence with numpy, scipy and pandas"""
    # identity is the cheap fast path
    if X is Y:
        return True
    # otherwise require same class, same shape, and no differing entries;
    # the nested sum works for dense, sparse and DataFrame inputs
    return (isinstance(X, Y.__class__) and X.shape == Y.shape and
            np.sum((X != Y).sum()) == 0)
4.845682
5.117363
0.94691
if self.graph is not None: if isinstance(self.graph, graphtools.graphs.LandmarkGraph): diff_op = self.graph.landmark_op else: diff_op = self.graph.diff_op if sparse.issparse(diff_op): diff_op = diff_op.toarray() ...
def diff_op(self)
The diffusion operator calculated from the data
3.310131
3.224094
1.026685
utils.check_positive(n_components=self.n_components, k=self.knn) utils.check_int(n_components=self.n_components, k=self.knn, n_jobs=self.n_jobs) utils.check_between(0, 1, gamma=self.gamma) utils.check_i...
def _check_params(self)
Check PHATE parameters This allows us to fail early - otherwise certain unacceptable parameter choices, such as mds='mmds', would only fail after minutes of runtime. Raises ------ ValueError : unacceptable choice of parameters
2.130252
2.080254
1.024035
X, n_pca, precomputed, update_graph = self._parse_input(X) if precomputed is None: tasklogger.log_info( "Running PHATE on {} cells and {} genes.".format( X.shape[0], X.shape[1])) else: tasklogger.log_info( "Run...
def fit(self, X)
Computes the diffusion operator Parameters ---------- X : array, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Accepted data types: `numpy.ndarray`, `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`....
3.679681
3.579739
1.027919
if self.graph is None: raise NotFittedError("This PHATE instance is not fitted yet. Call " "'fit' with appropriate arguments before " "using this method.") elif X is not None and not utils.matrix_is_equivalent(X, self...
def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None)
Computes the position of the cells in the embedding space Parameters ---------- X : array, optional, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Not required, since PHATE does not currently embed cells not ...
4.475434
4.333272
1.032807
def fit_transform(self, X, **kwargs):
    """Computes the diffusion operator and the position of the cells in the
    embedding space

    Parameters
    ----------
    X : array, shape=[n_samples, n_features]
        input data with `n_samples` samples and `n_dimensions` dimensions

    kwargs : further keyword arguments, passed to `PHATE.transform()`

    Returns
    -------
    embedding : the result of `PHATE.transform()`
    """
    tasklogger.log_start('PHATE')
    self.fit(X)
    result = self.transform(**kwargs)
    tasklogger.log_complete('PHATE')
    return result
9.178271
7.331231
1.251941
if t is None: t = self.t if self.diff_potential is None: if t == 'auto': t = self.optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax) else: t = self.t tasklogger.log_start("diffusion potential") # diffuse...
def calculate_potential(self, t=None, t_max=100, plot_optimal_t=False, ax=None)
Calculates the diffusion potential Parameters ---------- t : int power to which the diffusion operator is powered sets the level of diffusion t_max : int, default: 100 Maximum value of `t` to test plot_optimal_t : boolean, default: False ...
3.075042
3.03757
1.012336
def von_neumann_entropy(self, t_max=100):
    """Calculate Von Neumann Entropy

    Determines the Von Neumann entropy of the diffusion affinities at
    varying levels of `t`. The user should select a value of `t` around
    the "knee" of the entropy curve. Requires that `fit` has stored
    `PHATE.diff_op`.

    Parameters
    ----------
    t_max : int, default: 100
        maximum value of `t` to test

    Returns
    -------
    (t, entropy) : pair of arrays of length `t_max`
    """
    entropies = vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max)
    return np.arange(t_max), entropies
5.151746
4.661196
1.105241
tasklogger.log_start("optimal t") t, h = self.von_neumann_entropy(t_max=t_max) t_opt = vne.find_knee_point(y=h, x=t) tasklogger.log_info("Automatically selected t = {}".format(t_opt)) tasklogger.log_complete("optimal t") if plot: if ax is None: ...
def optimal_t(self, t_max=100, plot=False, ax=None)
Find the optimal value of t Selects the optimal value of t based on the knee point of the Von Neumann Entropy of the diffusion operator. Parameters ---------- t_max : int, default: 100 Maximum value of t to test plot : boolean, default: False If...
3.002853
2.607102
1.151797
t = Default() for question_type, settings in dict_theme.items(): if question_type not in vars(t): raise ThemeError('Error while parsing theme. Question type ' '`{}` not found or not customizable.' .format(question_type)) ...
def load_theme_from_dict(dict_theme)
Load a theme from a dict. Expected format: { "Question": { "mark_color": "yellow", "brackets_color": "normal", ... }, "List": { "selection_color": "bold_blue", "selection_cursor": "->" } } Color values should be...
4.396076
4.245023
1.035584
def load_from_json(question_json):
    """Load Questions from a JSON string.

    :return: A list of Question objects with associated data if the JSON
             contains a list, or a Question if the JSON contains a dict.
    :return type: List or Dict

    :raises TypeError: if the decoded JSON is neither a list nor a dict.
    """
    data = json.loads(question_json)
    if isinstance(data, list):
        return load_from_list(data)
    if isinstance(data, dict):
        return load_from_dict(data)
    # BUG FIX: the original passed the format string and the type as two
    # separate TypeError arguments, so '%s' was never interpolated.
    raise TypeError(
        'Json contained a {} variable when a dict or list was '
        'expected'.format(type(data)))
3.556481
3.407125
1.043837
def a2bits(chars: str) -> str:
    """Converts a string to its bits representation as a string of 0's and 1's.

    >>> a2bits("Hello World!")
    '010010000110010101101100011011000110111100100000010101110110111101110010011011000110010000100001'
    """
    # Seed with 1 so that leading zero bits of the first character survive;
    # the sentinel bit is sliced off (bin() prefix '0b' plus the 1 -> [3:]).
    acc = 1
    for ch in chars:
        acc = (acc << 8) + ord(ch)
    return bin(acc)[3:]
5.242221
6.711741
0.781052
def a2bits_list(chars: str, encoding: str = "UTF-8") -> List[str]:
    """Convert a string to its bits representation as a list of 0's and 1's.

    Each character becomes one string of bits, zero-padded to the width
    configured for `encoding` in the module-level ENCODINGS table.

    >>> a2bits_list("Hello World!")
    ['01001000', '01100101', '01101100', '01101100', '01101111', '00100000',
     '01010111', '01101111', '01110010', '01101100', '01100100', '00100001']
    """
    width = ENCODINGS[encoding]
    bits = []
    for ch in chars:
        bits.append(bin(ord(ch))[2:].rjust(width, "0"))
    return bits
4.582512
5.861503
0.781798
def bs(s: int) -> str:
    """Converts an int to its bits representation as a string of 0's and 1's.

    Iterative rewrite: the original recursed once per bit, so integers with
    more than ~1000 bits raised RecursionError. Behavior is otherwise
    unchanged, including returning str(s) unmodified for s <= 1.
    """
    if s <= 1:
        return str(s)
    bits = ""
    while s > 1:
        bits = str(s & 1) + bits
        s >>= 1
    return str(s) + bits
3.669572
2.617251
1.402071