| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _32bit_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.app... | def _pstore32(ins) | Stores 2nd parameter at stack pointer (SP) + X, being
X the 1st parameter.
1st operand must be a SIGNED integer. | 6.403168 | 6.706625 | 0.954753 |
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _f16_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.appen... | def _pstoref16(ins) | Stores 2nd parameter at stack pointer (SP) + X, being
X the 1st parameter.
1st operand must be a SIGNED integer. | 7.046725 | 7.219427 | 0.976078 |
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _float_oper(value)
if indirect:
output.append('ld hl, %i' % I)
output.app... | def _pstoref(ins) | Stores 2nd parameter at stack pointer (SP) + X, being
X the 1st parameter.
1st operand must be a SIGNED integer. | 7.29921 | 7.471739 | 0.976909 |
output = []
temporal = False
# 2nd operand first, because it must go into the stack
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
if value[0] == '_':
output.append('ld de, (%s)' % value)
if indir... | def _pstorestr(ins) | Stores 2nd parameter at stack pointer (SP) + X, being
X the 1st parameter.
1st operand must be a SIGNED integer.
Note: This procedure proceeds as _pstore16, since STRINGS are 16bit pointers. | 3.413299 | 3.405423 | 1.002313 |
assert type_ is None or isinstance(type_, SymbolTYPE)
if func is not None: # Try constant-folding
if is_number(operand): # e.g. ABS(-5)
return SymbolNUMBER(func(operand.value), lineno=lineno)
elif is_string(operand): # e.g. LEN("a")
re... | def make_node(cls, lineno, operator, operand, func=None, type_=None) | Creates a node for a unary operation. E.g. -x or LEN(a$)
Parameters:
-func: lambda function used on constant folding when possible
-type_: the resulting type (by default, the same as the argument).
For example, for LEN (str$), result type is 'u16'
and arg... | 4.567444 | 4.342848 | 1.051716 |
if type_ in cls.unsigned:
return {TYPE.ubyte: TYPE.byte_,
TYPE.uinteger: TYPE.integer,
TYPE.ulong: TYPE.long_}[type_]
if type_ in cls.decimals or type_ in cls.signed:
return type_
return cls.unknown | def to_signed(cls, type_) | Return signed type or equivalent | 5.29723 | 4.96972 | 1.065901 |
NAME_TYPES = {cls.TYPE_NAMES[x]: x for x in cls.TYPE_NAMES}
return NAME_TYPES.get(typename, None) | def to_type(cls, typename) | Converts a type name to its type ID. On error returns None | 4.671248 | 4.186557 | 1.115773 |
op = float(op)
negative = op < 0
if negative:
op = -op
DE = int(op)
HL = int((op - DE) * 2**16) & 0xFFFF
DE &= 0xFFFF
if negative: # Do C2
DE ^= 0xFFFF
HL ^= 0xFFFF
DEHL = ((DE << 16) | HL) + 1
HL = DEHL & 0xFFFF
DE = (DEHL >> 16) & 0... | def f16(op) | Returns a floating point operand converted to a 32-bit unsigned int.
Negative numbers are returned in 2's complement.
The result is returned in a tuple (DE, HL) => High16 (Int part), Low16 (Decimal part) | 4.110703 | 3.517698 | 1.168577 |
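The `f16` entry above converts a float into the 32-bit 16.16 fixed-point form used by the code generator; its tail is truncated. A minimal, self-contained sketch of that conversion (illustrative only, not the project's exact code):

```python
# Minimal sketch of the 16.16 fixed-point conversion described above:
# the integer part goes to DE, the fractional part to HL, and negative
# values are two's-complemented across the full 32 bits.
def f16_sketch(op):
    op = float(op)
    negative = op < 0
    if negative:
        op = -op
    de = int(op) & 0xFFFF                         # high 16 bits: integer part
    hl = int((op - int(op)) * 2 ** 16) & 0xFFFF   # low 16 bits: fraction
    if negative:                                  # two's complement of DEHL
        dehl = (((de ^ 0xFFFF) << 16) | (hl ^ 0xFFFF)) + 1
        de, hl = (dehl >> 16) & 0xFFFF, dehl & 0xFFFF
    return de, hl

assert f16_sketch(1.5) == (1, 0x8000)
assert f16_sketch(-1.0) == (0xFFFF, 0x0000)
```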
ins.quad = [x for x in ins.quad]
for i in range(2, len(ins.quad)):
if is_float(ins.quad[i]):
de, hl = f16(ins.quad[i])
ins.quad[i] = str((de << 16) | hl)
ins.quad = tuple(ins.quad)
return ins | def _f16_to_32bit(ins) | If any of the operands within the ins(truction) are numeric,
convert them to their 32bit representation, otherwise leave them
as they are. | 4.084955 | 4.042381 | 1.010532 |
op1, op2 = tuple(ins.quad[2:])
if _f_ops(op1, op2) is not None:
op1, op2 = _f_ops(op1, op2)
if op2 == 1: # A * 1 => A
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if op2 == -1:
... | def _mulf16(ins) | Multiplies 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack. | 3.757456 | 3.779647 | 0.994129 |
op1, op2 = tuple(ins.quad[2:])
if is_float(op2):
if float(op2) == 1:
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if float(op2) == -1:
return _negf(ins)
rev = not is_float(op1) and ... | def _divf16(ins) | Divides 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 1, do nothing
* If 2nd operand is -1, do NEG32 | 4.877874 | 4.969093 | 0.981643 |
raise InvalidICError(str(quad),
"Invalid quad code params for '%s' (expected %i, but got %i)" %
(quad, QUADS[quad][0], nparams)
) | def throw_invalid_quad_params(quad, QUADS, nparams) | Exception raised when an invalid number of params in the
quad code has been emitted. | 7.879777 | 7.499469 | 1.050711 |
def bin32(f):
result = ''
a = int(f) & 0xFFFFFFFF # ensures int 32
for i in range(32):
result = str(a % 2) + result
a = a >> 1
return result
def bindec32(f):
result = '0'
a = f
if f >= 1:
res... | def fp(x) | Returns a floating point number as EXP+128, Mantissa | 3.567224 | 3.462639 | 1.030204 |
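The `fp` entry is truncated above. A hedged sketch of what its docstring describes (a 32-bit binary mantissa string plus the exponent biased by 128, in the form consumed by the `immediate_float` entry that follows); the sign-in-top-mantissa-bit convention below is an assumption, not confirmed by the truncated snippet:

```python
# Illustrative sketch only: represent x as mantissa * 2**exp with
# 0.5 <= mantissa < 1 and return (mantissa bits, exponent + 128) as
# binary strings. The sign-in-top-mantissa-bit layout is assumed.
import math

def fp_sketch(x):
    if x == 0:
        return '0' * 32, '0' * 8
    m, e = math.frexp(abs(x))            # abs(x) = m * 2**e, 0.5 <= m < 1
    mant = int(m * 2 ** 32) & 0xFFFFFFFF
    if x < 0:
        mant |= 0x80000000               # sign replaces the implicit leading 1
    else:
        mant &= 0x7FFFFFFF
    return format(mant, '032b'), format((e + 128) & 0xFF, '08b')
```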
def bin2hex(y):
return "%02X" % int(y, 2)
M, E = fp(x)
C = '0' + bin2hex(E) + 'h'
ED = '0' + bin2hex(M[8:16]) + bin2hex(M[:8]) + 'h'
LH = '0' + bin2hex(M[24:]) + bin2hex(M[16:24]) + 'h'
return C, ED, LH | def immediate_float(x) | Returns C DE HL as values for loading
an immediate floating point value. | 4.099208 | 4.020427 | 1.019595 |
if stopOn is None:
for i in self.children:
i.inorder(funct)
else:
for i in self.children:
if i.inorder(funct) == stopOn:
return stopOn
return funct(self) | def inorder(self, funct, stopOn=None) | Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false. | 2.518456 | 2.474535 | 1.017749 |
if funct(self.symbol) == stopOn and stopOn is not None:
return stopOn
if stopOn is None:
for i in self.children:
i.preorder(funct)
else:
for i in self.children:
if i.preorder(funct) == stopOn:
retur... | def preorder(self, funct, stopOn=None) | Iterates in preorder, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false. | 2.911538 | 2.97795 | 0.977699 |
if stopOn is None:
for i in range(len(self.children) - 1, -1, -1):
self.children[i].postorder(funct)
else:
for i in range(len(self.children) - 1, -1, -1):
if self.children[i].postorder(funct) == stopOn:
return stopOn
... | def postorder(self, funct, stopOn=None) | Iterates in postorder, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false. | 1.936263 | 1.929191 | 1.003665 |
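The three traversal entries above share the same `stopOn` early-exit convention: recursion halts as soon as the callback (or a child traversal) returns the `stopOn` value. A hypothetical, self-contained illustration of that pattern (the `Node` class below is not the project's AST class):

```python
# Hypothetical minimal node class; only illustrates the stopOn pattern.
class Node:
    def __init__(self, symbol, *children):
        self.symbol = symbol
        self.children = list(children)

    def preorder(self, funct, stopOn=None):
        if funct(self.symbol) == stopOn and stopOn is not None:
            return stopOn
        for child in self.children:
            if child.preorder(funct, stopOn) == stopOn and stopOn is not None:
                return stopOn

tree = Node('root', Node('a'), Node('b', Node('stop')), Node('c'))
visited = []
tree.preorder(lambda s: visited.append(s) or s == 'stop', stopOn=True)
print(visited)  # ['root', 'a', 'b', 'stop'] -- 'c' is skipped after the stop
```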
result = clss(symbol)
for i in nexts:
if i is None:
continue
if not isinstance(i, clss):
raise NotAnAstError(i)
result.appendChild(i)
return result | def makenode(clss, symbol, *nexts) | Stores the symbol in an AST instance,
and sets left and right to the given ones | 4.618403 | 4.226429 | 1.092744 |
global OUTPUT
global INCLUDED
global CURRENT_DIR
global ENABLED
global INCLUDEPATH
global IFDEFS
global ID_TABLE
global CURRENT_FILE
global_.FILENAME = '(stdin)'
OUTPUT = ''
INCLUDED = {}
CURRENT_DIR = ''
pwd = get_include_path()
INCLUDEPATH = [os.path.join(... | def init() | Initializes the preprocessor | 9.944012 | 10.028498 | 0.991575 |
f1 = os.path.basename(sys.argv[0]).lower() # script filename
f2 = os.path.basename(sys.executable).lower() # Executable filename
# If executable filename and script name are the same, we are
if f1 == f2 or f2 == f1 + '.exe': # under a "compiled" python binary
result = os.path.dirname(os... | def get_include_path() | Default include path, computed using tricky sys
calls. | 3.997144 | 3.874913 | 1.031544 |
fname = api.utils.sanitize_filename(fname)
i_path = [CURRENT_DIR] + INCLUDEPATH if local_first else list(INCLUDEPATH)
i_path.extend(OPTIONS.include_path.value.split(':') if OPTIONS.include_path.value else [])
if os.path.isabs(fname):
if os.path.isfile(fname):
return fname
el... | def search_filename(fname, lineno, local_first) | Searches for a filename in the list of include paths.
If local_first is true, it will try first in the current directory of
the file being analyzed. | 3.692836 | 3.332031 | 1.108284 |
global CURRENT_DIR
filename = search_filename(filename, lineno, local_first)
if filename not in INCLUDED.keys():
INCLUDED[filename] = []
if len(CURRENT_FILE) > 0: # Added from which file, line
INCLUDED[filename].append((CURRENT_FILE[-1], lineno))
CURRENT_FILE.append(filename)... | def include_file(filename, lineno, local_first) | Performs a file inclusion (#include) in the preprocessor.
Writes down that "filename" was included at the current file,
at line <lineno>.
If local_first is True, then it will first search the file in the
local path before looking for it in the include path chain.
This is used when doing a #include ... | 4.592426 | 5.149939 | 0.891744 |
filename = search_filename(filename, lineno, local_first)
if filename not in INCLUDED.keys(): # If not already included
return include_file(filename, lineno, local_first) # include it and return
# Now checks if the file has been included more than once
if len(INCLUDED[filename]) > 1:
... | def include_once(filename, lineno, local_first) | Performs a file inclusion (#include) in the preprocessor.
Writes down that "filename" was included at the current file,
at line <lineno>.
The file is ignored if it was previously included (a warning will
be emitted though).
If local_first is True, then it will first search the file in the
loca... | 5.449557 | 5.860834 | 0.929826 |
try:
tmp = [str(x()) if isinstance(x, MacroCall) else x for x in p[1]]
except PreprocError as v:
error(v.lineno, v.message)
tmp.append(p[2])
p[0] = tmp | def p_program_tokenstring(p) | program : defs NEWLINE | 6.532991 | 6.370153 | 1.025563 |
global CURRENT_DIR
p[0] = [p[1] + p[2]] + p[3] + [p[4]]
CURRENT_FILE.pop() # Remove top of the stack
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1]) | def p_include_file(p) | include_file : include NEWLINE program _ENDFILE_ | 4.60081 | 4.148034 | 1.109154 |
global CURRENT_DIR
p[0] = [p[1] + p[2]] + p[3] + [p[4]]
CURRENT_FILE.pop() # Remove top of the stack
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1]) | def p_include_once_ok(p) | include_file : include_once NEWLINE program _ENDFILE_ | 4.646745 | 4.03983 | 1.150233 |
if ENABLED:
p[0] = include_file(p[2], p.lineno(2), local_first=True)
else:
p[0] = []
p.lexer.next_token = '_ENDFILE_' | def p_include(p) | include : INCLUDE STRING | 9.08909 | 8.682608 | 1.046816 |
if ENABLED:
p[0] = include_file(p[2], p.lineno(2), local_first=False)
else:
p[0] = []
p.lexer.next_token = '_ENDFILE_' | def p_include_fname(p) | include : INCLUDE FILENAME | 9.414836 | 8.857437 | 1.06293 |
if ENABLED:
p[0] = include_once(p[3], p.lineno(3), local_first=True)
else:
p[0] = []
if not p[0]:
p.lexer.next_token = '_ENDFILE_' | def p_include_once(p) | include_once : INCLUDE ONCE STRING | 7.961067 | 7.92886 | 1.004062 |
p[0] = []
if ENABLED:
p[0] = include_once(p[3], p.lineno(3), local_first=False)
else:
p[0] = []
if not p[0]:
p.lexer.next_token = '_ENDFILE_' | def p_include_once_fname(p) | include_once : INCLUDE ONCE FILENAME | 7.455809 | 6.768832 | 1.101491 |
if ENABLED:
if p[4]:
if SPACES.match(p[4][0]):
p[4][0] = p[4][0][1:]
else:
warning(p.lineno(1), "missing whitespace after the macro name")
ID_TABLE.define(p[2], args=p[3], value=p[4], lineno=p.lineno(2),
fname=CURR... | def p_define(p) | define : DEFINE ID params defs | 5.95494 | 5.571095 | 1.068899 |
# Defines the 'epsilon' parameter
p[0] = [ID('', value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])] | def p_define_params_empty(p) | params : LP RP | 25.30998 | 26.433193 | 0.957507 |
for i in p[2]:
if not isinstance(i, ID):
error(p.lineno(3),
'"%s" might not appear in a macro parameter list' % str(i))
p[0] = None
return
names = [x.name for x in p[2]]
for i in range(len(names)):
if names[i] in names[i + 1:]:
... | def p_define_params_paramlist(p) | params : LP paramlist RP | 3.130396 | 3.141921 | 0.996332 |
p[0] = [ID(p[1], value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])] | def p_paramlist_single(p) | paramlist : ID | 14.520524 | 12.274528 | 1.18298 |
p[0] = p[1] + [ID(p[3], value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])] | def p_paramlist_paramlist(p) | paramlist : paramlist COMMA ID | 13.483508 | 10.254329 | 1.314909 |
global ENABLED
if ENABLED:
p[0] = [p[2]] + p[3]
p[0] += ['#line %i "%s"' % (p.lineno(4) + 1, CURRENT_FILE[-1])]
else:
p[0] = ['#line %i "%s"' % (p.lineno(4) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop() | def p_ifdef(p) | ifdef : if_header NEWLINE program ENDIF | 4.042553 | 3.815121 | 1.059613 |
global ENABLED
p[0] = p[1] + p[2]
p[0] += ['#line %i "%s"' % (p.lineno(3) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop() | def p_ifdef_else(p) | ifdef : ifdefelsea ifdefelseb ENDIF | 7.774165 | 7.146805 | 1.087782 |
global ENABLED
if ENABLED:
p[0] = [p[2]] + p[3]
else:
p[0] = []
ENABLED = not ENABLED | def p_ifdef_else_a(p) | ifdefelsea : if_header NEWLINE program | 4.143275 | 4.014042 | 1.032195 |
global ENABLED
if ENABLED:
p[0] = ['#line %i "%s"%s' % (p.lineno(1) + 1, CURRENT_FILE[-1], p[2])]
p[0] += p[3]
else:
p[0] = [] | def p_ifdef_else_b(p) | ifdefelseb : ELSE NEWLINE program | 5.336611 | 5.109688 | 1.04441 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(2)))
ENABLED = ID_TABLE.defined(p[2]) | def p_if_header(p) | if_header : IFDEF ID | 24.548096 | 15.838132 | 1.549936 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(2)))
ENABLED = not ID_TABLE.defined(p[2]) | def p_ifn_header(p) | if_header : IFNDEF ID | 26.175632 | 19.087532 | 1.371347 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(1)))
ENABLED = bool(int(p[2])) if p[2].isdigit() else ID_TABLE.defined(p[2]) | def p_if_expr_header(p) | if_header : IF expr | 13.017517 | 11.669338 | 1.115532 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a < b else '0' | def p_exprlt(p) | expr : expr LT expr | 2.815493 | 2.442285 | 1.152811 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a <= b else '0' | def p_exprle(p) | expr : expr LE expr | 3.031064 | 2.478031 | 1.223174 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a > b else '0' | def p_exprgt(p) | expr : expr GT expr | 2.74121 | 2.410606 | 1.137146 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a >= b else '0' | def p_exprge(p) | expr : expr GE expr | 2.909868 | 2.499303 | 1.164272 |
global CURRENT_DIR
prev_dir = CURRENT_DIR
CURRENT_FILE.append(filename)
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1])
LEXER.input(input_, filename)
LEXER.lex.begin(state)
parser.parse(lexer=LEXER, debug=OPTIONS.Debug.value > 2)
CURRENT_FILE.pop()
CURRENT_DIR = prev_dir | def filter_(input_, filename='<internal>', state='INITIAL') | Filters the input string through the preprocessor.
The result is appended to the OUTPUT global string. | 4.953292 | 5.074691 | 0.976078 |
if name is None:
tplot_names = list(data_quants.keys())
for i in tplot_names:
del data_quants[i]
return
if not isinstance(name, list):
name = [name]
entries = []
###
for i in name:
if ('?' in i) or ('*' in i):
for j... | def del_data(name=None) | This function will delete tplot variables that are already stored in memory.
Parameters:
name : str
Name of the tplot variable to be deleted. If no name is provided, then
all tplot variables will be deleted.
Returns:
None
Examples:
... | 3.625292 | 3.446289 | 1.051941 |
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfu... | def xlim(min, max) | This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series... | 2.890041 | 3.104169 | 0.931019 |
if name not in data_quants.keys():
print("That name is currently not in pytplot.")
return
temp_data_quant = data_quants[name]
temp_data_quant.zaxis_opt['z_range'] = [min, max]
return | def zlim(name, min, max) | This function will set the z axis range displayed for a specific tplot variable.
This is only used for spec plots, where the z axis represents the magnitude of the values
in each bin.
Parameters:
name : str
The name of the tplot variable that you wish to set z limits for.
... | 7.276906 | 6.287845 | 1.157297 |
if name not in data_quants.keys():
print("That name is currently not in pytplot.")
return
temp_data_quant = data_quants[name]
temp_data_quant.yaxis_opt['y_range'] = [min, max]
return | def ylim(name, min, max) | This function will set the y axis range displayed for a specific tplot variable.
Parameters:
name : str
The name of the tplot variable that you wish to set y limits for.
min : flt
The start of the y axis.
max : flt
The end of the y axis.
... | 7.010754 | 6.313552 | 1.110429 |
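Typical usage of the `xlim`/`zlim`/`ylim` helpers above, assuming a tplot variable has already been stored; the variable name and values below are illustrative:

```python
import pytplot

# Assumes 'Variable1' was stored earlier, e.g. with
# pytplot.store_data('Variable1', data={'x': times, 'y': values}).
pytplot.xlim('2017-01-01 00:00:00', '2017-01-02 00:00:00')  # time window
pytplot.ylim('Variable1', 2, 4)                             # y-axis range
pytplot.zlim('Variable1', 1e2, 1e5)                         # spec plots only
```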
if 'title_size' in pytplot.tplot_opt_glob:
size = pytplot.tplot_opt_glob['title_size']
if 'title_text' in pytplot.tplot_opt_glob:
if pytplot.tplot_opt_glob['title_text'] != '':
layout.addItem(LabelItem(pytplot.tplot_opt_glob['title_text'], size=size, color='k'), row=0, col=0)
... | def _set_pyqtgraph_title(layout) | Private function to add a title to the first row of the window.
Returns True if a Title is set. Else, returns False. | 2.50631 | 2.295563 | 1.091806 |
global data_quants
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
temp_data_quant = data_quants[name]
data_val = temp_data_quant.data.values
time_val = temp_data_quant.data.index
return(time_val, data_val) | def get_data(name) | This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1... | 4.416694 | 3.774332 | 1.170192 |
#check if old name is in current dictionary
if old_name not in pytplot.data_quants.keys():
print("That name is currently not in pytplot")
return
#if old name input is a number, convert to corresponding name
if isinstance(old_name, int):
old_name = pytplot.data_quants[ol... | def tplot_rename(old_name, new_name) | This function will rename tplot variables that are already stored in memory.
Parameters:
old_name : str
Old name of the Tplot Variable
new_name : str
New name of the Tplot Variable
Returns:
None
Examples:
>>> # Rename Variabl... | 3.284808 | 3.257736 | 1.00831 |
if self.spec_bins is None:
return
if len(self.spec_bins) == len(self.data.index):
self.spec_bins_time_varying = True
break_top_loop = False
for index, row in self.spec_bins.iterrows():
if row.isnull().values.all():
... | def _check_spec_bins_ordering(self) | This is a private function of the TVar object, this is run during
object creation to check if spec_bins are ascending or descending | 3.070861 | 2.823816 | 1.087486 |
index = 0
return_names=[]
for key, _ in data_quants.items():
if isinstance(data_quants[key].data, list):
if isinstance(key, str):
names_to_print = data_quants[key].name + " data from: "
for name in data_quants[key].data:
... | def tplot_names() | This function will print out and return a list of all current Tplot Variables stored in the memory.
Parameters:
None
Returns:
list : list of str
A list of all Tplot Variables stored in the memory
Examples:
>>> import pytplot
>>> x_dat... | 2.850114 | 3.049534 | 0.934607 |
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
print("Start Time: " + tplot_utilities.int_to_str(data_quants[name].trange[0]))
print("End Time: " + tplot_utilities.int_to_str(data_quants[name].trange[1]))
return(data_quants[name... | def get_timespan(name) | This function extracts the time span from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_begin : float
The beginning of the time series
time_end : float
The end of the time series... | 3.567539 | 3.461306 | 1.030692 |
if keyword == 'days':
dt *= 86400
elif keyword == 'hours':
dt *= 3600
elif keyword == 'minutes':
dt *= 60
elif keyword == 'seconds':
dt *= 1
else:
print("Invalid 'keyword' option.\nEnum(None, 'hours', 'minutes', 'seconds', 'days')")
if n... | def timespan(t1, dt, keyword = 'days') | This function will set the time range for all time series plots. This is a wrapper for the function "xlim" to
better handle time axes.
Parameters:
t1 : flt/str
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYY... | 3.468701 | 3.342823 | 1.037656 |
option = option.lower()
temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
pytplot.tplot_opt_glob = temp
return | def tplot_options(option, value) | This function allows the user to set several global options for the generated plots.
Parameters:
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
======... | 5.601828 | 9.34497 | 0.599448 |
if isinstance(names,int):
names = list(data_quants.keys())[names-1]
if not isinstance(names, list):
names = [names]
#Check that we have all available data
for name in names:
if isinstance(data_quants[name].data, list):
for data_name in data_quants[name].dat... | def tplot_save(names, filename=None) | This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the ... | 3.489798 | 3.655058 | 0.954786 |
if isinstance(name, int):
name = list(data_quants.keys())[name-1]
if not isinstance(name, list):
name = [name]
name_num = len(name)
ymin = None
ymax = None
for i in range(name_num):
if name[i] not in data_quants.keys():
print(str(name[i]) + " is currently... | def get_ylimits(name, trg=None) | This function will extract the y-limits from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
trg : list, optional
The time range that you would like to look in
Returns:
ymin : float
The mini... | 2.10468 | 2.085504 | 1.009195 |
todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
extra_layouts['time_stamp'] = todaystring
else:
if 'time_stamp' in extra_layouts:
del extra_layouts['time_stamp']
return | def timestamp(val):
if val == 'on' | This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters
val str
A string that can either be 'on' or 'off'.
Returns
None
Examples
# Turn on the timestamp
import pytplot
pytplot.tim... | 4.836646 | 7.171728 | 0.674405 |
'''
I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly.
'''
if self.picturenotgened:
self.generatePicture(self.getBoundingParents()[0].rect())
self.picturenotgened = False
pg.ImageItem.paint(sel... | def paint(self, p, *args) | I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly. | 8.278251 | 3.933718 | 2.104434 |
profile = debug.Profiler()
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.vi... | def setImage(self, image=None, autoLevels=None, **kargs) | Same thing as ImageItem.setImage, but we don't update the drawing | 2.92837 | 2.78448 | 1.051676 |
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtGui.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
# CHANGE: For GraphicsLayouts, there... | def getPaintItems(self, root=None) | Return a list of all items that should be painted in the correct order. | 3.669547 | 3.546712 | 1.034633 |
'''
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
'''
# df here is actually a dictionary of several dataframes, 'mat', 'mat_orig',
# etc
if df is None:
df = net.dat... | def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False) | A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns | 5.883853 | 2.744676 | 2.143733 |
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of mi... | def qn_df(df, axis='row', keep_orig=False) | do quantile normalization of a dataframe dictionary, does not write to net | 3.938115 | 3.311168 | 1.189343 |
'''
calculate a common distribution (for col qn only) that will be used to qn
'''
# axis is col
tmp_arr = np.array([])
col_names = df.columns.tolist()
for inst_col in col_names:
# sort column
tmp_vect = df[inst_col].sort_values(ascending=False).values
# stacking rows vertically (will tran... | def calc_common_dist(df) | calculate a common distribution (for col qn only) that will be used to qn | 4.859018 | 3.429543 | 1.416812 |
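A hedged sketch of column quantile normalization in the spirit of the `qn_df` and `calc_common_dist` entries above (not the library's exact code): sort each column, average the sorted columns into a common distribution, then map every value back by its rank:

```python
import numpy as np
import pandas as pd

def quantile_normalize_cols(df):
    sorted_cols = np.sort(df.values, axis=0)         # sort each column
    common_dist = sorted_cols.mean(axis=1)           # mean across columns, per rank
    ranks = df.rank(method='first').astype(int) - 1  # 0-based rank per column
    qn = df.copy()
    for col in df.columns:
        qn[col] = common_dist[ranks[col].values]
    return qn
```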
'''
take the zscore of a dataframe dictionary, does not write to net (self)
'''
df_z = {}
for mat_type in df:
if keep_orig and mat_type == 'mat':
mat_orig = deepcopy(df[mat_type])
inst_df = df[mat_type]
if axis == 'row':
inst_df = inst_df.transpose()
df_z[mat_type] = (inst_df -... | def zscore_df(df, axis='row', keep_orig=False) | take the zscore of a dataframe dictionary, does not write to net (self) | 3.237395 | 2.253799 | 1.436417 |
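A minimal sketch of the row z-score that `zscore_df` applies above, using the same transpose trick (illustrative data, not the library's exact code):

```python
import pandas as pd

def zscore_rows(df):
    t = df.transpose()                 # rows become columns
    t = (t - t.mean()) / t.std()       # column-wise z-score
    return t.transpose()               # back to the original orientation

df = pd.DataFrame({'s1': [1.0, 10.0], 's2': [2.0, 20.0], 's3': [3.0, 30.0]},
                  index=['gene_a', 'gene_b'])
print(zscore_rows(df))  # each row now has mean 0 and unit sample std
```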
'''
calculate pvalue of category closeness
'''
# calculate the distance between the data points within the same category and
# compare to null distribution
for inst_rc in ['row', 'col']:
inst_nodes = deepcopy(net.dat['nodes'][inst_rc])
inst_index = deepcopy(net.dat['node_info'][inst_rc]['clust'])
... | def main(net) | calculate pvalue of category closeness | 4.127836 | 3.850813 | 1.071939 |
'''
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
'''
import numpy as np
from ast import literal_eval as make_tuple
test = {}
test['row'] = df['mat'].index.tolist()
test['col'] = df['mat'].columns.tolist()
# if type( tes... | def main(df) | 1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories | 2.55089 | 2.097193 | 1.216335 |
'''
This is always run when data is loaded.
'''
from . import categories
# check if df has unique values
df['mat'] = make_unique_labels.main(net, df['mat'])
net.dat['mat'] = df['mat'].values
net.dat['nodes']['row'] = df['mat'].index.tolist()
net.dat['nodes']['col'] = df['mat'].columns.tolist()
fo... | def df_to_dat(net, df, define_cat_colors=False) | This is always run when data is loaded. | 3.234675 | 3.032104 | 1.066809 |
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray(self.dat['mat']) | def mat_to_numpy_arr(self) | convert list to numpy array - numpy arrays can not be saved as json | 10.79878 | 4.99942 | 2.160007 |
''' cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument '''
import scipy
from copy import deepcopy
from scipy.spatial.distance import pdist
from . import categories, make_viz, cat_pval
dm = {}
for inst_rc in ['row', 'col']:
tm... | def cluster_row_and_col(net, dist_type='cosine', linkage_type='average',
dendro=True, run_clustering=True, run_rank=True,
ignore_cat=False, calc_cat_pval=False, links=False) | cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument | 4.439873 | 3.449449 | 1.287125 |
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if foun... | def check_categories(lines) | find out how many row and col categories are available | 4.194168 | 3.729551 | 1.124577 |
'''
make a dictionary of node-category associations
'''
# print('---------------------------------')
# print('---- dict_cat: before setting cat colors')
# print('---------------------------------\n')
# print(define_cat_colors)
# print(net.viz['cat_colors'])
net.persistent_cat = True
for inst_rc i... | def dict_cat(net, define_cat_colors=False) | make a dictionary of node-category associations | 2.814317 | 2.763659 | 1.01833 |
'''
cluster category subset of data
'''
from .__init__ import Network
from copy import deepcopy
from . import calc_clust, run_filter
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
if len(all_cats) > 0:
for inst_name_cat in all_cats:
... | def calc_cat_clust_order(net, inst_rc) | cluster category subset of data | 3.039285 | 2.975288 | 1.021509 |
'''
If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list.
'''
no_titles = remove_titles(unordered_cats)
all_are_numbers = check_all_nu... | def order_categories(unordered_cats) | If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list. | 6.115845 | 2.214345 | 2.761921 |
'''
Load file as a string.
'''
load_data.load_file_as_string(self, file_string, filename=filename) | def load_file_as_string(self, file_string, filename='') | Load file as a string. | 4.571833 | 4.003247 | 1.142031 |
'''
Load Clustergrammer's dat format (saved as JSON).
'''
inst_dat = self.load_json_to_dict(filename)
load_data.load_data_to_net(self, inst_dat) | def load_data_file_to_net(self, filename) | Load Clustergrammer's dat format (saved as JSON). | 10.455465 | 4.024589 | 2.597896 |
'''
The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
'''
initialize_net.viz(self)
make_clust_fun.make_clust(self, dist_type=dist_type, run_clustering=run_clustering,
... | def cluster(self, dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=None, enrichrgram=None) | The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``. | 4.494601 | 2.32679 | 1.931675 |
'''
Load Pandas DataFrame.
'''
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True) | def load_df(self, df) | Load Pandas DataFrame. | 14.096099 | 12.843867 | 1.097496 |
'''
Load Pandas DataFrame (will be deprecated).
'''
data_formats.df_to_dat(self, df, define_cat_colors) | def df_to_dat(self, df, define_cat_colors=False) | Load Pandas DataFrame (will be deprecated). | 10.156842 | 4.452537 | 2.281136 |
'''
Generate a widget visualization using the widget. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end.
'''
if hasattr(self, 'widget_class') == True:
# run clustering if necessary
if len(self.... | def widget(self, which_viz='viz') | Generate a widget visualization using the widget. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end. | 8.29629 | 4.326435 | 1.917581 |
'''
Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method.
'''
if hasattr(self, 'widget_instance') == True:
if self.widget_instance.mat_st... | def widget_df(self) | Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method. | 7.456887 | 4.46862 | 1.668723 |
'''
Save dat or viz as a JSON to file.
'''
export_data.write_json_to_file(self, net_type, filename, indent) | def write_json_to_file(self, net_type, filename, indent='no-indent') | Save dat or viz as a JSON to file. | 9.580324 | 3.421215 | 2.80027 |
'''
Filter a network's rows or columns based on the sum across rows or columns.
'''
inst_df = self.dat_to_df()
if inst_rc == 'row':
inst_df = run_filter.df_filter_row_sum(inst_df, threshold, take_abs)
elif inst_rc == 'col':
inst_df = run_filter.df_filter_col_sum(inst_df, threshold, t... | def filter_sum(self, inst_rc, threshold, take_abs=True) | Filter a network's rows or columns based on the sum across rows or columns. | 3.15746 | 2.483143 | 1.271558 |
'''
Filter the matrix rows or columns based on sum/variance, and only keep the top
N.
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_N_top(inst_rc, inst_df, N_top, rank_type)
self.df_to_dat(inst_df) | def filter_N_top(self, inst_rc, N_top, rank_type='sum') | Filter the matrix rows or columns based on sum/variance, and only keep the top
N. | 5.721913 | 3.169202 | 1.805475 |
'''
Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value).
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_threshold(inst_df, inst_rc, threshold,
num_occur)
self.df_to_dat(inst_df) | def filter_threshold(self, inst_rc, threshold, num_occur=1) | Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value). | 6.373182 | 3.442139 | 1.851518 |
'''
Filter the matrix rows or columns based on category. cat_index is the index of the category (the first category has index=1).
'''
run_filter.filter_cat(self, axis, cat_index, cat_name) | def filter_cat(self, axis, cat_index, cat_name) | Filter the matrix rows or columns based on category. cat_index is the index of the category (the first category has index=1). | 7.433342 | 2.904372 | 2.559363 |
'''
Trim values at input thresholds using pandas function
'''
df = self.export_df()
df = df.clip(lower=lower, upper=upper)
self.load_df(df) | def clip(self, lower=None, upper=None) | Trim values at input thresholds using pandas function | 8.466238 | 3.902894 | 2.169221 |
'''
Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object).
'''
normalize_fun.run_norm(self, df, norm_type, axis, keep_orig) | def normalize(self, df=None, norm_type='zscore', axis='row', keep_orig=False) | Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object). | 10.732241 | 2.51876 | 4.260922 |
'''
Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object).
'''
return downsample_fun.main(self, df, ds_type, axis, num_samples, random_state) | def downsample(self, df=None, ds_type='kmeans', axis='row', num_samples=100, random_state=1000) | Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object). | 11.35239 | 2.695847 | 4.211066 |
'''
Return random sample of matrix.
'''
if df is None:
df = self.dat_to_df()
if axis == 'row':
axis = 0
if axis == 'col':
axis = 1
df = self.export_df()
df = df.sample(n=num_samples, replace=replace, weights=weights, random_state=random_state, axis=axis)
self.l... | def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row') | Return random sample of matrix. | 3.369272 | 2.992014 | 1.126088 |
'''
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
... | def add_cats(self, axis, cat_data) | Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example `... | 7.195297 | 1.500161 | 4.796349 |
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that ... | def enrichrgram(self, lib, axis='row') | Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to ... | 5.336521 | 1.569959 | 3.399146 |
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matri... | def load_gene_exp_to_df(inst_path) | Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe | 2.245439 | 2.134105 | 1.052169 |
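A hedged sketch of loading 10x output into a DataFrame, following the pattern visible in the truncated `load_gene_exp_to_df` above; the `genes.tsv`/`barcodes.tsv` file names follow the standard 10x layout and are assumptions here:

```python
import pandas as pd
from scipy import io

def load_10x_sketch(inst_path):
    mat = io.mmread(inst_path + 'matrix.mtx').tocsc()        # genes x barcodes
    with open(inst_path + 'genes.tsv') as f:
        genes = [line.split('\t')[1].strip() for line in f]  # gene symbols
    with open(inst_path + 'barcodes.tsv') as f:
        barcodes = [line.strip() for line in f]
    return pd.DataFrame(mat.todense(), index=genes, columns=barcodes)
```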
'''
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category
'''
cols = df.columns.tolist()
if type(precalc_dist) == bool:
# compute distance between rows (transpose to get col...
equal_var=False, plot_roc=True,
precalc_dist=False, calc_roc=True) | Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category | 2.313072 | 2.156646 | 1.072532 |
''' Generate signatures for column categories '''
df_t = df_ini.transpose()
# remove columns with constant values
df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]
df = self.row_tuple_to_multiindex(df_t)
cell_types = sorted(list(set(df.index.get_level_values(category_level).tolis... | def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05,
num_top_dims=False, verbose=True, equal_var=False) | Generate signatures for column categories | 2.431461 | 2.429037 | 1.000998 |
''' Predict category using signature '''
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.ix[common_rows])
df_sig = deepcopy(df_sig_ini.ix[common_rows])
# c... | def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
truth_level=1, unknown_thresh=-1) | Predict category using signature | 2.951412 | 2.955857 | 0.998496 |