INSTRUCTION | RESPONSE |
|---|---|
Unseal data | def unseal(self, data, return_options=False):
'''Unseal data'''
data = self._remove_magic(data)
data = urlsafe_nopadding_b64decode(data)
options = self._read_header(data)
data = self._add_magic(data)
data = self._unsign_data(data, options)
data = self._remove_mag... |
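The unseal pipeline above reverses a seal operation layer by layer: strip magic, base64-decode, read the header, verify the signature, then (in the truncated remainder) remove the header and decrypt/decompress/unserialize. Below is a minimal runnable sketch of the same layering using only the standard library; it omits encryption, header versioning, and magic handling, and `toy_seal`/`toy_unseal` with a fixed demo key are illustrative, not the class's API:

```python
import base64, hashlib, hmac, json, zlib

KEY = b"0" * 32  # demo key only; the real class derives keys via PBKDF2

def toy_seal(obj):
    """Toy version of the pipeline: serialize -> compress -> sign -> b64."""
    payload = zlib.compress(json.dumps(obj).encode())
    sig = hmac.new(KEY, payload, hashlib.sha256).digest()
    return base64.urlsafe_b64encode(payload + sig).rstrip(b"=")

def toy_unseal(blob):
    # restore the stripped base64 padding, then undo each layer in reverse
    data = base64.urlsafe_b64decode(blob + b"=" * (-len(blob) % 4))
    payload, sig = data[:-32], data[-32:]
    if not hmac.compare_digest(sig, hmac.new(KEY, payload, hashlib.sha256).digest()):
        raise ValueError("bad signature")
    return json.loads(zlib.decompress(payload).decode())

assert toy_unseal(toy_seal({"a": 1})) == {"a": 1}
```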
Verify sealed data signature | def verify_signature(self, data):
'''Verify sealed data signature'''
data = self._remove_magic(data)
data = urlsafe_nopadding_b64decode(data)
options = self._read_header(data)
data = self._add_magic(data)
self._unsign_data(data, options) |
Encode data with specific algorithm | def _encode(self, data, algorithm, key=None):
'''Encode data with specific algorithm'''
if algorithm['type'] == 'hmac':
return data + self._hmac_generate(data, algorithm, key)
elif algorithm['type'] == 'aes':
return self._aes_encrypt(data, algorithm, key)
elif al... |
Decode data with specific algorithm | def _decode(self, data, algorithm, key=None):
'''Decode data with specific algorithm'''
if algorithm['type'] == 'hmac':
verify_signature = data[-algorithm['hash_size']:]
data = data[:-algorithm['hash_size']]
signature = self._hmac_generate(data, algorithm, key)
... |
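The 'hmac' branch of `_decode` splits a trailing digest off the payload and compares it against a freshly computed one. A standalone sketch with the standard library (the 32-byte hash size and SHA-256 are assumptions; the original reads `hash_size` from the algorithm definition and uses PyCrypto). `hmac.compare_digest` avoids the timing leak a plain `==` comparison can introduce:

```python
import hashlib, hmac

def hmac_verify(data, key, hash_size=32):
    """Split a trailing HMAC-SHA256 tag off data and verify it."""
    payload, tag = data[:-hash_size], data[-hash_size:]
    expected = hmac.new(key, payload, hashlib.sha256).digest()
    if not hmac.compare_digest(tag, expected):
        raise ValueError("signature verification failed")
    return payload
```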
Add signature to data | def _sign_data(self, data, options):
'''Add signature to data'''
if options['signature_algorithm_id'] not in self.signature_algorithms:
raise Exception('Unknown signature algorithm id: %d'
% options['signature_algorithm_id'])
signature_algorithm = \
... |
Verify and remove signature | def _unsign_data(self, data, options):
'''Verify and remove signature'''
if options['signature_algorithm_id'] not in self.signature_algorithms:
raise Exception('Unknown signature algorithm id: %d'
% options['signature_algorithm_id'])
signature_algorithm ... |
Encrypt data | def _encrypt_data(self, data, options):
'''Encrypt data'''
if options['encryption_algorithm_id'] not in self.encryption_algorithms:
raise Exception('Unknown encryption algorithm id: %d'
% options['encryption_algorithm_id'])
encryption_algorithm = \
... |
Decrypt data | def _decrypt_data(self, data, options):
'''Decrypt data'''
if options['encryption_algorithm_id'] not in self.encryption_algorithms:
raise Exception('Unknown encryption algorithm id: %d'
% options['encryption_algorithm_id'])
encryption_algorithm = \
... |
Serialize data | def _serialize_data(self, data, options):
'''Serialize data'''
serialization_algorithm_id = options['serialization_algorithm_id']
if serialization_algorithm_id not in self.serialization_algorithms:
raise Exception('Unknown serialization algorithm id: %d'
... |
Unserialize data | def _unserialize_data(self, data, options):
'''Unserialize data'''
serialization_algorithm_id = options['serialization_algorithm_id']
if serialization_algorithm_id not in self.serialization_algorithms:
raise Exception('Unknown serialization algorithm id: %d'
... |
Compress data | def _compress_data(self, data, options):
'''Compress data'''
compression_algorithm_id = options['compression_algorithm_id']
if compression_algorithm_id not in self.compression_algorithms:
raise Exception('Unknown compression algorithm id: %d'
% compressio... |
Decompress data | def _decompress_data(self, data, options):
'''Decompress data'''
compression_algorithm_id = options['compression_algorithm_id']
if compression_algorithm_id not in self.compression_algorithms:
raise Exception('Unknown compression algorithm id: %d'
% compre... |
Verify and remove magic | def _remove_magic(self, data):
'''Verify and remove magic'''
if not self.magic:
return data
magic_size = len(self.magic)
magic = data[:magic_size]
if magic != self.magic:
raise Exception('Invalid magic')
data = data[magic_size:]
return d... |
Add header to data | def _add_header(self, data, options):
'''Add header to data'''
# pylint: disable=W0142
version_info = self._get_version_info(options['version'])
flags = options['flags']
header_flags = dict(
(i, str(int(j))) for i, j in options['flags'].iteritems())
header... |
Read header from data | def _read_header(self, data):
'''Read header from data'''
# pylint: disable=W0212
version = self._read_version(data)
version_info = self._get_version_info(version)
header_data = data[:version_info['header_size']]
header = version_info['header']
header = header._... |
Remove header from data | def _remove_header(self, data, options):
'''Remove header from data'''
version_info = self._get_version_info(options['version'])
header_size = version_info['header_size']
if options['flags']['timestamp']:
header_size += version_info['timestamp_size']
data = data[he... |
Read header version from data | def _read_version(self, data):
'''Read header version from data'''
version = ord(data[0])
if version not in self.VERSIONS:
raise Exception('Version not defined: %d' % version)
return version |
Get algorithm info | def _get_algorithm_info(self, algorithm_info):
'''Get algorithm info'''
if algorithm_info['algorithm'] not in self.ALGORITHMS:
raise Exception('Algorithm not supported: %s'
% algorithm_info['algorithm'])
algorithm = self.ALGORITHMS[algorithm_info['algori... |
Generate and return PBKDF2 key | def _generate_key(pass_id, passphrases, salt, algorithm):
'''Generate and return PBKDF2 key'''
if pass_id not in passphrases:
raise Exception('Passphrase not defined for id: %d' % pass_id)
passphrase = passphrases[pass_id]
if len(passphrase) < 32:
raise Excepti... |
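`_generate_key` derives the working key from a passphrase with PBKDF2. A minimal stdlib equivalent as a sketch; the iteration count, digest, and key size here are illustrative, since the original takes them from the algorithm definition dict:

```python
import hashlib

def generate_key_sketch(passphrase, salt, key_size=32, iterations=10000):
    """Derive a key from a passphrase with PBKDF2-HMAC-SHA256.
    salt must be bytes; passphrase is a str here for simplicity."""
    if len(passphrase) < 32:
        raise ValueError("passphrase must be at least 32 characters")
    return hashlib.pbkdf2_hmac('sha256', passphrase.encode(), salt,
                               iterations, dklen=key_size)
```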
Update algorithm definition type dictionaries | def _update_dict(data, default_data, replace_data=False):
'''Update algorithm definition type dictionaries'''
if not data:
data = default_data.copy()
return data
if not isinstance(data, dict):
raise TypeError('Value not dict type')
if len(data) > 255... |
Generate HMAC hash | def _get_hashlib(digestmode):
'''Generate HMAC hash'''
if digestmode == 'sha1':
return SHA
if digestmode == 'sha256':
return SHA256
elif digestmode == 'sha384':
return SHA384
elif digestmode == 'sha512':
return SHA512
else:... |
Generate HMAC hash | def _hmac_generate(data, algorithm, key):
'''Generate HMAC hash'''
digestmod = EncryptedPickle._get_hashlib(algorithm['subtype'])
return HMAC.new(key, data, digestmod).digest() |
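`_hmac_generate` and `_get_hashlib` together map a 'subtype' string to a digest and MAC the data with PyCrypto. The same behavior with the standard library's `hmac`/`hashlib`, as a sketch:

```python
import hashlib, hmac

def hmac_generate(data, subtype, key):
    # same subtype -> digest mapping as _get_hashlib above
    digests = {'sha1': hashlib.sha1, 'sha256': hashlib.sha256,
               'sha384': hashlib.sha384, 'sha512': hashlib.sha512}
    if subtype not in digests:
        raise ValueError('digestmode not supported: %s' % subtype)
    return hmac.new(key, data, digests[subtype]).digest()
```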
AES encrypt | def _aes_encrypt(data, algorithm, key):
'''AES encrypt'''
if algorithm['subtype'] == 'cbc':
mode = AES.MODE_CBC
else:
raise Exception('AES subtype not supported: %s'
% algorithm['subtype'])
iv_size = algorithm['iv_size']
block... |
AES decrypt | def _aes_decrypt(data, algorithm, key):
'''AES decrypt'''
if algorithm['subtype'] == 'cbc':
mode = AES.MODE_CBC
else:
raise Exception('AES subtype not supported: %s'
% algorithm['subtype'])
iv_size = algorithm['iv_size']
if '... |
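A round-trip sketch in the spirit of `_aes_encrypt`/`_aes_decrypt`, written against PyCryptodome (the maintained PyCrypto fork). The IV-prepended ciphertext layout and PKCS#7 padding are assumptions suggested by the `iv_size`/block-size handling visible above:

```python
import os
from Crypto.Cipher import AES

def aes_cbc_encrypt(data, key, iv_size=16, block_size=16):
    iv = os.urandom(iv_size)
    pad = block_size - len(data) % block_size      # PKCS#7 padding
    data += bytes([pad]) * pad
    return iv + AES.new(key, AES.MODE_CBC, iv).encrypt(data)

def aes_cbc_decrypt(data, key, iv_size=16):
    iv, ciphertext = data[:iv_size], data[iv_size:]
    plain = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    return plain[:-plain[-1]]                      # strip PKCS#7 padding

key = os.urandom(32)                               # AES-256 demo key
assert aes_cbc_decrypt(aes_cbc_encrypt(b"secret", key), key) == b"secret"
```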
GZIP compress | def _zlib_compress(data, algorithm):
'''GZIP compress'''
if algorithm['subtype'] == 'deflate':
encoder = zlib.compressobj(algorithm['level'], zlib.DEFLATED, -15)
compressed = encoder.compress(data)
compressed += encoder.flush()
return compressed
... |
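The 'deflate' subtype passes `wbits=-15`, i.e. a raw deflate stream with no zlib header or checksum, so decompression must use the same negative window size. A self-contained round trip:

```python
import zlib

def deflate_compress(data, level=9):
    encoder = zlib.compressobj(level, zlib.DEFLATED, -15)  # raw deflate
    return encoder.compress(data) + encoder.flush()

def deflate_decompress(data):
    decoder = zlib.decompressobj(-15)  # must match the -15 window bits
    return decoder.decompress(data) + decoder.flush()

assert deflate_decompress(deflate_compress(b"hello" * 100)) == b"hello" * 100
```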
This function populates the internal tableOfContents list with the contents of the zip file TOC. If the server does not support ranged requests, this will raise an exception. It will also throw an exception if the TOC cannot be found. | def getTableOfContents(self):
"""
This function populates the internal tableOfContents list with the contents
of the zip file TOC. If the server does not support ranged requests, this will raise
an exception. It will also throw an exception if the TOC cannot be found.
"""
... |
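The ranged-request idea behind `getTableOfContents`: ask the server for just the tail of the file with an HTTP `Range` header and scan it for the zip End Of Central Directory signature. A sketch under assumptions (the 64 KiB tail size and these helper names are illustrative; the real method goes on to parse the full central directory records):

```python
import urllib.request

def fetch_zip_tail(url, tail_bytes=65536):
    """Fetch only the last tail_bytes of a remote file via a suffix Range."""
    req = urllib.request.Request(url, headers={'Range': 'bytes=-%d' % tail_bytes})
    with urllib.request.urlopen(req) as resp:
        if resp.status != 206:  # 206 Partial Content; 200 means Range ignored
            raise IOError('server does not support ranged requests')
        return resp.read()

def find_eocd(tail):
    """Locate the End Of Central Directory signature in the tail."""
    offset = tail.rfind(b'PK\x05\x06')
    if offset == -1:
        raise IOError('zip table of contents not found')
    return offset
```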
This function will extract a single file from the remote zip without downloading the entire zip file. The filename argument should match whatever is in the filename key of the tableOfContents. | def extractFile(self, filename):
"""
This function will extract a single file from the remote zip without downloading
the entire zip file. The filename argument should match whatever is in the 'filename'
key of the tableOfContents.
"""
files = [x for x in self.tableOfCont... |
Develop a postcard region around the target star. Other stars in this postcard will be used as possible reference stars. Args: npix: The size of the postcard region. The region will be a square with sides npix pixels (default: 300) shape: The size of each individual image. For Kepler/K2 FFIs this should never need t... | def make_postcard(self, npix=300, shape=(1070, 1132), buffer_size=15):
"""
Develop a "postcard" region around the target star.
Other stars in this postcard will be used as possible reference stars.
Args:
npix: The size of the postcard region. The region will be a sq... |
Identify apertures for all sources on the postcard, both for the target and potential reference stars. Args: edge_lim: The initial limit for the creation of apertures. The aperture will be a region of contiguous pixels with flux values larger than the product of edge_lim and the brightest pixel value for this star, as lon... | def find_other_sources(self, edge_lim = 0.015, min_val = 5000,
ntargets = 250, extend_region_size=3, remove_excess=4,
plot_flag = False, plot_window=15):
"""
Identify apertures for all sources on the postcard, both for the
target and potenti... |
Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data in each orientation. This function is called by other functions and generally the user will not need to interact with it directly. | def do_photometry(self):
"""
Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data
in each orientation. This function is called by other functions and generally the user will not need
to interact with it directly.
"""
... |
Creates the figure shown in adjust_aperture for visualization purposes. Called by other functions and generally not called by the user directly. | def generate_panel(self, img):
"""
Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions
and generally not called by the user directly.
Args:
img: The data frame to be passed through to be plotted. A cutout of the ``integrated... |
Develop a panel showing the current aperture and the light curve as judged from that aperture. Clicking on individual pixels on the aperture will toggle those pixels on or off into the aperture (which will be updated after closing the plot). Clicking on the 0th row or column will turn off all pixels in that column or... | def adjust_aperture(self, image_region=15, ignore_bright=0):
"""
Develop a panel showing the current aperture and the light curve as judged from that aperture.
Clicking on individual pixels on the aperture will toggle those pixels on or off into the
aperture (which will be updated after ... |
Determine the normalized photometry, accounting for effects shared by reference stars. Does not provide the opportunity to adjust the aperture. Args: image_region: If True allow the aperture to be shifted up to one pixel in both the x and y directions to account for differential velocity aberration (default: True) igno... | def data_for_target(self, do_roll=True, ignore_bright=0):
"""
Determine the normalized photometry, accounting for effects shared by reference stars. Does not provide
the opportunity to adjust the aperture
Args:
image_region: If ``True`` allow the aperture to be shif... |
Determine the suitable reference stars, and then the total flux in those stars and in the target star in each epoch. Args: min_flux: The size of the region around the target star to be plotted. Images will be a square with side length image_region (default: 5000) outlier_iterations: The number of iterations to remove o... | def calc_fluxes(self, min_flux = 5000, outlier_iterations=5,
max_outlier_obs=4, outlier_limit=1.7):
"""
Determine the suitable reference stars, and then the total flux in those stars and
in the target star in each epoch
Args:
min_flux: The si... |
Identify the centroid positions for the target star at all epochs. Useful for verifying that there is no correlation between flux and position as might be expected for high proper motion stars. | def calc_centroids(self):
"""
Identify the centroid positions for the target star at all epochs. Useful for verifying that there is
no correlation between flux and position, as might be expected for high proper motion stars.
"""
self.cm = np.zeros((len(self.postcard), 2))
... |
Identify the expected flux value at the time of each observation based on the Kepler long-cadence data, to ensure variations observed are not the effects of a single large starspot. Only works if the target star was targeted for long or short cadence observations during the primary mission. | def define_spotsignal(self):
"""
Identify the "expected" flux value at the time of each observation based on the
Kepler long-cadence data, to ensure variations observed are not the effects of a single
large starspot. Only works if the target star was targeted for long or short cadence
... |
Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper. Based on the kepcal package of Dan Foreman-Mackey. | def model_uncert(self):
"""
Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper.
Based on the kepcal package of Dan Foreman-Mackey.
"""
Y = self.photometry_array.T
Y /= np.median(Y, axis=1)[:, None]
C = np.median(Y, axis=0... |
Append line to internal list. Uses self.tabs to format indents. Keyword arguments: line -- line to append | def _print(self, line=''):
"""Append line to internal list.
Uses self.tabs to format indents.
Keyword arguments:
line -- line to append
"""
self.lines.append('{}{}'.format('\t'*self.tabs , line)) |
Dump single enum type. Keyword arguments: top -- top namespace | def _dump_enum(self, e, top=''):
"""Dump single enum type.
Keyword arguments:
top -- top namespace
"""
self._print()
self._print('enum {} {{'.format(e.name))
self.defines.append('{}.{}'.format(top,e.name))
self.tabs+=1
for v in e.... |
Dump single field. | def _dump_field(self, fd):
"""Dump single field.
"""
v = {}
v['label'] = Pbd.LABELS[fd.label]
v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type]
v['name'] = fd.name
v['number'] = fd.number
v['default'] = '[default = {}]'.format(fd.de... |
Dump single message type. Keyword arguments: top -- top namespace | def _dump_message(self, m, top=''):
"""Dump single message type.
Keyword arguments:
top -- top namespace
"""
self._print()
self._print('message {} {{'.format(m.name))
self.defines.append('{}.{}'.format(top, m.name))
self.tabs+=1
f... |
Walk and dump (disasm) descriptor. | def _walk(self, fd):
"""Walk and dump (disasm) descriptor.
"""
top = '.{}'.format(fd.package) if len(fd.package) > 0 else ''
for e in fd.enum_type: self._dump_enum(e, top)
        for m in fd.message_type: self._dump_message(m, top) |
Disassemble serialized protocol buffers file. | def disassemble(self):
"""Disassemble serialized protocol buffers file.
"""
ser_pb = open(self.input_file, 'rb').read() # Read serialized pb file
fd = FileDescriptorProto()
fd.ParseFromString(ser_pb)
self.name = fd.name
self._print('// Reversed ... |
Dump proto file to given directory. Keyword arguments: out_dir -- dump directory. Default='.' | def dump(self, out_dir='.'):
"""Dump proto file to given directory.
Keyword arguments:
out_dir -- dump directory. Default='.'
"""
uri = out_dir + os.sep + self.name
with open(uri, 'w') as fh:
fh.write('\n'.join(self.lines)) |
Find all missing imports in list of Pbd instances. | def find_imports(self, pbds):
"""Find all missing imports in list of Pbd instances.
"""
# List of types used, but not defined
imports = list(set(self.uses).difference(set(self.defines)))
        # Clumsy, but enough for now
for imp in imports:
for p in pbd... |
Returns a dict from a fasta file and the number of sequences as the second return value. fasta_file can be a string path or a file object. The key of fasta_dict can be set using the keyword arguments and results in a combination of id, header, sequence, in that order, joined with '||'. (default: id) Duplicate keys are che... | def fasta_file_to_dict(fasta_file, id=True, header=False, seq=False):
"""Returns a dict from a fasta file and the number of sequences as the second return value.
fasta_file can be a string path or a file object.
The key of fasta_dict can be set using the keyword arguments and
results in a combination of... |
Write fasta_dict to fasta_file | def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None):
"""Write fasta_dict to fasta_file
:param fasta_dict: returned by fasta_file_to_dict
:param fasta_file: output file can be a string path or a file object
:param line_char_limit: None = no limit (default)
:return: None
"""
... |
Helper function to record and log an error message | def add_line_error(self, line_data, error_info, log_level=logging.ERROR):
"""Helper function to record and log an error message
:param line_data: dict
:param error_info: dict
:param logger:
:param log_level: int
:return:
"""
if not error_info: return
... |
checks whether child features are within the coordinate boundaries of parent features | def check_parent_boundary(self):
"""
checks whether child features are within the coordinate boundaries of parent features
:return:
"""
for line in self.lines:
for parent_feature in line['parents']:
ok = False
for parent_line in parent... |
1. get a list of CDS with the same parent 2. sort according to strand 3. calculate and validate phase | def check_phase(self):
"""
1. get a list of CDS with the same parent
2. sort according to strand
3. calculate and validate phase
"""
plus_minus = set(['+', '-'])
for k, g in groupby(sorted([line for line in self.lines if line['line_type'] == 'feature' and line['t... |
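Step 3 of `check_phase` rests on the GFF3 rule that a CDS segment's phase is the number of bases to skip to reach the next codon start, so for segments sorted 5'→3', phase_next = (phase − length) mod 3. A minimal standalone validator over (start, end, phase) tuples (a hypothetical helper for illustration, not the gff3 package's API):

```python
def validate_phase(cds_segments):
    """cds_segments: list of (start, end, phase) for one parent mRNA,
    sorted 5'->3' (descending coordinates on the minus strand)."""
    expected = cds_segments[0][2]          # first declared phase
    for start, end, phase in cds_segments:
        if phase != expected:
            raise ValueError('bad phase %d, expected %d' % (phase, expected))
        length = end - start + 1           # GFF3 coordinates are inclusive
        expected = (expected - length) % 3 # phase carried into next segment
    return True

# two segments: 100 bases (phase 0), remainder 100 % 3 = 1, so next phase is 2
assert validate_phase([(1, 100, 0), (201, 300, 2)])
```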
Check seqid, bounds, and the number of Ns in each feature using one or more reference sources. | def check_reference(self, sequence_region=False, fasta_embedded=False, fasta_external=False, check_bounds=True, check_n=True, allowed_num_of_n=0, feature_types=('CDS',)):
"""
Check seqid, bounds and the number of Ns in each feature using one or more reference sources.
Seqid check: check if the ... |
Parse the gff file into the following data structures: | def parse(self, gff_file, strict=False):
"""Parse the gff file into the following data structures:
* lines(list of line_data(dict))
- line_index(int): the index in lines
- line_raw(str)
- line_type(str in ['feature', 'directive', 'comment', 'blank', 'unknown'])
... |
BFS graph algorithm. :param line_data: line_data(dict) with line_data['line_index'] or line_index(int) :return: list of line_data(dict) | def descendants(self, line_data):
"""
BFS graph algorithm
:param line_data: line_data(dict) with line_data['line_index'] or line_index(int)
:return: list of line_data(dict)
"""
# get start node
try:
start = line_data['line_index']
except TypeEr... |
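A minimal sketch of the BFS that `descendants()` describes: start from one node and walk 'children' links breadth-first, collecting every reachable line_data. The dict-with-'children' node shape mirrors the structures documented above:

```python
from collections import deque

def bfs_descendants(start):
    """Return all nodes reachable from start via 'children', breadth-first."""
    visited, queue, result = set(), deque([start]), []
    while queue:
        node = queue.popleft()
        for child in node.get('children', []):
            if id(child) not in visited:   # guard against shared children
                visited.add(id(child))
                result.append(child)
                queue.append(child)
    return result

leaf = {'children': []}
root = {'children': [{'children': [leaf]}, {'children': []}]}
assert bfs_descendants(root)[-1] is leaf   # leaf found last (deepest level)
```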
Transfer children from old_parent to new_parent | def adopt(self, old_parent, new_parent):
"""
Transfer children from old_parent to new_parent
:param old_parent: feature_id(str) or line_index(int) or line_data(dict) or feature
:param new_parent: feature_id(str) or line_index(int) or line_data(dict)
:return: List of children tra... |
Marks line_data and all of its associated feature's 'line_status' as 'removed'; does not actually remove the line_data from the data structure. The write function checks the 'line_status' when writing the gff file. Find the root parent of line_data of type root_type, remove all of its descendants. If the root parent has a pare... | def remove(self, line_data, root_type=None):
"""
Marks line_data and all of its associated feature's 'line_status' as 'removed', does not actually remove the line_data from the data structure.
The write function checks the 'line_status' when writing the gff file.
Find the root parent of ... |
Get the sequence of line_data, according to the columns 'seqid', 'start', 'end', 'strand'. Requires fasta reference. When used on 'mRNA' type line_data, child_type can be used to specify which kind of sequence to return: * child_type=None: pre-mRNA, returns the sequence of line_data from start to end, reverse complement according t... | def sequence(self, line_data, child_type=None, reference=None):
"""
Get the sequence of line_data, according to the columns 'seqid', 'start', 'end', 'strand'.
Requires fasta reference.
When used on 'mRNA' type line_data, child_type can be used to specify which kind of sequence to return:... |
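The 'reverse complement according to strand' step mentioned above, as a standalone sketch over GFF3-style 1-based inclusive coordinates (plain `str.translate`; `extract_sequence` is illustrative, not the package's own helper):

```python
COMPLEMENT = str.maketrans('ACGTacgt', 'TGCAtgca')

def extract_sequence(fasta_seq, start, end, strand):
    """start/end are 1-based inclusive, as in GFF3 columns 4 and 5."""
    seq = fasta_seq[start - 1:end]
    if strand == '-':
        seq = seq.translate(COMPLEMENT)[::-1]  # reverse complement
    return seq

# bases 3-6 of AATTCCGG are TTCC; its reverse complement is GGAA
assert extract_sequence('AATTCCGG', 3, 6, '-') == 'GGAA'
```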
given a filename, return the ABF's ID string. | def abfIDfromFname(fname):
"""given a filename, return the ABFs ID string."""
fname=os.path.abspath(fname)
basename=os.path.basename(fname)
return os.path.splitext(basename)[0] |
Determine the protocol used to record an ABF file | def abfProtocol(fname):
"""Determine the protocol used to record an ABF file"""
f=open(fname,'rb')
raw=f.read(30*1000) #it should be in the first 30k of the file
f.close()
raw=raw.decode("utf-8","ignore")
raw=raw.split("Clampex")[1].split(".pro")[0]
protocol = os.path.basename(raw) # the who... |
given the bytestring ABF header, make and launch HTML. | def headerHTML(header,fname):
"""given the bytestring ABF header, make and launch HTML."""
html="<html><body><code>"
html+="<h2>%s</h2>"%(fname)
html+=pprint.pformat(header, indent=1)
    html=html.replace("\n",'<br>').replace(" ","&nbsp;")
html=html.replace(r"\x00","")
... |
set the sweep and channel of an ABF. Both start at 0. | def setsweep(self, sweep=0, channel=0):
"""set the sweep and channel of an ABF. Both start at 0."""
try:
sweep=int(sweep)
except:
self.log.error("trying to set sweep to [%s]",sweep)
return
if sweep<0:
sweep=self.sweeps-1-sweep # if negative... |
iterate over every sweep | def setsweeps(self):
"""iterate over every sweep"""
for sweep in range(self.sweeps):
self.setsweep(sweep)
yield self.sweep |
read the header and populate self with information about comments | def comments_load(self):
"""read the header and populate self with information about comments"""
self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[]
self.comments=0 # will be >0 if comments exist
self.comment_text=""
try:
# this used to work
... |
Recreate the command stimulus (protocol) for the current sweep. It's not stored point by point (that's a waste of time and memory!) Instead it's stored as a few (x,y) points which can be easily graphed. | def generate_protocol(self):
"""
Recreate the command stimulus (protocol) for the current sweep.
It's not stored point by point (that's a waste of time and memory!)
Instead it's stored as a few (x,y) points which can be easily graphed.
TODO: THIS
for segment in abf.ABFre... |
given a sweep, return the protocol as [Xs,Ys]. This is good for plotting/recreating the protocol trace. There may be duplicate numbers. | def get_protocol(self,sweep):
"""
given a sweep, return the protocol as [Xs,Ys].
This is good for plotting/recreating the protocol trace.
There may be duplicate numbers.
"""
self.setsweep(sweep)
return list(self.protoX),list(self.protoY) |
given a sweep, return the protocol as a condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. | def get_protocol_sequence(self,sweep):
"""
given a sweep, return the protocol as condensed sequence.
This is better for comparing similarities and determining steps.
There should be no duplicate numbers.
"""
self.setsweep(sweep)
return list(self.protoSeqX),list(se... |
alternative to the existing abf protocol stuff; return the start/stop time of an epoch. Epochs start at zero. A=0, B=1, C=2, D=3, ... | def epochTimes(self,nEpoch=2):
"""
alternative to the existing abf protocol stuff
return the start/stop time of an epoch.
Epoch start at zero.
A=0, B=1, C=2, D=3, ...
"""
times=[]
durations=[]
for epoch in self.header['dictEpochInfoPerDAC'][self.ch... |
return the average of part of the current sweep. | def average(self,t1=0,t2=None,setsweep=False):
"""return the average of part of the current sweep."""
if setsweep:
self.setsweep(setsweep)
if t2 is None or t2>self.sweepLength:
t2=self.sweepLength
self.log.debug("resetting t2 to [%f]",t2)
t1=max(t1,0)
... |
Return a sweep which is the average of multiple sweeps. For now, standard deviation is lost. | def averageSweep(self,sweepFirst=0,sweepLast=None):
"""
Return a sweep which is the average of multiple sweeps.
For now, standard deviation is lost.
"""
if sweepLast is None:
sweepLast=self.sweeps-1
nSweeps=sweepLast-sweepFirst+1
runningSum=np.zeros(le... |
create kernel based on this ABF info. | def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):
"""create kernel based on this ABF info."""
sigmaMS=sizeMS/10 if sigmaMS is None else sigmaMS
size,sigma=sizeMS*self.pointsPerMs,sigmaMS*self.pointsPerMs
self.kernel=swhlab.common.kernel_gaussian(size,sigma,forwardOnly)
... |
Get the filtered sweepY of the current sweep. Only works if self.kernel has been generated. | def sweepYfiltered(self):
"""
Get the filtered sweepY of the current sweep.
Only works if self.kernel has been generated.
"""
assert self.kernel is not None
return swhlab.common.convolve(self.sweepY,self.kernel) |
Calculates the net difference between positive/negative phasic events. Returns the phasic difference value of the current sweep. | def phasicNet(self,biggestEvent=50,m1=.5,m2=None):
"""
Calculates the net difference between positive/negative phasic events
Returns return the phasic difference value of the current sweep.
Arguments:
biggestEvent (int): the size of the largest event anticipated
... |
ensure the ./swhlab/ folder exists. | def output_touch(self):
"""ensure the ./swhlab/ folder exists."""
if not os.path.exists(self.outFolder):
self.log.debug("creating %s",self.outFolder)
os.mkdir(self.outFolder) |
Given a list of list of dicts, return just the dicts. | def dictFlat(l):
"""Given a list of list of dicts, return just the dicts."""
if type(l) is dict:
return [l]
if "numpy" in str(type(l)):
return l
dicts=[]
for item in l:
if type(item)==dict:
dicts.append(item)
elif type(item)==list:
for item2 in... |
returns len() of each item in a list, as a list. | def listCount(l):
"""returns len() of each item in a list, as a list."""
for i in range(len(l)):
l[i]=len(l[i])
return l |
Return all 'key' from a list of dicts (or list of list of dicts). | def dictVals(l,key):
"""Return all 'key' from a list of dicts. (or list of list of dicts)"""
dicts=dictFlat(l)
vals=np.empty(len(dicts))*np.nan
for i in range(len(dicts)):
if key in dicts[i]:
vals[i]=dicts[i][key]
return vals |
Given a list (l) of dicts (d), return AV and SD. | def dictAvg(listOfDicts,key,stdErr=False):
"""Given a list (l) of dicts (d), return AV and SD."""
vals=dictVals(listOfDicts,key)
if len(vals) and np.any(vals):
av=np.nanmean(vals)
er=np.nanstd(vals)
if stdErr:
            er=er/np.sqrt(np.count_nonzero(~np.isnan(vals))) # n = non-NaN observations, not the scalar er
else:
... |
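For the stdErr branch: the standard error is the standard deviation divided by the square root of the number of observations, and with NaN-padded arrays that count must come from the values themselves, which is why the fix above counts non-NaN entries of `vals`. A worked example:

```python
import numpy as np

vals = np.array([1.0, 2.0, np.nan, 4.0])
av = np.nanmean(vals)                         # mean ignoring NaNs
sd = np.nanstd(vals)                          # SD ignoring NaNs
n = np.count_nonzero(~np.isnan(vals))         # 3 real observations
stderr = sd / np.sqrt(n)                      # standard error of the mean
```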
returns a list (of the given size) of dicts with fake data. Some dictionary keys are missing for some of the items. | def dummyListOfDicts(size=100):
"""
returns a list (of the given size) of dicts with fake data.
some dictionary keys are missing for some of the items.
"""
titles="ahp,halfwidth,peak,expT,expI,sweep".split(",")
ld=[] #list of dicts
for i in range(size):
d={}
for t in titles:
... |
given a key, return a list of values from the matrix with that key. | def matrixValues(matrix,key):
"""given a key, return a list of values from the matrix with that key."""
assert key in matrix.dtype.names
col=matrix.dtype.names.index(key)
values=np.empty(len(matrix))*np.nan
for i in range(len(matrix)):
values[i]=matrix[i][col]
return values |
given a recarray, return it as a list of dicts. | def matrixToDicts(data):
"""given a recarray, return it as a list of dicts."""
# 1D array
if "float" in str(type(data[0])):
d={}
for x in range(len(data)):
d[data.dtype.names[x]]=data[x]
return d
# 2D array
l=[]
for y in range(len(data)):
d={}
... |
Given a list of dicts (or list of list of dicts), return a structured array. Headings will be sorted in alphabetical order. | def matrixfromDicts(dicts):
"""
    Given a list of dicts (or list of list of dicts), return a structured array.
Headings will be sorted in alphabetical order.
"""
if 'numpy' in str(type(dicts)):
return dicts #already an array?
names=set([])
dicts=dictFlat(dicts)
for item in dicts:
... |
turns a list into a <tr><td>something</td></tr>; call this when generating HTML tables dynamically. | def htmlListToTR(l,trClass=None,tdClass=None,td1Class=None):
"""
turns a list into a <tr><td>something</td></tr>
call this when generating HTML tables dynamically.
"""
html="<tr>"
for item in l:
if 'array' in str(type(item)):
item=item[0] #TODO: why is this needed
htm... |
given text, make it a temporary HTML file and launch it. | def html_temp_launch(html):
"""given text, make it a temporary HTML file and launch it."""
fname = tempfile.gettempdir()+"/swhlab/temp.html"
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname) |
show everything we can about an object's properties and methods. | def checkOut(thing,html=True):
    """show everything we can about an object's properties and methods."""
msg=""
for name in sorted(dir(thing)):
if not "__" in name:
msg+="<b>%s</b>\n"%name
try:
msg+=" ^-VALUE: %s\n"%getattr(thing,name)()
except:
... |
Put 2d numpy data into an Origin worksheet. If bookName and sheetName are given, try to load data into that book/sheet. If the book/sheet doesn't exist, create it. | def matrixToWks(data,names=None,units=None,bookName=None,sheetName=" ",xCol=None):
"""
Put 2d numpy data into an Origin worksheet.
If bookname and sheetname are given try to load data into that book/sheet.
If the book/sheet doesn't exist, create it.
"""
if type(data) is list:
data=matrix... |
Put 2d numpy data into a temporary HTML file. | def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None):
"""Put 2d numpy data into a temporary HTML file."""
if not names:
names=[""]*len(data[0])
if data.dtype.names:
names=list(data.dtype.names)
if not units:
units=[""]*len(data[0])
f... |
given a string or a path to an XML file, return an XML object. | def XMLtoPython(xmlStr=r"C:\Apps\pythonModules\GSTemp.xml"):
"""
given a string or a path to an XML file, return an XML object.
"""
#TODO: this absolute file path crazy stuff needs to stop!
if os.path.exists(xmlStr):
with open(xmlStr) as f:
xmlStr=f.read()
print(xmlStr)
p... |
mono-exponential curve. | def algo_exp(x, m, t, b):
"""mono-exponential curve."""
return m*np.exp(-t*x)+b |
Exponential fit. Returns [multiplier, t, offset, time constant]. | def fit_exp(y,graphToo=False):
"""Exponential fit. Returns [multiplier, t, offset, time constant]"""
x=np.arange(len(y))
try:
params, cv = scipy.optimize.curve_fit(algo_exp, x, y, p0=(1,1e-6,1))
except:
print(" !! curve fit failed (%.02f points)"%len(x))
return np.nan,np.nan,np.n... |
given a numpy array (XYXYXY columns), return it aligned. Data returned will be XYYY. NaNs may be returned. | def numpyAlignXY(data):
"""
given a numpy array (XYXYXY columns), return it aligned.
data returned will be XYYY. NANs may be returned.
"""
print(data)
Xs=data.flatten()[::2] # get all X values
Xs=Xs[~np.isnan(Xs)] # remove nans
Xs=sorted(list(set(Xs))) # eliminate duplicates then sort it... |
simple gaussian convolution. Returns same # of points as gotten. | def filter_gaussian(Ys,sigma,plotToo=False):
"""simple gaussian convolution. Returns same # of points as gotten."""
timeA=time.time()
window=scipy.signal.gaussian(len(Ys),sigma)
window/=sum(window)
Ys2=np.convolve(Ys,window,'same')
print("LEN:",len(Ys2),len(Ys))
timeB=time.time()
print("... |
return a list of Is where the data first crosses above threshold. | def where_cross(data,threshold):
"""return a list of Is where the data first crosses above threshold."""
Is=np.where(data>threshold)[0]
Is=np.concatenate(([0],Is))
Ds=Is[:-1]-Is[1:]+1
return Is[np.where(Ds)[0]+1] |
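A usage example for `where_cross`: it returns one index per upward crossing, i.e. the first sample above threshold after each excursion below it (the function is restated here so the snippet runs on its own):

```python
import numpy as np

def where_cross(data, threshold):
    """As defined above: indices where data first crosses above threshold."""
    Is = np.where(data > threshold)[0]        # all samples above threshold
    Is = np.concatenate(([0], Is))
    Ds = Is[:-1] - Is[1:] + 1                 # zero within a run, nonzero at gaps
    return Is[np.where(Ds)[0] + 1]            # first index of each run

data = np.array([0, 0, 5, 6, 0, 7, 8, 0, 9])
print(where_cross(data, 4))                   # -> [2 5 8]
```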
alternative to pylab.show() that updates the IPython window. | def show(closeToo=False):
"""alternative to pylab.show() that updates IPython window."""
IPython.display.display(pylab.gcf())
if closeToo:
pylab.close('all') |
Return [{},{},{}] as a 2d matrix. | def originFormat_listOfDicts(l):
"""Return [{},{},{}] as a 2d matrix."""
titles=[]
for d in l:
for k in d.keys():
if not k in titles:
titles.append(k)
titles.sort()
data=np.empty((len(l),len(titles)))*np.nan
for y in range(len(l)):
for x in range(len(t... |
Try to format anything as a 2D matrix with column names. | def originFormat(thing):
"""Try to format anything as a 2D matrix with column names."""
if type(thing) is list and type(thing[0]) is dict:
return originFormat_listOfDicts(thing)
if type(thing) is list and type(thing[0]) is list:
return originFormat_listOfDicts(dictFlat(thing))
else:
... |
save something to a pickle file | def pickle_save(thing,fname):
"""save something to a pickle file"""
pickle.dump(thing, open(fname,"wb"),pickle.HIGHEST_PROTOCOL)
return thing |
convert a dictionary to a pretty formatted string. | def msgDict(d,matching=None,sep1="=",sep2="\n",sort=True,cantEndWith=None):
"""convert a dictionary to a pretty formatted string."""
msg=""
if "record" in str(type(d)):
keys=d.dtype.names
else:
keys=d.keys()
if sort:
keys=sorted(keys)
for key in keys:
if key[0]=="... |
given a groups file, return a dict of groups. Example: ### GROUP: TR 16602083 16608059 ### GROUP: TU 16504000 16507011 | def groupsFromKey(keyFile='./key.txt'):
"""
given a groups file, return a dict of groups.
Example:
### GROUP: TR
16602083
16608059
### GROUP: TU
16504000
16507011
"""
groups={}
thisGroup="?"
with open(keyFile) as f:
raw=f.read().split("... |
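A self-contained version of the parsing `groupsFromKey` describes, run against the exact example from its docstring. '### GROUP:' lines open a group and following lines are member IDs; `groups_from_text` is a hypothetical stand-in that takes raw text instead of a filename:

```python
def groups_from_text(raw):
    """Parse '### GROUP: name' headers followed by one ID per line."""
    groups, this_group = {}, "?"
    for line in raw.splitlines():
        line = line.strip()
        if line.startswith("### GROUP:"):
            this_group = line.split(":", 1)[1].strip()
            groups[this_group] = []
        elif line:
            groups.setdefault(this_group, []).append(line)
    return groups

demo = "### GROUP: TR\n16602083\n16608059\n### GROUP: TU\n16504000\n16507011"
assert groups_from_text(demo) == {"TR": ["16602083", "16608059"],
                                  "TU": ["16504000", "16507011"]}
```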
return an abf of the *FIRST* of every type of thing. | def findRelevantData(fileList,abfs):
"""return an abf of the *FIRST* of every type of thing."""
relevant=[]
things={}
for abf in abfs:
for fname in fileList:
if abf in fname and not fname in relevant:
relevant.append(fname)
for item in sorted(relevant):
th... |
determine the comment cooked in the protocol. | def determineProtocol(fname):
"""determine the comment cooked in the protocol."""
f=open(fname,'rb')
raw=f.read(5000) #it should be in the first 5k of the file
f.close()
protoComment="unknown"
if b"SWHLab4[" in raw:
protoComment=raw.split(b"SWHLab4[")[1].split(b"]",1)[0]
elif b"SWH["... |
convert silly C:\\names\\like\\this.txt to c:/names/like/this.txt | def forwardSlash(listOfFiles):
"""convert silly C:\\names\\like\\this.txt to c:/names/like/this.txt"""
for i,fname in enumerate(listOfFiles):
listOfFiles[i]=fname.replace("\\","/")
return listOfFiles |