Search is not available for this dataset
text
stringlengths
75
104k
def unseal(self, data, return_options=False): '''Unseal data''' data = self._remove_magic(data) data = urlsafe_nopadding_b64decode(data) options = self._read_header(data) data = self._add_magic(data) data = self._unsign_data(data, options) data = self._remove_mag...
def verify_signature(self, data):
    """Check the signature on sealed data without unsealing it.

    Raises via _unsign_data if the signature does not verify.
    """
    stripped = self._remove_magic(data)
    decoded = urlsafe_nopadding_b64decode(stripped)
    options = self._read_header(decoded)
    # _unsign_data expects the magic prefix back in place
    self._unsign_data(self._add_magic(decoded), options)
def _encode(self, data, algorithm, key=None): '''Encode data with specific algorithm''' if algorithm['type'] == 'hmac': return data + self._hmac_generate(data, algorithm, key) elif algorithm['type'] == 'aes': return self._aes_encrypt(data, algorithm, key) elif al...
def _decode(self, data, algorithm, key=None): '''Decode data with specific algorithm''' if algorithm['type'] == 'hmac': verify_signature = data[-algorithm['hash_size']:] data = data[:-algorithm['hash_size']] signature = self._hmac_generate(data, algorithm, key) ...
def _sign_data(self, data, options): '''Add signature to data''' if options['signature_algorithm_id'] not in self.signature_algorithms: raise Exception('Unknown signature algorithm id: %d' % options['signature_algorithm_id']) signature_algorithm = \ ...
def _unsign_data(self, data, options): '''Verify and remove signature''' if options['signature_algorithm_id'] not in self.signature_algorithms: raise Exception('Unknown signature algorithm id: %d' % options['signature_algorithm_id']) signature_algorithm ...
def _encrypt_data(self, data, options): '''Encrypt data''' if options['encryption_algorithm_id'] not in self.encryption_algorithms: raise Exception('Unknown encryption algorithm id: %d' % options['encryption_algorithm_id']) encryption_algorithm = \ ...
def _decrypt_data(self, data, options): '''Decrypt data''' if options['encryption_algorithm_id'] not in self.encryption_algorithms: raise Exception('Unknown encryption algorithm id: %d' % options['encryption_algorithm_id']) encryption_algorithm = \ ...
def _serialize_data(self, data, options): '''Serialize data''' serialization_algorithm_id = options['serialization_algorithm_id'] if serialization_algorithm_id not in self.serialization_algorithms: raise Exception('Unknown serialization algorithm id: %d' ...
def _unserialize_data(self, data, options): '''Unserialize data''' serialization_algorithm_id = options['serialization_algorithm_id'] if serialization_algorithm_id not in self.serialization_algorithms: raise Exception('Unknown serialization algorithm id: %d' ...
def _compress_data(self, data, options): '''Compress data''' compression_algorithm_id = options['compression_algorithm_id'] if compression_algorithm_id not in self.compression_algorithms: raise Exception('Unknown compression algorithm id: %d' % compressio...
def _decompress_data(self, data, options): '''Decompress data''' compression_algorithm_id = options['compression_algorithm_id'] if compression_algorithm_id not in self.compression_algorithms: raise Exception('Unknown compression algorithm id: %d' % compre...
def _remove_magic(self, data): '''Verify and remove magic''' if not self.magic: return data magic_size = len(self.magic) magic = data[:magic_size] if magic != self.magic: raise Exception('Invalid magic') data = data[magic_size:] return d...
def _add_header(self, data, options): '''Add header to data''' # pylint: disable=W0142 version_info = self._get_version_info(options['version']) flags = options['flags'] header_flags = dict( (i, str(int(j))) for i, j in options['flags'].iteritems()) header...
def _read_header(self, data): '''Read header from data''' # pylint: disable=W0212 version = self._read_version(data) version_info = self._get_version_info(version) header_data = data[:version_info['header_size']] header = version_info['header'] header = header._...
def _remove_header(self, data, options): '''Remove header from data''' version_info = self._get_version_info(options['version']) header_size = version_info['header_size'] if options['flags']['timestamp']: header_size += version_info['timestamp_size'] data = data[he...
def _read_version(self, data):
    """Read the header version byte from data and validate it.

    Returns the version number; raises if it is not a known version.
    """
    version = ord(data[0])
    if version in self.VERSIONS:
        return version
    raise Exception('Version not defined: %d' % version)
def _get_algorithm_info(self, algorithm_info): '''Get algorithm info''' if algorithm_info['algorithm'] not in self.ALGORITHMS: raise Exception('Algorithm not supported: %s' % algorithm_info['algorithm']) algorithm = self.ALGORITHMS[algorithm_info['algori...
def _generate_key(pass_id, passphrases, salt, algorithm): '''Generate and return PBKDF2 key''' if pass_id not in passphrases: raise Exception('Passphrase not defined for id: %d' % pass_id) passphrase = passphrases[pass_id] if len(passphrase) < 32: raise Excepti...
def _update_dict(data, default_data, replace_data=False): '''Update algorithm definition type dictionaries''' if not data: data = default_data.copy() return data if not isinstance(data, dict): raise TypeError('Value not dict type') if len(data) > 255...
def _get_hashlib(digestmode): '''Generate HMAC hash''' if digestmode == 'sha1': return SHA if digestmode == 'sha256': return SHA256 elif digestmode == 'sha384': return SHA384 elif digestmode == 'sha512': return SHA512 else:...
def _hmac_generate(data, algorithm, key):
    """Generate an HMAC digest for data using the algorithm's hash subtype."""
    digest_module = EncryptedPickle._get_hashlib(algorithm['subtype'])
    return HMAC.new(key, data, digest_module).digest()
def _aes_encrypt(data, algorithm, key): '''AES encrypt''' if algorithm['subtype'] == 'cbc': mode = AES.MODE_CBC else: raise Exception('AES subtype not supported: %s' % algorithm['subtype']) iv_size = algorithm['iv_size'] block...
def _aes_decrypt(data, algorithm, key): '''AES decrypt''' if algorithm['subtype'] == 'cbc': mode = AES.MODE_CBC else: raise Exception('AES subtype not supported: %s' % algorithm['subtype']) iv_size = algorithm['iv_size'] if '...
def _zlib_compress(data, algorithm): '''GZIP compress''' if algorithm['subtype'] == 'deflate': encoder = zlib.compressobj(algorithm['level'], zlib.DEFLATED, -15) compressed = encoder.compress(data) compressed += encoder.flush() return compressed ...
def getTableOfContents(self): """ This function populates the internal tableOfContents list with the contents of the zip file TOC. If the server does not support ranged requests, this will raise and exception. It will also throw an exception if the TOC cannot be found. """ ...
def extractFile(self, filename): """ This function will extract a single file from the remote zip without downloading the entire zip file. The filename argument should match whatever is in the 'filename' key of the tableOfContents. """ files = [x for x in self.tableOfCont...
def make_postcard(self, npix=300, shape=(1070, 1132), buffer_size=15): """ Develop a "postcard" region around the target star. Other stars in this postcard will be used as possible reference stars. Args: npix: The size of the postcard region. The region will be a sq...
def find_other_sources(self, edge_lim = 0.015, min_val = 5000, ntargets = 250, extend_region_size=3, remove_excess=4, plot_flag = False, plot_window=15): """ Identify apertures for all sources on the postcard, both for the target and potenti...
def do_photometry(self): """ Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data in each orientation. This function is called by other functions and generally the user will not need to interact with it directly. """ ...
def generate_panel(self, img): """ Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions and generally not called by the user directly. Args: img: The data frame to be passed through to be plotted. A cutout of the ``integrated...
def adjust_aperture(self, image_region=15, ignore_bright=0): """ Develop a panel showing the current aperture and the light curve as judged from that aperture. Clicking on individual pixels on the aperture will toggle those pixels on or off into the aperture (which will be updated after ...
def data_for_target(self, do_roll=True, ignore_bright=0): """ Determine the normalized photometry, accounting for effects shared by reference stars. Does not provide the opportunity to adjust the aperture Args: image_region: If ``True`` allow the aperture to be shif...
def calc_fluxes(self, min_flux = 5000, outlier_iterations=5, max_outlier_obs=4, outlier_limit=1.7): """ Determine the suitable reference stars, and then the total flux in those stars and in the target star in each epoch Args: min_flux: The si...
def calc_centroids(self): """ Identify the centroid positions for the target star at all epochs. Useful for verifying that there is no correlation between flux and position, as might be expected for high proper motion stars. """ self.cm = np.zeros((len(self.postcard), 2)) ...
def define_spotsignal(self): """ Identify the "expected" flux value at the time of each observation based on the Kepler long-cadence data, to ensure variations observed are not the effects of a single large starspot. Only works if the target star was targeted for long or short cadence ...
def model_uncert(self): """ Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper. Based on the kepcal package of Dan Foreman-Mackey. """ Y = self.photometry_array.T Y /= np.median(Y, axis=1)[:, None] C = np.median(Y, axis=0...
def _print(self, line=''):
    """Append a line to the internal output buffer.

    Indentation is one tab per level, taken from self.tabs.

    Keyword arguments:
    line -- line to append
    """
    indent = '\t' * self.tabs
    self.lines.append('{}{}'.format(indent, line))
def _dump_enum(self, e, top=''): """Dump single enum type. Keyword arguments: top -- top namespace """ self._print() self._print('enum {} {{'.format(e.name)) self.defines.append('{}.{}'.format(top,e.name)) self.tabs+=1 for v in e....
def _dump_field(self, fd): """Dump single field. """ v = {} v['label'] = Pbd.LABELS[fd.label] v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type] v['name'] = fd.name v['number'] = fd.number v['default'] = '[default = {}]'.format(fd.de...
def _dump_message(self, m, top=''): """Dump single message type. Keyword arguments: top -- top namespace """ self._print() self._print('message {} {{'.format(m.name)) self.defines.append('{}.{}'.format(top, m.name)) self.tabs+=1 f...
def _walk(self, fd):
    """Walk a descriptor and dump (disasm) its enum and message types."""
    top = '.{}'.format(fd.package) if len(fd.package) > 0 else ''
    for enum_type in fd.enum_type:
        self._dump_enum(enum_type, top)
    for message_type in fd.message_type:
        self._dump_message(message_type, top)
def disassemble(self): """Disassemble serialized protocol buffers file. """ ser_pb = open(self.input_file, 'rb').read() # Read serialized pb file fd = FileDescriptorProto() fd.ParseFromString(ser_pb) self.name = fd.name self._print('// Reversed ...
def dump(self, out_dir='.'):
    """Write the reconstructed proto file into the given directory.

    Keyword arguments:
    out_dir -- dump directory. Default='.'
    """
    destination = out_dir + os.sep + self.name
    with open(destination, 'w') as out:
        out.write('\n'.join(self.lines))
def find_imports(self, pbds): """Find all missing imports in list of Pbd instances. """ # List of types used, but not defined imports = list(set(self.uses).difference(set(self.defines))) # Clumpsy, but enought for now for imp in imports: for p in pbd...
def fasta_file_to_dict(fasta_file, id=True, header=False, seq=False): """Returns a dict from a fasta file and the number of sequences as the second return value. fasta_file can be a string path or a file object. The key of fasta_dict can be set using the keyword arguments and results in a combination of...
def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None): """Write fasta_dict to fasta_file :param fasta_dict: returned by fasta_file_to_dict :param fasta_file: output file can be a string path or a file object :param line_char_limit: None = no limit (default) :return: None """ ...
def add_line_error(self, line_data, error_info, log_level=logging.ERROR): """Helper function to record and log an error message :param line_data: dict :param error_info: dict :param logger: :param log_level: int :return: """ if not error_info: return ...
def check_parent_boundary(self): """ checks whether child features are within the coordinate boundaries of parent features :return: """ for line in self.lines: for parent_feature in line['parents']: ok = False for parent_line in parent...
def check_phase(self): """ 1. get a list of CDS with the same parent 2. sort according to strand 3. calculate and validate phase """ plus_minus = set(['+', '-']) for k, g in groupby(sorted([line for line in self.lines if line['line_type'] == 'feature' and line['t...
def check_reference(self, sequence_region=False, fasta_embedded=False, fasta_external=False, check_bounds=True, check_n=True, allowed_num_of_n=0, feature_types=('CDS',)): """ Check seqid, bounds and the number of Ns in each feature using one or more reference sources. Seqid check: check if the ...
def parse(self, gff_file, strict=False): """Parse the gff file into the following data structures: * lines(list of line_data(dict)) - line_index(int): the index in lines - line_raw(str) - line_type(str in ['feature', 'directive', 'comment', 'blank', 'unknown']) ...
def descendants(self, line_data): """ BFS graph algorithm :param line_data: line_data(dict) with line_data['line_index'] or line_index(int) :return: list of line_data(dict) """ # get start node try: start = line_data['line_index'] except TypeEr...
def adopt(self, old_parent, new_parent): """ Transfer children from old_parent to new_parent :param old_parent: feature_id(str) or line_index(int) or line_data(dict) or feature :param new_parent: feature_id(str) or line_index(int) or line_data(dict) :return: List of children tra...
def remove(self, line_data, root_type=None): """ Marks line_data and all of its associated feature's 'line_status' as 'removed', does not actually remove the line_data from the data structure. The write function checks the 'line_status' when writing the gff file. Find the root parent of ...
def sequence(self, line_data, child_type=None, reference=None): """ Get the sequence of line_data, according to the columns 'seqid', 'start', 'end', 'strand'. Requires fasta reference. When used on 'mRNA' type line_data, child_type can be used to specify which kind of sequence to return:...
def abfIDfromFname(fname):
    """given a filename, return the ABFs ID string."""
    # ID is the file name with directory and extension stripped off
    base = os.path.basename(os.path.abspath(fname))
    return os.path.splitext(base)[0]
def abfProtocol(fname): """Determine the protocol used to record an ABF file""" f=open(fname,'rb') raw=f.read(30*1000) #it should be in the first 30k of the file f.close() raw=raw.decode("utf-8","ignore") raw=raw.split("Clampex")[1].split(".pro")[0] protocol = os.path.basename(raw) # the who...
def headerHTML(header,fname): """given the bytestring ABF header, make and launch HTML.""" html="<html><body><code>" html+="<h2>%s</h2>"%(fname) html+=pprint.pformat(header, indent=1) html=html.replace("\n",'<br>').replace(" ","&nbsp;") html=html.replace(r"\x00","") ...
def setsweep(self, sweep=0, channel=0): """set the sweep and channel of an ABF. Both start at 0.""" try: sweep=int(sweep) except: self.log.error("trying to set sweep to [%s]",sweep) return if sweep<0: sweep=self.sweeps-1-sweep # if negative...
def setsweeps(self):
    """iterate over every sweep"""
    for sweep_number in range(self.sweeps):
        self.setsweep(sweep_number)
        yield self.sweep
def comments_load(self): """read the header and populate self with information about comments""" self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[] self.comments=0 # will be >0 if comments exist self.comment_text="" try: # this used to work ...
def generate_protocol(self): """ Recreate the command stimulus (protocol) for the current sweep. It's not stored point by point (that's a waste of time and memory!) Instead it's stored as a few (x,y) points which can be easily graphed. TODO: THIS for segment in abf.ABFre...
def get_protocol(self, sweep):
    """
    given a sweep, return the protocol as [Xs,Ys].
    This is good for plotting/recreating the protocol trace.
    There may be duplicate numbers.
    """
    self.setsweep(sweep)
    Xs = list(self.protoX)
    Ys = list(self.protoY)
    return Xs, Ys
def get_protocol_sequence(self,sweep): """ given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. """ self.setsweep(sweep) return list(self.protoSeqX),list(se...
def epochTimes(self,nEpoch=2): """ alternative to the existing abf protocol stuff return the start/stop time of an epoch. Epoch start at zero. A=0, B=1, C=2, D=3, ... """ times=[] durations=[] for epoch in self.header['dictEpochInfoPerDAC'][self.ch...
def average(self,t1=0,t2=None,setsweep=False): """return the average of part of the current sweep.""" if setsweep: self.setsweep(setsweep) if t2 is None or t2>self.sweepLength: t2=self.sweepLength self.log.debug("resetting t2 to [%f]",t2) t1=max(t1,0) ...
def averageSweep(self,sweepFirst=0,sweepLast=None): """ Return a sweep which is the average of multiple sweeps. For now, standard deviation is lost. """ if sweepLast is None: sweepLast=self.sweeps-1 nSweeps=sweepLast-sweepFirst+1 runningSum=np.zeros(le...
def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False): """create kernel based on this ABF info.""" sigmaMS=sizeMS/10 if sigmaMS is None else sigmaMS size,sigma=sizeMS*self.pointsPerMs,sigmaMS*self.pointsPerMs self.kernel=swhlab.common.kernel_gaussian(size,sigma,forwardOnly) ...
def sweepYfiltered(self):
    """
    Return the filtered sweepY of the current sweep.
    Only valid once self.kernel has been generated.
    """
    kernel = self.kernel
    assert kernel is not None
    return swhlab.common.convolve(self.sweepY, kernel)
def phasicNet(self,biggestEvent=50,m1=.5,m2=None): """ Calculates the net difference between positive/negative phasic events Returns return the phasic difference value of the current sweep. Arguments: biggestEvent (int): the size of the largest event anticipated ...
def output_touch(self):
    """ensure the ./swhlab/ output folder (self.outFolder) exists.

    Creates missing parent folders too, and tolerates the folder being
    created by another process between the check and the creation.
    """
    if not os.path.exists(self.outFolder):
        self.log.debug("creating %s", self.outFolder)
        try:
            # makedirs (not mkdir) so missing parent folders are created too
            os.makedirs(self.outFolder)
        except OSError:
            # another process may have created the folder between the
            # existence check and makedirs (TOCTOU); only re-raise if it
            # still does not exist
            if not os.path.isdir(self.outFolder):
                raise
def dictFlat(l): """Given a list of list of dicts, return just the dicts.""" if type(l) is dict: return [l] if "numpy" in str(type(l)): return l dicts=[] for item in l: if type(item)==dict: dicts.append(item) elif type(item)==list: for item2 in...
def listCount(l):
    """returns len() of each item in a list, as a list.

    Note: replaces the items of l in place and returns the same list.
    """
    for position, item in enumerate(l):
        l[position] = len(item)
    return l
def dictVals(l, key):
    """Return all 'key' from a list of dicts. (or list of list of dicts)

    Missing keys produce NaN entries.
    """
    dicts = dictFlat(l)
    vals = np.full(len(dicts), np.nan)
    for position, entry in enumerate(dicts):
        if key in entry:
            vals[position] = entry[key]
    return vals
def dictAvg(listOfDicts,key,stdErr=False): """Given a list (l) of dicts (d), return AV and SD.""" vals=dictVals(listOfDicts,key) if len(vals) and np.any(vals): av=np.nanmean(vals) er=np.nanstd(vals) if stdErr: er=er/np.sqrt(np.count_nonzero(~np.isnan(er))) else: ...
def dummyListOfDicts(size=100): """ returns a list (of the given size) of dicts with fake data. some dictionary keys are missing for some of the items. """ titles="ahp,halfwidth,peak,expT,expI,sweep".split(",") ld=[] #list of dicts for i in range(size): d={} for t in titles: ...
def matrixValues(matrix, key):
    """given a key, return a list of values from the matrix with that key."""
    assert key in matrix.dtype.names
    column = matrix.dtype.names.index(key)
    values = np.full(len(matrix), np.nan)
    for row in range(len(matrix)):
        values[row] = matrix[row][column]
    return values
def matrixToDicts(data): """given a recarray, return it as a list of dicts.""" # 1D array if "float" in str(type(data[0])): d={} for x in range(len(data)): d[data.dtype.names[x]]=data[x] return d # 2D array l=[] for y in range(len(data)): d={} ...
def matrixfromDicts(dicts): """ Give a list of dicts (or list of list of dicts) return a structured array. Headings will be sorted in alphabetical order. """ if 'numpy' in str(type(dicts)): return dicts #already an array? names=set([]) dicts=dictFlat(dicts) for item in dicts: ...
def htmlListToTR(l,trClass=None,tdClass=None,td1Class=None): """ turns a list into a <tr><td>something</td></tr> call this when generating HTML tables dynamically. """ html="<tr>" for item in l: if 'array' in str(type(item)): item=item[0] #TODO: why is this needed htm...
def html_temp_launch(html):
    """given text, make it a temporary HTML file and launch it.

    Writes to <tempdir>/swhlab/temp.html, creating the folder if needed.
    """
    folder = os.path.join(tempfile.gettempdir(), "swhlab")
    if not os.path.exists(folder):
        # original assumed this folder already existed and crashed otherwise
        os.makedirs(folder)
    fname = os.path.join(folder, "temp.html")
    with open(fname, 'w') as f:
        f.write(html)
    webbrowser.open(fname)
def checkOut(thing,html=True): """show everything we can about an object's projects and methods.""" msg="" for name in sorted(dir(thing)): if not "__" in name: msg+="<b>%s</b>\n"%name try: msg+=" ^-VALUE: %s\n"%getattr(thing,name)() except: ...
def matrixToWks(data,names=None,units=None,bookName=None,sheetName=" ",xCol=None): """ Put 2d numpy data into an Origin worksheet. If bookname and sheetname are given try to load data into that book/sheet. If the book/sheet doesn't exist, create it. """ if type(data) is list: data=matrix...
def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None): """Put 2d numpy data into a temporary HTML file.""" if not names: names=[""]*len(data[0]) if data.dtype.names: names=list(data.dtype.names) if not units: units=[""]*len(data[0]) f...
def XMLtoPython(xmlStr=r"C:\Apps\pythonModules\GSTemp.xml"): """ given a string or a path to an XML file, return an XML object. """ #TODO: this absolute file path crazy stuff needs to stop! if os.path.exists(xmlStr): with open(xmlStr) as f: xmlStr=f.read() print(xmlStr) p...
def algo_exp(x, m, t, b):
    """mono-exponential curve: m * exp(-t * x) + b."""
    return b + m * np.exp(-t * x)
def fit_exp(y,graphToo=False): """Exponential fit. Returns [multiplier, t, offset, time constant]""" x=np.arange(len(y)) try: params, cv = scipy.optimize.curve_fit(algo_exp, x, y, p0=(1,1e-6,1)) except: print(" !! curve fit failed (%.02f points)"%len(x)) return np.nan,np.nan,np.n...
def numpyAlignXY(data): """ given a numpy array (XYXYXY columns), return it aligned. data returned will be XYYY. NANs may be returned. """ print(data) Xs=data.flatten()[::2] # get all X values Xs=Xs[~np.isnan(Xs)] # remove nans Xs=sorted(list(set(Xs))) # eliminate duplicates then sort it...
def filter_gaussian(Ys,sigma,plotToo=False): """simple gaussian convolution. Returns same # of points as gotten.""" timeA=time.time() window=scipy.signal.gaussian(len(Ys),sigma) window/=sum(window) Ys2=np.convolve(Ys,window,'same') print("LEN:",len(Ys2),len(Ys)) timeB=time.time() print("...
def where_cross(data, threshold):
    """return a list of Is where the data first crosses above threshold."""
    above = np.where(data > threshold)[0]
    # pad with 0 so a crossing at index 0 is detected too
    padded = np.concatenate(([0], above))
    # a new run of above-threshold samples starts wherever consecutive
    # indices are not adjacent
    new_run = np.diff(padded) != 1
    return padded[1:][new_run]
def show(closeToo=False):
    """alternative to pylab.show() that updates IPython window."""
    current_figure = pylab.gcf()
    IPython.display.display(current_figure)
    if closeToo:
        pylab.close('all')
def originFormat_listOfDicts(l): """Return [{},{},{}] as a 2d matrix.""" titles=[] for d in l: for k in d.keys(): if not k in titles: titles.append(k) titles.sort() data=np.empty((len(l),len(titles)))*np.nan for y in range(len(l)): for x in range(len(t...
def originFormat(thing): """Try to format anything as a 2D matrix with column names.""" if type(thing) is list and type(thing[0]) is dict: return originFormat_listOfDicts(thing) if type(thing) is list and type(thing[0]) is list: return originFormat_listOfDicts(dictFlat(thing)) else: ...
def pickle_save(thing, fname):
    """save something to a pickle file and return it.

    Uses a context manager so the file handle is always closed
    (the original left the handle open).
    """
    with open(fname, "wb") as f:
        pickle.dump(thing, f, pickle.HIGHEST_PROTOCOL)
    return thing
def msgDict(d,matching=None,sep1="=",sep2="\n",sort=True,cantEndWith=None): """convert a dictionary to a pretty formatted string.""" msg="" if "record" in str(type(d)): keys=d.dtype.names else: keys=d.keys() if sort: keys=sorted(keys) for key in keys: if key[0]=="...
def groupsFromKey(keyFile='./key.txt'): """ given a groups file, return a dict of groups. Example: ### GROUP: TR 16602083 16608059 ### GROUP: TU 16504000 16507011 """ groups={} thisGroup="?" with open(keyFile) as f: raw=f.read().split("...
def findRelevantData(fileList,abfs): """return an abf of the *FIRST* of every type of thing.""" relevant=[] things={} for abf in abfs: for fname in fileList: if abf in fname and not fname in relevant: relevant.append(fname) for item in sorted(relevant): th...
def determineProtocol(fname): """determine the comment cooked in the protocol.""" f=open(fname,'rb') raw=f.read(5000) #it should be in the first 5k of the file f.close() protoComment="unknown" if b"SWHLab4[" in raw: protoComment=raw.split(b"SWHLab4[")[1].split(b"]",1)[0] elif b"SWH["...
def forwardSlash(listOfFiles):
    """convert silly C:\\names\\like\\this.txt to c:/names/like/this.txt

    Rewrites the list in place and returns it.
    """
    listOfFiles[:] = [fname.replace("\\", "/") for fname in listOfFiles]
    return listOfFiles