_id: string (lengths 2–7)
title: string (lengths 1–88)
partition: string (3 classes)
text: string (lengths 75–19.8k)
language: string (1 class)
meta_information: dict
q263900
ABFplot.figure_protocol
validation
def figure_protocol(self):
    """plot the current sweep protocol."""
    self.log.debug("creating overlayed protocols plot")
    self.figure()
    plt.plot(self.abf.protoX, self.abf.protoY, color='r')
    self.marginX = 0
    self.decorate(protocol=True)
python
{ "resource": "" }
q263901
ABFplot.figure_protocols
validation
def figure_protocols(self):
    """plot the protocol of all sweeps."""
    self.log.debug("creating overlayed protocols plot")
    self.figure()
    for sweep in range(self.abf.sweeps):
        self.abf.setsweep(sweep)
        plt.plot(self.abf.protoX, self.abf.protoY, color='r')
    self.marginX = 0
    self.decorate(protocol=True)
python
{ "resource": "" }
q263902
clampfit_rename
validation
def clampfit_rename(path, char):
    """
    Given ABFs and TIFs formatted long style, rename each of them to
    prefix their number with a different character.
    Example: 2017_10_11_0011.abf
    Becomes: 2017_10_11_?011.abf
    where ? can be any character.
    """
    assert len(char) == 1 and type(char) == str, "replacement character must be a single character"
    assert os.path.exists(path), "path doesn't exist"
    files = sorted(os.listdir(path))
    files = [x for x in files if len(x) > 18 and x[4] + x[7] + x[10] == '___']
    for fname in files:
        fname2 = list(fname)
        fname2[11] = char
        fname2 = "".join(fname2)
        if fname == fname2:
            print(fname, "==", fname2)
        else:
            print(fname, "->", fname2)
            # fname = os.path.join(path, fname)
            # fname2 = os.path.join(path, fname2)
            # if not os.path.exists(fname2):
            #     os.rename(fname, fname2)
    return
python
{ "resource": "" }
q263903
filesByExtension
validation
def filesByExtension(fnames):
    """given a list of files, return a dict organized by extension."""
    byExt = {"abf": [], "jpg": [], "tif": []}  # prime it with empties
    for fname in fnames:
        ext = os.path.splitext(fname)[1].replace(".", '').lower()
        if not ext in byExt.keys():
            byExt[ext] = []
        byExt[ext] = byExt[ext] + [fname]
    return byExt
python
{ "resource": "" }
q263904
filesByCell
validation
def filesByCell(fnames, cells):
    """given files and cells, return a dict of files grouped by cell."""
    byCell = {}
    fnames = smartSort(fnames)
    days = list(set([elem[:5] for elem in fnames if elem.endswith(".abf")]))  # so pythonic!
    for day in smartSort(days):
        parent = None
        for i, fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(".abf")]):
            ID = os.path.splitext(fname)[0]
            if len([x for x in fnames if x.startswith(ID)]) - 1:
                parent = ID
            if not parent in byCell:
                byCell[parent] = []
            byCell[parent] = byCell[parent] + [fname]
    return byCell
python
{ "resource": "" }
q263905
ABFindex.folderScan
validation
def folderScan(self, abfFolder=None):
    """populate class properties relating to files in the folder."""
    if abfFolder is None and 'abfFolder' in dir(self):
        abfFolder = self.abfFolder
    else:
        self.abfFolder = abfFolder
    self.abfFolder = os.path.abspath(self.abfFolder)
    self.log.info("scanning [%s]", self.abfFolder)
    if not os.path.exists(self.abfFolder):
        self.log.error("path doesn't exist: [%s]", abfFolder)
        return
    self.abfFolder2 = os.path.abspath(self.abfFolder + "/swhlab/")
    if not os.path.exists(self.abfFolder2):
        self.log.error("./swhlab/ doesn't exist. creating it...")
        os.mkdir(self.abfFolder2)
    self.fnames = os.listdir(self.abfFolder)
    self.fnames2 = os.listdir(self.abfFolder2)
    self.log.debug("./ has %d files", len(self.fnames))
    self.log.debug("./swhlab/ has %d files", len(self.fnames2))
    self.fnamesByExt = filesByExtension(self.fnames)
    if not "abf" in self.fnamesByExt.keys():
        self.log.error("no ABF files found")
    self.log.debug("found %d ABFs", len(self.fnamesByExt["abf"]))
    self.cells = findCells(self.fnames)  # list of cells by their ID
    self.log.debug("found %d cells" % len(self.cells))
    self.fnamesByCell = filesByCell(self.fnames, self.cells)  # only ABFs
    self.log.debug("grouped cells by number of source files: %s" %
                   str([len(self.fnamesByCell[elem]) for elem in self.fnamesByCell]))
python
{ "resource": "" }
q263906
ABFindex.html_index
validation
def html_index(self, launch=False, showChildren=False):
    """
    generate list of cells with links. keep this simple.
    automatically generates splash page and regenerates frames.
    """
    self.makePics()  # ensure all pics are converted
    # generate menu
    html = '<a href="index_splash.html" target="content">./%s/</a><br>' % os.path.basename(self.abfFolder)
    for ID in smartSort(self.fnamesByCell.keys()):
        link = ''
        if ID + ".html" in self.fnames2:
            link = 'href="%s.html" target="content"' % ID
        html += ('<a %s>%s</a><br>' % (link, ID))  # show the parent ABF (ID)
        if showChildren:
            for fname in self.fnamesByCell[ID]:
                thisID = os.path.splitext(fname)[0]
                files2 = [x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(".html")]
                html += '<i>%s</i>' % thisID  # show the child ABF
                if len(files2):
                    html += ' (%s)' % len(files2)  # show number of supporting files
                html += '<br>'
            html += "<br>"
    style.save(html, self.abfFolder2 + "/index_menu.html")
    self.html_index_splash()  # make splash page
    style.frames(self.abfFolder2 + "/index.html", launch=launch)
python
{ "resource": "" }
q263907
ABFindex.html_singleAll
validation
def html_singleAll(self, template="basic"):
    """generate a data view for every ABF in the project folder."""
    for fname in smartSort(self.cells):
        if template == "fixed":
            self.html_single_fixed(fname)
        else:
            self.html_single_basic(fname)
python
{ "resource": "" }
q263908
proto_01_01_HP010
validation
def proto_01_01_HP010(abf=exampleABF):
    """hyperpolarization step. Use to calculate tau and stuff."""
    swhlab.memtest.memtest(abf)  # knows how to do IC memtest
    swhlab.memtest.checkSweep(abf)  # lets you eyeball check how it did
    swhlab.plot.save(abf, tag="tau")
python
{ "resource": "" }
q263909
proto_01_12_steps025
validation
def proto_01_12_steps025(abf=exampleABF):
    """IC steps. Use to determine gain function."""
    swhlab.ap.detect(abf)
    standard_groupingForInj(abf, 200)
    for feature in ['freq', 'downslope']:
        swhlab.ap.plot_values(abf, feature, continuous=False)  # plot AP info
        swhlab.plot.save(abf, tag='A_' + feature)
    swhlab.plot.gain(abf)  # easy way to do a gain function!
    swhlab.plot.save(abf, tag='05-gain')
python
{ "resource": "" }
q263910
proto_01_13_steps025dual
validation
def proto_01_13_steps025dual(abf=exampleABF):
    """IC steps. See how hyperpol. step affects things."""
    swhlab.ap.detect(abf)
    standard_groupingForInj(abf, 200)
    for feature in ['freq', 'downslope']:
        swhlab.ap.plot_values(abf, feature, continuous=False)  # plot AP info
        swhlab.plot.save(abf, tag='A_' + feature)
    f1 = swhlab.ap.getAvgBySweep(abf, 'freq', None, 1)
    f2 = swhlab.ap.getAvgBySweep(abf, 'freq', 1, None)
    f1 = np.nan_to_num(f1)
    f2 = np.nan_to_num(f2)
    Xs = abf.clampValues(abf.dataX[int(abf.protoSeqX[1] + .01)])
    swhlab.plot.new(abf, title="gain function", xlabel="command current (pA)",
                    ylabel="average inst. freq. (Hz)")
    pylab.plot(Xs, f1, '.-', ms=20, alpha=.5, label="step 1", color='b')
    pylab.plot(Xs, f2, '.-', ms=20, alpha=.5, label="step 2", color='r')
    pylab.legend(loc='upper left')
    pylab.axis([Xs[0], Xs[-1], None, None])
    swhlab.plot.save(abf, tag='gain')
python
{ "resource": "" }
q263911
proto_02_01_MT70
validation
def proto_02_01_MT70(abf=exampleABF):
    """repeated membrane tests."""
    standard_overlayWithAverage(abf)
    swhlab.memtest.memtest(abf)
    swhlab.memtest.checkSweep(abf)
    swhlab.plot.save(abf, tag='check', resize=False)
python
{ "resource": "" }
q263912
proto_02_03_IVfast
validation
def proto_02_03_IVfast(abf=exampleABF):
    """fast sweeps, 1 step per sweep, for clean IV without fast currents."""
    av1, sd1 = swhlab.plot.IV(abf, .6, .9, True)
    swhlab.plot.save(abf, tag='iv1')
    Xs = abf.clampValues(.6)  # generate IV clamp values
    abf.saveThing([Xs, av1], 'iv')
python
{ "resource": "" }
q263913
proto_04_01_MTmon70s2
validation
def proto_04_01_MTmon70s2(abf=exampleABF):
    """repeated membrane tests, likely with drug added. Maybe IPSCs."""
    standard_inspect(abf)
    swhlab.memtest.memtest(abf)
    swhlab.memtest.checkSweep(abf)
    swhlab.plot.save(abf, tag='check', resize=False)
    swhlab.memtest.plot_standard4(abf)
    swhlab.plot.save(abf, tag='memtests')
python
{ "resource": "" }
q263914
proto_VC_50_MT_IV
validation
def proto_VC_50_MT_IV(abf=exampleABF):
    """combination of membrane test and IV steps."""
    swhlab.memtest.memtest(abf)  # do membrane test on every sweep
    swhlab.memtest.checkSweep(abf)  # see all MT values
    swhlab.plot.save(abf, tag='02-check', resize=False)
    av1, sd1 = swhlab.plot.IV(abf, 1.2, 1.4, True, 'b')
    swhlab.plot.save(abf, tag='iv')
    Xs = abf.clampValues(1.2)  # generate IV clamp values
    abf.saveThing([Xs, av1], '01_iv')
python
{ "resource": "" }
q263915
indexImages
validation
def indexImages(folder, fname="index.html"):
    """OBSOLETE WAY TO INDEX A FOLDER."""  # TODO: REMOVE
    html = "<html><body>"
    for item in glob.glob(folder + "/*.*"):
        if item.split(".")[-1] in ['jpg', 'png']:
            html += "<h3>%s</h3>" % os.path.basename(item)
            html += '<img src="%s">' % os.path.basename(item)
            html += '<br>' * 10
    html += "</html></body>"
    f = open(folder + "/" + fname, 'w')
    f.write(html)
    f.close()  # was `f.close` (no parentheses), which never actually closed the file
    print("indexed:")
    print("  ", os.path.abspath(folder + "/" + fname))
    return
python
{ "resource": "" }
q263916
BaseActivatableModel.save
validation
def save(self, *args, **kwargs):
    """
    A custom save method that handles figuring out when something is
    activated or deactivated.
    """
    current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
    is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value
    self.__original_activatable_value = current_activable_value
    ret_val = super(BaseActivatableModel, self).save(*args, **kwargs)

    # Emit the signals for when the is_active flag is changed
    if is_active_changed:
        model_activations_changed.send(self.__class__, instance_ids=[self.id],
                                       is_active=current_activable_value)
    if self.activatable_field_updated:
        model_activations_updated.send(self.__class__, instance_ids=[self.id],
                                       is_active=current_activable_value)

    return ret_val
python
{ "resource": "" }
q263917
BaseActivatableModel.delete
validation
def delete(self, force=False, **kwargs):
    """
    It is impossible to delete an activatable model unless force is True.
    This function instead sets it to inactive.
    """
    if force:
        return super(BaseActivatableModel, self).delete(**kwargs)
    else:
        setattr(self, self.ACTIVATABLE_FIELD_NAME, False)
        return self.save(update_fields=[self.ACTIVATABLE_FIELD_NAME])
python
{ "resource": "" }
q263918
Command.show
validation
def show(self, args, file_handle=None, **kwargs):
    "Write to file_handle if supplied, otherwise print output"
    full_string = ''
    info = {'root_directory': '<root_directory>',
            'batch_name': '<batch_name>',
            'batch_tag': '<batch_tag>',
            'batch_description': '<batch_description>',
            'launcher': '<launcher>',
            'timestamp_format': '<timestamp_format>',
            'timestamp': tuple(time.localtime()),
            'varying_keys': args.varying_keys,
            'constant_keys': args.constant_keys,
            'constant_items': args.constant_items}

    quoted_cmds = [subprocess.list2cmdline(
        [el for el in self(self._formatter(s), '<tid>', info)])
        for s in args.specs]

    cmd_lines = ['%d: %s\n' % (i, qcmds) for (i, qcmds) in enumerate(quoted_cmds)]
    full_string += ''.join(cmd_lines)
    if file_handle:
        file_handle.write(full_string)
        file_handle.flush()
    else:
        print(full_string)
python
{ "resource": "" }
q263919
Launcher.get_root_directory
validation
def get_root_directory(self, timestamp=None):
    """
    A helper method that supplies the root directory name given a
    timestamp.
    """
    if timestamp is None:
        timestamp = self.timestamp
    if self.timestamp_format is not None:
        root_name = (time.strftime(self.timestamp_format, timestamp)
                     + '-' + self.batch_name)
    else:
        root_name = self.batch_name
    path = os.path.join(self.output_directory, *(self.subdir + [root_name]))
    return os.path.abspath(path)
python
{ "resource": "" }
q263920
Launcher._append_log
validation
def _append_log(self, specs):
    """
    The log contains the tids and corresponding specifications used during
    launch with the specifications in JSON format.
    """
    self._spec_log += specs  # This should be removed
    log_path = os.path.join(self.root_directory, ("%s.log" % self.batch_name))
    core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True)
python
{ "resource": "" }
q263921
Launcher._record_info
validation
def _record_info(self, setup_info=None):
    """
    All launchers should call this method to write the info file at the
    end of the launch. The .info file is saved given setup_info supplied
    by _setup_launch into the root_directory. When called without
    setup_info, the existing info file is updated with the end-time.
    """
    info_path = os.path.join(self.root_directory, ('%s.info' % self.batch_name))

    if setup_info is None:
        try:
            with open(info_path, 'r') as info_file:
                setup_info = json.load(info_file)
        except:
            setup_info = {}
        setup_info.update({'end_time': tuple(time.localtime())})
    else:
        setup_info.update({'end_time': None,
                           'metadata': self.metadata})

    with open(info_path, 'w') as info_file:
        json.dump(setup_info, info_file, sort_keys=True, indent=4)
python
{ "resource": "" }
q263922
Launcher._launch_process_group
validation
def _launch_process_group(self, process_commands, streams_path):
    """
    Launches processes defined by process_commands, but only executes
    max_concurrency processes at a time; if a process completes and there
    are still outstanding processes to be executed, the next processes are
    run until max_concurrency is reached again.
    """
    processes = {}

    def check_complete_processes(wait=False):
        """
        Returns True if a process completed, False otherwise.
        Optionally allows waiting for better performance (avoids
        sleep-poll cycle if possible).
        """
        result = False
        # list creates copy of keys, as dict is modified in loop
        for proc in list(processes):
            if wait:
                proc.wait()
            if proc.poll() is not None:
                # process is done, free up slot
                self.debug("Process %d exited with code %d."
                           % (processes[proc]['tid'], proc.poll()))
                processes[proc]['stdout'].close()
                processes[proc]['stderr'].close()
                del processes[proc]
                result = True
        return result

    for cmd, tid in process_commands:
        self.debug("Starting process %d..." % tid)
        job_timestamp = time.strftime('%H%M%S')
        basename = "%s_%s_tid_%d" % (self.batch_name, job_timestamp, tid)
        stdout_handle = open(os.path.join(streams_path, "%s.o.%d" % (basename, tid)), "wb")
        stderr_handle = open(os.path.join(streams_path, "%s.e.%d" % (basename, tid)), "wb")
        proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)
        processes[proc] = {'tid': tid,
                           'stdout': stdout_handle,
                           'stderr': stderr_handle}

        if self.max_concurrency:
            # max_concurrency reached, wait until more slots available
            while len(processes) >= self.max_concurrency:
                if not check_complete_processes(len(processes) == 1):
                    time.sleep(0.1)

    # Wait for all processes to complete
    while len(processes) > 0:
        if not check_complete_processes(True):
            time.sleep(0.1)
python
{ "resource": "" }
q263923
Launcher.summary
validation
def summary(self):
    """
    A succinct summary of the Launcher configuration. Unlike the repr, a
    summary does not have to be complete but must supply key information
    relevant to the user.
    """
    print("Type: %s" % self.__class__.__name__)
    print("Batch Name: %r" % self.batch_name)
    if self.tag:
        print("Tag: %s" % self.tag)
    print("Root directory: %r" % self.get_root_directory())
    print("Maximum concurrency: %s" % self.max_concurrency)
    if self.description:
        print("Description: %s" % self.description)
python
{ "resource": "" }
q263924
QLauncher._qsub_collate_and_launch
validation
def _qsub_collate_and_launch(self, output_dir, error_dir, job_names):
    """
    The method that actually runs qsub to invoke the python process with
    the necessary commands to trigger the next collation step and next
    block of jobs.
    """
    job_name = "%s_%s_collate_%d" % (self.batch_name, self.job_timestamp, self.collate_count)
    overrides = [("-e", error_dir), ('-N', job_name), ("-o", output_dir),
                 ('-hold_jid', ','.join(job_names))]
    resume_cmds = ["import os, pickle, lancet",
                   ("pickle_path = os.path.join(%r, 'qlauncher.pkl')" % self.root_directory),
                   "launcher = pickle.load(open(pickle_path,'rb'))",
                   "launcher.collate_and_launch()"]
    cmd_args = [self.command.executable, '-c', ';'.join(resume_cmds)]
    popen_args = self._qsub_args(overrides, cmd_args)

    p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    self.debug(stdout)
    if p.poll() != 0:
        raise EnvironmentError("qsub command exit with code: %d" % p.poll())

    self.collate_count += 1
    self.message("Invoked qsub for next batch.")
    return job_name
python
{ "resource": "" }
q263925
QLauncher._qsub_block
validation
def _qsub_block(self, output_dir, error_dir, tid_specs):
    """
    This method handles static argument specifiers and cases where the
    dynamic specifiers cannot be queued before the arguments are known.
    """
    processes = []
    job_names = []

    for (tid, spec) in tid_specs:
        job_name = "%s_%s_tid_%d" % (self.batch_name, self.job_timestamp, tid)
        job_names.append(job_name)
        cmd_args = self.command(self.command._formatter(spec), tid, self._launchinfo)
        popen_args = self._qsub_args([("-e", error_dir), ('-N', job_name), ("-o", output_dir)],
                                     cmd_args)
        p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        self.debug(stdout)
        if p.poll() != 0:
            raise EnvironmentError("qsub command exit with code: %d" % p.poll())
        processes.append(p)

    self.message("Invoked qsub for %d commands" % len(processes))
    if (self.reduction_fn is not None) or self.dynamic:
        self._qsub_collate_and_launch(output_dir, error_dir, job_names)
python
{ "resource": "" }
q263926
ScriptLauncher._launch_process_group
validation
def _launch_process_group(self, process_commands, streams_path):
    """
    Aggregates all process_commands and the designated output files into a
    list, and outputs it as JSON, after which the wrapper script is called.
    """
    processes = []
    for cmd, tid in process_commands:
        job_timestamp = time.strftime('%H%M%S')
        basename = "%s_%s_tid_%d" % (self.batch_name, job_timestamp, tid)
        stdout_path = os.path.join(streams_path, "%s.o.%d" % (basename, tid))
        stderr_path = os.path.join(streams_path, "%s.e.%d" % (basename, tid))
        process = {'tid': tid,
                   'cmd': cmd,
                   'stdout': stdout_path,
                   'stderr': stderr_path}
        processes.append(process)

    # To make the JSON filename unique per group, we use the last tid in
    # this group.
    json_path = os.path.join(self.root_directory, self.json_name % (tid))
    with open(json_path, 'w') as json_file:
        json.dump(processes, json_file, sort_keys=True, indent=4)

    p = subprocess.Popen([self.script_path, json_path, self.batch_name,
                          str(len(processes)), str(self.max_concurrency)])
    if p.wait() != 0:
        raise EnvironmentError("Script command exit with code: %d" % p.poll())
python
{ "resource": "" }
q263927
review_and_launch.cross_check_launchers
validation
def cross_check_launchers(self, launchers):
    """
    Performs consistency checks across all the launchers.
    """
    if len(launchers) == 0:
        raise Exception('Empty launcher list')
    timestamps = [launcher.timestamp for launcher in launchers]

    if not all(timestamps[0] == tstamp for tstamp in timestamps):
        raise Exception("Launcher timestamps not all equal. "
                        "Consider setting timestamp explicitly.")

    root_directories = []
    for launcher in launchers:
        command = launcher.command
        args = launcher.args
        command.verify(args)
        root_directory = launcher.get_root_directory()
        if os.path.isdir(root_directory):
            raise Exception("Root directory already exists: %r" % root_directory)
        if root_directory in root_directories:
            raise Exception("Each launcher requires a unique root directory")
        root_directories.append(root_directory)
python
{ "resource": "" }
q263928
review_and_launch._launch_all
validation
def _launch_all(self, launchers):
    """
    Launches all available launchers.
    """
    for launcher in launchers:
        print("== Launching %s ==" % launcher.batch_name)
        launcher()
    return True
python
{ "resource": "" }
q263929
review_and_launch._review_all
validation
def _review_all(self, launchers):
    """
    Runs the review process for all the launchers.
    """
    # Run review of launch args if necessary
    if self.launch_args is not None:
        proceed = self.review_args(self.launch_args, show_repr=True, heading='Meta Arguments')
        if not proceed:
            return False

    reviewers = [self.review_args, self.review_command, self.review_launcher]

    for (count, launcher) in enumerate(launchers):
        # Run reviews for all launchers if desired...
        if not all(reviewer(launcher) for reviewer in reviewers):
            print("\n == Aborting launch ==")
            return False
        # But allow the user to skip these extra reviews
        if len(launchers) != 1 and count < len(launchers) - 1:
            skip_remaining = self.input_options(['Y', 'n', 'quit'],
                                                '\nSkip remaining reviews?', default='y')
            if skip_remaining == 'y':
                break
            elif skip_remaining == 'quit':
                return False

    if self.input_options(['y', 'N'], 'Execute?', default='n') != 'y':
        return False
    else:
        return self._launch_all(launchers)
python
{ "resource": "" }
q263930
review_and_launch.input_options
validation
def input_options(self, options, prompt='Select option', default=None):
    """
    Helper to prompt the user for input on the commandline.
    """
    check_options = [x.lower() for x in options]
    while True:
        response = input('%s [%s]: ' % (prompt, ', '.join(options))).lower()
        if response in check_options:
            return response.strip()
        elif response == '' and default is not None:
            return default.lower().strip()
python
{ "resource": "" }
q263931
FileType.save
validation
def save(self, filename, metadata={}, **data):
    """
    The implementation in the base class simply checks there is no
    clash between the metadata and data keys.
    """
    intersection = set(metadata.keys()) & set(data.keys())
    if intersection:
        msg = 'Key(s) overlap between data and metadata: %s'
        raise Exception(msg % ','.join(intersection))
python
{ "resource": "" }
q263932
FileType._savepath
validation
def _savepath(self, filename):
    """
    Returns the full path for saving the file, adding an extension and
    making the filename unique as necessary.
    """
    (basename, ext) = os.path.splitext(filename)
    basename = basename if (ext in self.extensions) else filename
    ext = ext if (ext in self.extensions) else self.extensions[0]
    savepath = os.path.abspath(os.path.join(self.directory,
                                            '%s%s' % (basename, ext)))
    return (tempfile.mkstemp(ext, basename + "_", self.directory)[1]
            if self.hash_suffix else savepath)
python
{ "resource": "" }
q263933
FileType.file_supported
validation
def file_supported(cls, filename):
    """
    Returns a boolean indicating whether the filename has an appropriate
    extension for this class.
    """
    if not isinstance(filename, str):
        return False
    (_, ext) = os.path.splitext(filename)
    if ext not in cls.extensions:
        return False
    else:
        return True
python
{ "resource": "" }
q263934
ImageFile.save
validation
def save(self, filename, imdata, **data):
    """
    Data may be either a PIL Image object or a Numpy array.
    """
    # convert arrays to a PIL Image first, then save either kind
    # (the original if/elif only saved PIL inputs and silently dropped arrays)
    if isinstance(imdata, numpy.ndarray):
        imdata = Image.fromarray(numpy.uint8(imdata))
    if isinstance(imdata, Image.Image):
        imdata.save(self._savepath(filename))
python
{ "resource": "" }
q263935
fileModifiedTimestamp
validation
def fileModifiedTimestamp(fname):
    """return "YYYY-MM-DD" when the file was modified."""
    modifiedTime = os.path.getmtime(fname)
    stamp = time.strftime('%Y-%m-%d', time.localtime(modifiedTime))
    return stamp
python
{ "resource": "" }
q263936
loadResults
validation
def loadResults(resultsFile):
    """returns a dict of active folders with days as keys."""
    with open(resultsFile) as f:
        raw = f.read().split("\n")
    foldersByDay = {}
    for line in raw:
        folder = line.split('"')[1] + "\\"
        line = [] + line.split('"')[2].split(", ")
        for day in line[1:]:
            if not day in foldersByDay:
                foldersByDay[day] = []
            foldersByDay[day] = foldersByDay[day] + [folder]
    nActiveDays = len(foldersByDay)
    dayFirst = sorted(foldersByDay.keys())[0]
    dayLast = sorted(foldersByDay.keys())[-1]
    dayFirst = datetime.datetime.strptime(dayFirst, "%Y-%m-%d")
    dayLast = datetime.datetime.strptime(dayLast, "%Y-%m-%d")
    nDays = (dayLast - dayFirst).days + 1
    emptyDays = 0
    for deltaDays in range(nDays):
        day = dayFirst + datetime.timedelta(days=deltaDays)
        stamp = datetime.datetime.strftime(day, "%Y-%m-%d")
        if not stamp in foldersByDay:
            foldersByDay[stamp] = []
            emptyDays += 1
    percActive = nActiveDays / nDays * 100
    print("%d of %d days were active (%.02f%%)" % (nActiveDays, nDays, percActive))
    return foldersByDay
python
{ "resource": "" }
q263937
ndist
validation
def ndist(data, Xs):
    """
    given some data and a list of X positions, return the normal
    distribution curve as a Y point at each of those Xs.
    """
    sigma = np.sqrt(np.var(data))
    center = np.average(data)
    # note: matplotlib.mlab.normpdf was removed in matplotlib 3.1;
    # scipy.stats.norm.pdf(Xs, center, sigma) is the modern equivalent
    curve = mlab.normpdf(Xs, center, sigma)
    curve *= len(data) * HIST_RESOLUTION
    return curve
python
{ "resource": "" }
q263938
ABF.abfinfo
validation
def abfinfo(self, printToo=False, returnDict=False):
    """show basic info about ABF class variables."""
    info = "\n### ABF INFO ###\n"
    d = {}
    for thingName in sorted(dir(self)):
        if thingName in ['cm', 'evIs', 'colormap', 'dataX', 'dataY',
                         'protoX', 'protoY']:
            continue
        if "_" in thingName:
            continue
        thing = getattr(self, thingName)
        if type(thing) is list and len(thing) > 5:
            continue
        thingType = str(type(thing)).split("'")[1]
        if "method" in thingType or "neo." in thingType:
            continue
        if thingName in ["header", "MT"]:
            continue
        info += "%s <%s> %s\n" % (thingName, thingType, thing)
        d[thingName] = thing
    if printToo:
        print()
        for line in info.split("\n"):
            if len(line) < 3:
                continue
            print("   ", line)
        print()
    if returnDict:
        return d
    return info
python
{ "resource": "" }
q263939
ABF.headerHTML
validation
def headerHTML(self, fname=None):
    """read the ABF header and save it HTML formatted."""
    if fname is None:
        fname = self.fname.replace(".abf", "_header.html")
    html = "<html><body><code>"
    html += "<h2>abfinfo() for %s.abf</h2>" % self.ID
    html += self.abfinfo().replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
    html += "<h2>Header for %s.abf</h2>" % self.ID
    html += pprint.pformat(self.header, indent=1)
    html = html.replace("\n", '<br>').replace(" ", "&nbsp;")
    html = html.replace(r"\x00", "")
    html += "</code></body></html>"
    print("WRITING HEADER TO:")
    print(fname)
    f = open(fname, 'w')
    f.write(html)
    f.close()
python
{ "resource": "" }
q263940
ABF.generate_colormap
validation
def generate_colormap(self, colormap=None, reverse=False):
    """use 1 colormap for the whole abf. You can change it!"""
    if colormap is None:
        colormap = pylab.cm.Dark2
    self.cm = colormap
    self.colormap = []
    for i in range(self.sweeps):  # TODO: make this the only colormap
        self.colormap.append(colormap(i / self.sweeps))
    if reverse:
        self.colormap.reverse()
python
{ "resource": "" }
q263941
ABF.get_data_around
validation
def get_data_around(self, timePoints, thisSweep=False, padding=0.02, msDeriv=0):
    """
    return self.dataY around a time point. All units are seconds.
    if thisSweep==False, the time point is considered to be experiment
    time and an appropriate sweep may be selected. i.e., with 10 second
    sweeps and timePoint=35, will select the 5s mark of the third sweep
    """
    if not np.array(timePoints).shape:
        timePoints = [float(timePoints)]
    data = None
    for timePoint in timePoints:
        if thisSweep:
            sweep = self.currentSweep
        else:
            sweep = int(timePoint / self.sweepInterval)
            timePoint = timePoint - sweep * self.sweepInterval
        self.setSweep(sweep)
        if msDeriv:
            dx = int(msDeriv * self.rate / 1000)  # points per ms
            newData = (self.dataY[dx:] - self.dataY[:-dx]) * self.rate / 1000 / dx
        else:
            newData = self.dataY
        padPoints = int(padding * self.rate)
        pad = np.empty(padPoints) * np.nan
        Ic = int(timePoint * self.rate)  # center point index (int() added: slice bounds must be integers)
        newData = np.concatenate((pad, pad, newData, pad, pad))
        Ic += padPoints * 2
        newData = newData[Ic - padPoints:Ic + padPoints]
        newData = newData[:int(padPoints * 2)]  # TODO: omg so much trouble with this!
        if data is None:
            data = [newData]
        else:
            data = np.vstack((data, newData))  # TODO: omg so much trouble with this!
    return data
python
{ "resource": "" }
q263942
ABF.filter_gaussian
validation
def filter_gaussian(self, sigmaMs=100, applyFiltered=False, applyBaseline=False):
    """RETURNS filtered trace. Doesn't filter it in place."""
    if sigmaMs == 0:
        return self.dataY
    filtered = cm.filter_gaussian(self.dataY, sigmaMs)
    if applyBaseline:
        self.dataY = self.dataY - filtered
    elif applyFiltered:
        self.dataY = filtered
    else:
        return filtered
python
{ "resource": "" }
q263943
validate_activatable_models
validation
def validate_activatable_models():
    """
    Raises a ValidationError for any ActivatableModel that has ForeignKeys or OneToOneFields
    that will cause cascading deletions to occur. This function also raises a ValidationError
    if the activatable model has not defined a Boolean field with the field name defined by
    the ACTIVATABLE_FIELD_NAME variable on the model.
    """
    for model in get_activatable_models():
        # Verify the activatable model has an activatable boolean field
        activatable_field = next((
            f for f in model._meta.fields
            if f.__class__ == models.BooleanField and f.name == model.ACTIVATABLE_FIELD_NAME
        ), None)
        if activatable_field is None:
            raise ValidationError((
                'Model {0} is an activatable model. It must define an activatable BooleanField that '
                'has a field name of model.ACTIVATABLE_FIELD_NAME (which defaults to is_active)'.format(model)
            ))

        # Ensure all foreign keys and onetoone fields will not result in cascade deletions if not cascade deletable
        if not model.ALLOW_CASCADE_DELETE:
            for field in model._meta.fields:
                if field.__class__ in (models.ForeignKey, models.OneToOneField):
                    if field.remote_field.on_delete == models.CASCADE:
                        raise ValidationError((
                            'Model {0} is an activatable model. All ForeignKey and OneToOneFields '
                            'must set on_delete methods to something other than CASCADE (the default). '
                            'If you want to explicitly allow cascade deletes, then you must set the '
                            'ALLOW_CASCADE_DELETE=True class variable on your model.'
                        ).format(model))
python
{ "resource": "" }
q263944
to_table
validation
def to_table(args, vdims=[]):
    "Helper function to convert an Args object to a HoloViews Table"
    if not Table:
        return "HoloViews Table not available"
    kdims = [dim for dim in args.constant_keys + args.varying_keys
             if dim not in vdims]
    items = [tuple([spec[k] for k in kdims + vdims]) for spec in args.specs]
    return Table(items, kdims=kdims, vdims=vdims)
python
{ "resource": "" }
q263945
PrettyPrinted.pprint_args
validation
def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):
    """
    Method to define the positional arguments and keyword order for
    pretty printing.
    """
    if infix_operator and not (len(pos_args) == 2 and keyword_args == []):
        raise Exception('Infix format requires exactly two'
                        ' positional arguments and no keywords')
    (kwargs, _, _, _) = self._pprint_args
    self._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)
python
{ "resource": "" }
q263946
Arguments.spec_formatter
validation
def spec_formatter(cls, spec):
    "Formats the elements of an argument set appropriately"
    return type(spec)((k, str(v)) for (k, v) in spec.items())
python
{ "resource": "" }
q263947
Arguments._collect_by_key
validation
def _collect_by_key(self, specs):
    """
    Returns a dictionary like object with the lists of values collapsed
    by their respective key. Useful to find varying vs constant keys and
    to find how fast keys vary.
    """
    # Collect (key, value) tuples as list of lists, flatten with chain
    allkeys = itertools.chain.from_iterable(
        [[(k, run[k]) for k in run] for run in specs])
    collection = defaultdict(list)
    for (k, v) in allkeys:
        collection[k].append(v)
    return collection
python
{ "resource": "" }
q263948
Arguments.summary
validation
def summary(self):
    """
    A succinct summary of the argument specifier. Unlike the repr, a
    summary does not have to be complete but must supply the most
    relevant information about the object to the user.
    """
    print("Items: %s" % len(self))
    varying_keys = ', '.join('%r' % k for k in self.varying_keys)
    print("Varying Keys: %s" % varying_keys)
    items = ', '.join(['%s=%r' % (k, v) for (k, v) in self.constant_items])
    if self.constant_items:
        print("Constant Items: %s" % items)
python
{ "resource": "" }
q263949
Args._build_specs
validation
def _build_specs(self, specs, kwargs, fp_precision):
    """
    Returns the specs, the remaining kwargs and whether or not the
    constructor was called with kwarg or explicit specs.
    """
    if specs is None:
        overrides = param.ParamOverrides(self, kwargs, allow_extra_keywords=True)
        extra_kwargs = overrides.extra_keywords()
        kwargs = dict([(k, v) for (k, v) in kwargs.items()
                       if k not in extra_kwargs])
        rounded_specs = list(self.round_floats([extra_kwargs], fp_precision))

        if extra_kwargs == {}:
            return [], kwargs, True
        else:
            return rounded_specs, kwargs, False

    return list(self.round_floats(specs, fp_precision)), kwargs, True
python
{ "resource": "" }
q263950
Args.show
validation
def show(self, exclude=[]):
    """
    Convenience method to inspect the available argument values in
    human-readable format. The ordering of keys is determined by how
    quickly they vary.

    The exclude list allows specific keys to be excluded for
    readability (e.g. to hide long, absolute filenames).
    """
    ordering = self.constant_keys + self.varying_keys
    spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering
                             if (k in s) and (k not in exclude)])
                  for s in self.specs]
    print('\n'.join(['%d: %s' % (i, l) for (i, l) in enumerate(spec_lines)]))
python
{ "resource": "" }
q263951
Args.lexsort
validation
def lexsort(self, *order):
    """
    The lexical sort order is specified by a list of string arguments.
    Each string is a key name prefixed by '+' or '-' for ascending and
    descending sort respectively. If the key is not found in the
    operand's set of varying keys, it is ignored.
    """
    if not order:  # was `order == []`, which is never true for the *order tuple
        raise Exception("Please specify the keys for sorting, use "
                        "'+' prefix for ascending, "
                        "'-' for descending.")
    if not set(el[1:] for el in order).issubset(set(self.varying_keys)):
        raise Exception("Key(s) specified not in the set of varying keys.")

    sorted_args = copy.deepcopy(self)
    specs_param = sorted_args.params('specs')
    specs_param.constant = False
    sorted_args.specs = self._lexsorted_specs(order)
    specs_param.constant = True
    sorted_args._lexorder = order
    return sorted_args
python
{ "resource": "" }
q263952
Range.linspace
validation
def linspace(self, start, stop, n):
    """Simple replacement for numpy linspace"""
    if n == 1:
        return [start]
    L = [0.0] * n
    nm1 = n - 1
    nm1inv = 1.0 / nm1
    for i in range(n):
        L[i] = nm1inv * (start * (nm1 - i) + stop * i)
    return L
python
{ "resource": "" }
q263953
Log.extract_log
validation
def extract_log(log_path, dict_type=dict):
    """
    Parses the log file generated by a launcher and returns a dictionary
    with tid keys and specification values.

    Ordering can be maintained by setting dict_type to the appropriate
    constructor (i.e. OrderedDict). Keys are converted from unicode to
    strings for kwarg use.
    """
    log_path = (log_path if os.path.isfile(log_path)
                else os.path.join(os.getcwd(), log_path))
    with open(log_path, 'r') as log:
        splits = (line.split() for line in log)
        uzipped = ((int(split[0]), json.loads(" ".join(split[1:]))) for split in splits)
        szipped = [(i, dict((str(k), v) for (k, v) in d.items())) for (i, d) in uzipped]
    return dict_type(szipped)
python
{ "resource": "" }
q263954
Log.write_log
validation
def write_log(log_path, data, allow_append=True):
    """
    Writes the supplied specifications to the log path. The data may be
    supplied either as an Args object or as a list of dictionaries.

    By default, specifications will be appropriately appended to an
    existing log file. This can be disabled by setting allow_append to
    False.
    """
    append = os.path.isfile(log_path)
    islist = isinstance(data, list)
    if append and not allow_append:
        raise Exception('Appending has been disabled'
                        ' and file %s exists' % log_path)
    if not (islist or isinstance(data, Args)):
        raise Exception('Can only write Args objects or dictionary'
                        ' lists to log file.')
    specs = data if islist else data.specs
    if not all(isinstance(el, dict) for el in specs):
        raise Exception('List elements must be dictionaries.')
    log_file = open(log_path, 'r+') if append else open(log_path, 'w')
    start = int(log_file.readlines()[-1].split()[0]) + 1 if append else 0
    ascending_indices = range(start, start + len(data))
    log_str = '\n'.join(['%d %s' % (tid, json.dumps(el))
                         for (tid, el) in zip(ascending_indices, specs)])
    log_file.write("\n" + log_str if append else log_str)
    log_file.close()
python
{ "resource": "" }
q263955
FilePattern.directory
validation
def directory(cls, directory, root=None, extension=None, **kwargs):
    """
    Load all the files in a given directory selecting only files with
    the given extension if specified. The given kwargs are passed
    through to the normal constructor.
    """
    root = os.getcwd() if root is None else root
    suffix = '' if extension is None else '.' + extension.rsplit('.')[-1]
    pattern = directory + os.sep + '*' + suffix
    key = os.path.join(root, directory, '*').rsplit(os.sep)[-2]
    format_parse = list(string.Formatter().parse(key))
    # list() added around zip for Python 3, where zip objects are not subscriptable
    if not all([el is None for el in list(zip(*format_parse))[1]]):
        raise Exception('Directory cannot contain format field specifications')
    return cls(key, pattern, root, **kwargs)
python
{ "resource": "" }
q263956
FilePattern.fields
validation
def fields(self):
    """
    Return the fields specified in the pattern using Python's
    formatting mini-language.
    """
    parse = list(string.Formatter().parse(self.pattern))
    # list() added around zip for Python 3, where zip objects are not subscriptable
    return [f for f in list(zip(*parse))[1] if f is not None]
python
{ "resource": "" }
q263957
FilePattern._load_expansion
validation
def _load_expansion(self, key, root, pattern):
    """
    Loads the files that match the given pattern.
    """
    path_pattern = os.path.join(root, pattern)
    expanded_paths = self._expand_pattern(path_pattern)

    specs = []
    for (path, tags) in expanded_paths:
        filelist = ([os.path.join(path, f) for f in os.listdir(path)]
                    if os.path.isdir(path) else [path])
        for filepath in filelist:
            specs.append(dict(tags, **{key: os.path.abspath(filepath)}))

    return sorted(specs, key=lambda s: s[key])
python
{ "resource": "" }
q263958
FilePattern._expand_pattern
validation
def _expand_pattern(self, pattern):
    """
    From the pattern decomposition, finds the absolute paths matching
    the pattern.
    """
    (globpattern, regexp, fields, types) = self._decompose_pattern(pattern)
    filelist = glob.glob(globpattern)
    expansion = []

    for fname in filelist:
        if fields == []:
            expansion.append((fname, {}))
            continue
        match = re.match(regexp, fname)
        if match is None:
            continue
        match_items = match.groupdict().items()
        tags = dict((k, types.get(k, str)(v)) for (k, v) in match_items)
        expansion.append((fname, tags))

    return expansion
python
{ "resource": "" }
q263959
FileInfo.from_pattern
validation
def from_pattern(cls, pattern, filetype=None, key='filename', root=None, ignore=[]):
    """
    Convenience method to directly chain a pattern processed by
    FilePattern into a FileInfo instance.

    Note that if a default filetype has been set on FileInfo, the
    filetype argument may be omitted.
    """
    filepattern = FilePattern(key, pattern, root=root)
    if FileInfo.filetype and filetype is None:
        filetype = FileInfo.filetype
    elif filetype is None:
        raise Exception("The filetype argument must be supplied unless "
                        "an appropriate default has been specified as "
                        "FileInfo.filetype")
    return FileInfo(filepattern, key, filetype, ignore=ignore)
python
{ "resource": "" }
q263960
FileInfo.load_table
validation
def load_table(self, table):
    """
    Load the file contents into the supplied Table using the specified
    key and filetype. The input table should have the filenames as
    values which will be replaced by the loaded data. If data_key is
    specified, this key will be used to index the loaded data to
    retrieve the specified item.
    """
    items, data_keys = [], None
    for key, filename in table.items():
        data_dict = self.filetype.data(filename[0])
        current_keys = tuple(sorted(data_dict.keys()))
        values = [data_dict[k] for k in current_keys]
        if data_keys is None:
            data_keys = current_keys
        elif data_keys != current_keys:
            raise Exception("Data keys are inconsistent")
        items.append((key, values))

    return Table(items, kdims=table.kdims, vdims=data_keys)
python
{ "resource": "" }
q263961
FileInfo.load_dframe
validation
def load_dframe(self, dframe):
    """
    Load the file contents into the supplied dataframe using the
    specified key and filetype.
    """
    filename_series = dframe[self.key]
    loaded_data = filename_series.map(self.filetype.data)
    keys = [list(el.keys()) for el in loaded_data.values]
    for key in set().union(*keys):
        key_exists = key in dframe.columns
        if key_exists:
            self.warning("Appending '_data' suffix to data key %r to avoid "
                         "overwriting existing metadata with the same name." % key)
        suffix = '_data' if key_exists else ''
        dframe[key + suffix] = loaded_data.map(lambda x: x.get(key, np.nan))
    return dframe
python
{ "resource": "" }
q263962
FileInfo._info
validation
def _info(self, source, key, filetype, ignore):
    """
    Generates the union of the source.specs and the metadata dictionary
    loaded by the filetype object.
    """
    specs, mdata = [], {}
    mdata_clashes = set()
    for spec in source.specs:
        if key not in spec:
            raise Exception("Key %r not available in 'source'." % key)
        mdata = dict((k, v) for (k, v) in filetype.metadata(spec[key]).items()
                     if k not in ignore)
        mdata_spec = {}
        mdata_spec.update(spec)
        mdata_spec.update(mdata)
        specs.append(mdata_spec)
        mdata_clashes = mdata_clashes | (set(spec.keys()) & set(mdata.keys()))
    # Metadata clashes can be avoided by using the ignore list.
    if mdata_clashes:
        self.warning("Loaded metadata keys overriding source keys.")
    return specs
python
{ "resource": "" }
q263963
EventIterator._push
validation
async def _push(self, *args, **kwargs):
    """Push new data into the buffer. Resume looping if paused."""
    self._data.append((args, kwargs))
    if self._future is not None:
        future, self._future = self._future, None
        future.set_result(True)
python
{ "resource": "" }
q263964
figureStimulus
validation
def figureStimulus(abf, sweeps=[0]):
    """
    Create a plot of one area of interest of a single sweep.
    """
    stimuli = [2.31250, 2.35270]
    for sweep in sweeps:
        abf.setsweep(sweep)
        for stimulus in stimuli:
            S1 = int(abf.pointsPerSec * stimulus)
            S2 = int(abf.pointsPerSec * (stimulus + 0.001))  # 1ms of blanking
            abf.sweepY[S1:S2] = np.nan  # blank out the stimulus area
        I1 = int(abf.pointsPerSec * 2.2)  # time point (sec) to start
        I2 = int(abf.pointsPerSec * 2.6)  # time point (sec) to end
        baseline = np.average(abf.sweepY[int(abf.pointsPerSec * 2.0):int(abf.pointsPerSec * 2.2)])
        Ys = lowPassFilter(abf.sweepY[I1:I2]) - baseline
        Xs = abf.sweepX2[I1:I1 + len(Ys)].flatten()
        plt.plot(Xs, Ys, alpha=.5, lw=2)
    return
python
{ "resource": "" }
q263965
doStuff
validation
def doStuff(ABFfolder, analyze=False, convert=False, index=True, overwrite=True,
            launch=True):
    """Inelegant for now, but lets you manually analyze every ABF in a folder."""
    IN = INDEX(ABFfolder)
    if analyze:
        IN.analyzeAll()
    if convert:
        IN.convertImages()
python
{ "resource": "" }
q263966
analyzeSingle
validation
def analyzeSingle(abfFname):
    """Reanalyze data for a single ABF. Also remakes child and parent html."""
    assert os.path.exists(abfFname) and abfFname.endswith(".abf")
    ABFfolder, ABFfname = os.path.split(abfFname)
    abfID = os.path.splitext(ABFfname)[0]
    IN = INDEX(ABFfolder)
    IN.analyzeABF(abfID)
    IN.scan()
    IN.html_single_basic([abfID], overwrite=True)
    IN.html_single_plot([abfID], overwrite=True)
    IN.scan()
    IN.html_index()
    return
python
{ "resource": "" }
q263967
INDEX.scan
validation
def scan(self):
    """
    scan folder1 and folder2 into files1 and files2.
    since we are on windows, simplify things by making them all lowercase.
    this WILL cause problems on 'nix operating systems. If this is the
    case, just run a script to rename every file to all lowercase.
    """
    t1 = cm.timeit()
    self.files1 = cm.list_to_lowercase(sorted(os.listdir(self.folder1)))
    self.files2 = cm.list_to_lowercase(sorted(os.listdir(self.folder2)))
    self.files1abf = [x for x in self.files1 if x.endswith(".abf")]
    self.files1abf = cm.list_to_lowercase(cm.abfSort(self.files1abf))
    self.IDs = [x[:-4] for x in self.files1abf]
    self.log.debug("folder1 has %d files", len(self.files1))
    self.log.debug("folder1 has %d abfs", len(self.files1abf))
    self.log.debug("folder2 has %d files", len(self.files2))
    self.log.debug("scanning folders took %s", cm.timeit(t1))
python
{ "resource": "" }
q263968
INDEX.convertImages
validation
def convertImages(self):
    """
    run this to turn all folder1 TIFs and JPGs into folder2 data.
    TIFs will be treated as micrographs and converted to JPG with
    enhanced contrast. JPGs will simply be copied over.
    """
    # copy over JPGs (and such)
    exts = ['.jpg', '.png']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID = "UNKNOWN"
        if len(fname) > 8 and fname[:8] in self.IDs:
            ID = fname[:8]
        fname2 = ID + "_jpg_" + fname
        if not fname2 in self.files2:
            self.log.info("copying over [%s]" % fname2)
            shutil.copy(os.path.join(self.folder1, fname), os.path.join(self.folder2, fname2))
        if not fname[:8] + ".abf" in self.files1:
            self.log.error("orphan image: %s", fname)
    # convert TIFs (and such) to JPGs
    exts = ['.tif', '.tiff']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID = "UNKNOWN"
        if len(fname) > 8 and fname[:8] in self.IDs:
            ID = fname[:8]
        fname2 = ID + "_tif_" + fname + ".jpg"
        if not fname2 in self.files2:
            self.log.info("converting micrograph [%s]" % fname2)
            imaging.TIF_to_jpg(os.path.join(self.folder1, fname),
                               saveAs=os.path.join(self.folder2, fname2))
        if not fname[:8] + ".abf" in self.files1:
            self.log.error("orphan image: %s", fname)
python
{ "resource": "" }
q263969
INDEX.analyzeAll
validation
def analyzeAll(self):
    """analyze every unanalyzed ABF in the folder."""
    searchableData = str(self.files2)
    self.log.debug("considering analysis for %d ABFs", len(self.IDs))
    for ID in self.IDs:
        if not ID + "_" in searchableData:
            self.log.debug("%s needs analysis", ID)
            try:
                self.analyzeABF(ID)
            except:
                print("EXCEPTION! " * 100)
        else:
            self.log.debug("%s has existing analysis, not overwriting", ID)
    self.log.debug("verified analysis of %d ABFs", len(self.IDs))
python
{ "resource": "" }
q263970
INDEX.htmlFor
validation
def htmlFor(self, fname):
    """return appropriate HTML determined by file extension."""
    if os.path.splitext(fname)[1].lower() in ['.jpg', '.png']:
        html = '<a href="%s"><img src="%s"></a>' % (fname, fname)
        if "_tif_" in fname:
            # trailing space added so the class attribute doesn't run into src=
            html = html.replace('<img ', '<img class="datapic micrograph" ')
        if "_plot_" in fname:
            html = html.replace('<img ', '<img class="datapic intrinsic" ')
        if "_experiment_" in fname:
            html = html.replace('<img ', '<img class="datapic experiment" ')
    elif os.path.splitext(fname)[1].lower() in ['.html', '.htm']:
        html = 'LINK: %s' % fname
    else:
        html = '<br>Not sure how to show: [%s]</br>' % fname
    return html
python
{ "resource": "" }
q263971
INDEX.html_single_basic
validation
def html_single_basic(self, abfID, launch=False, overwrite=False):
    """
    generate a generic flat file html for an ABF parent. You could give
    this a single ABF ID, its parent ID, or a list of ABF IDs. If a child
    ABF is given, the parent will automatically be used.
    """
    if type(abfID) is str:
        abfID = [abfID]
    for thisABFid in cm.abfSort(abfID):
        parentID = cm.parent(self.groups, thisABFid)
        saveAs = os.path.abspath("%s/%s_basic.html" % (self.folder2, parentID))
        if overwrite is False and os.path.basename(saveAs) in self.files2:
            continue
        filesByType = cm.filesByType(self.groupFiles[parentID])
        html = ""
        html += '<div style="background-color: #DDDDDD;">'
        html += '<span class="title">summary of data from: %s</span></br>' % parentID
        html += '<code>%s</code>' % os.path.abspath(self.folder1 + "/" + parentID + ".abf")
        html += '</div>'
        catOrder = ["experiment", "plot", "tif", "other"]
        categories = cm.list_order_by(filesByType.keys(), catOrder)
        for category in [x for x in categories if len(filesByType[x])]:
            if category == 'experiment':
                html += "<h3>Experimental Data:</h3>"
            elif category == 'plot':
                html += "<h3>Intrinsic Properties:</h3>"
            elif category == 'tif':
                html += "<h3>Micrographs:</h3>"
            elif category == 'other':
                html += "<h3>Additional Files:</h3>"
            else:
                html += "<h3>????:</h3>"
            #html += "<hr>"
            #html += '<br>' * 3
            for fname in filesByType[category]:
                html += self.htmlFor(fname)
            html += '<br>' * 3
        print("creating", saveAs, '...')
        style.save(html, saveAs, launch=launch)
python
{ "resource": "" }
q263972
INDEX.html_single_plot
validation
def html_single_plot(self, abfID, launch=False, overwrite=False):
    """create ID_plot.html of just intrinsic properties."""
    if type(abfID) is str:
        abfID = [abfID]
    for thisABFid in cm.abfSort(abfID):
        parentID = cm.parent(self.groups, thisABFid)
        saveAs = os.path.abspath("%s/%s_plot.html" % (self.folder2, parentID))
        if overwrite is False and os.path.basename(saveAs) in self.files2:
            continue
        filesByType = cm.filesByType(self.groupFiles[parentID])
        html = ""
        html += '<div style="background-color: #DDDDFF;">'
        html += '<span class="title">intrinsic properties for: %s</span></br>' % parentID
        html += '<code>%s</code>' % os.path.abspath(self.folder1 + "/" + parentID + ".abf")
        html += '</div>'
        for fname in filesByType['plot']:
            html += self.htmlFor(fname)
        print("creating", saveAs, '...')
        style.save(html, saveAs, launch=launch)
python
{ "resource": "" }
q263973
convolve
validation
def convolve(signal, kernel):
    """
    This applies a kernel to a signal through convolution and returns
    the result.

    Some magic is done at the edges so the result doesn't approach zero:
        1. extend the signal's edges with len(kernel)/2 duplicated values
        2. perform the convolution ('same' mode)
        3. slice-off the ends we added
        4. return the same number of points as the original
    """
    pad = np.ones(len(kernel) // 2)  # integer division (a float length raises TypeError on Python 3)
    signal = np.concatenate((pad * signal[0], signal, pad * signal[-1]))
    signal = np.convolve(signal, kernel, mode='same')
    signal = signal[len(pad):-len(pad)]
    return signal
python
{ "resource": "" }
q263974
timeit
validation
def timeit(timer=None):
    """simple timer. returns a time object, or a string."""
    if timer is None:
        return time.time()
    else:
        took = time.time() - timer
        if took < 1:
            return "%.02f ms" % (took * 1000.0)
        elif took < 60:
            return "%.02f s" % (took)
        else:
            return "%.02f min" % (took / 60.0)
python
{ "resource": "" }
q263975
list_move_to_front
validation
def list_move_to_front(l, value='other'):
    """if the value is in the list, move it to the front and return it."""
    l = list(l)
    if value in l:
        l.remove(value)
        l.insert(0, value)
    return l
python
{ "resource": "" }
q263976
list_move_to_back
validation
def list_move_to_back(l, value='other'):
    """if the value is in the list, move it to the back and return it."""
    l = list(l)
    if value in l:
        l.remove(value)
        l.append(value)
    return l
python
{ "resource": "" }
q263977
list_order_by
validation
def list_order_by(l, firstItems):
    """given a list and a list of items to be first, return the list in
    the same order except that it begins with each of the first items."""
    l = list(l)
    for item in firstItems[::-1]:  # backwards
        if item in l:
            l.remove(item)
            l.insert(0, item)
    return l
python
{ "resource": "" }
q263978
abfSort
validation
def abfSort(IDs):
    """
    given a list of goofy ABF names, return it sorted intelligently.
    This places things like 16o01001 after 16901001.
    """
    IDs = list(IDs)
    monO = []
    monN = []
    monD = []
    good = []
    for ID in IDs:
        if ID is None:
            continue
        if 'o' in ID:
            monO.append(ID)
        elif 'n' in ID:
            monN.append(ID)
        elif 'd' in ID:
            monD.append(ID)
        else:
            good.append(ID)
    return sorted(good) + sorted(monO) + sorted(monN) + sorted(monD)
python
{ "resource": "" }
q263979
abfGroupFiles
validation
def abfGroupFiles(groups, folder):
    """
    when given a dictionary where every key contains a list of IDs,
    replace the keys with the list of files matching those IDs. This is
    how you get a list of files belonging to each child for each parent.
    """
    assert os.path.exists(folder)
    files = os.listdir(folder)
    group2 = {}
    for parent in groups.keys():
        if not parent in group2.keys():
            group2[parent] = []
        for ID in groups[parent]:
            for fname in [x.lower() for x in files if ID in x.lower()]:
                group2[parent].extend([fname])
    return group2
python
{ "resource": "" }
q263980
parent
validation
def parent(groups, ID):
    """given a groups dictionary and an ID, return its actual parent ID."""
    if ID in groups.keys():
        return ID  # already a parent
    if not ID in groups.keys():
        for actualParent in groups.keys():
            if ID in groups[actualParent]:
                return actualParent  # found the actual parent
    return None
python
{ "resource": "" }
q263981
userFolder
validation
def userFolder():
    """return the semi-temporary user folder"""
    #path=os.path.abspath(tempfile.gettempdir()+"/swhlab/")
    #don't use tempdir! it will get deleted easily.
    path = os.path.expanduser("~") + "/.swhlab/"  # works on windows or linux
    # for me, path=r"C:\Users\swharden\.swhlab"
    if not os.path.exists(path):
        print("creating", path)
        os.mkdir(path)
    return os.path.abspath(path)
python
{ "resource": "" }
q263982
_try_catch_coro
validation
async def _try_catch_coro(emitter, event, listener, coro):
    """Coroutine wrapper to catch errors after async scheduling.

    Args:
        emitter (EventEmitter): The event emitter that is attempting to
            call a listener.
        event (str): The event that triggered the emitter.
        listener (async def): The async def that was used to generate the
            coro.
        coro (coroutine): The coroutine that should be tried.

    If an exception is caught the function will use the emitter to emit the
    failure event. If, however, the current event _is_ the failure event then
    the method reraises. The reraised exception may show in debug mode for
    the event loop but is otherwise silently dropped.
    """
    try:
        await coro
    except Exception as exc:
        if event == emitter.LISTENER_ERROR_EVENT:
            raise
        emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)
python
{ "resource": "" }
q263983
EventEmitter._check_limit
validation
def _check_limit(self, event):
    """Check if the listener limit is hit and warn if needed."""
    if self.count(event) > self.max_listeners:
        warnings.warn(
            'Too many listeners for event {}'.format(event),
            ResourceWarning,
        )
python
{ "resource": "" }
q263984
EventEmitter.add_listener
validation
def add_listener(self, event, listener):
    """Bind a listener to a particular event.

    Args:
        event (str): The name of the event to listen for. This may be any
            string value.
        listener (def or async def): The callback to execute when the event
            fires. This may be a sync or async function.
    """
    self.emit('new_listener', event, listener)
    self._listeners[event].append(listener)
    self._check_limit(event)
    return self
python
{ "resource": "" }
q263985
EventEmitter.once
validation
def once(self, event, listener):
    """Add a listener that is only called once."""
    self.emit('new_listener', event, listener)
    self._once[event].append(listener)
    self._check_limit(event)
    return self
python
{ "resource": "" }
q263986
EventEmitter.remove_listener
validation
def remove_listener(self, event, listener):
    """Remove a listener from the emitter.

    Args:
        event (str): The event name on which the listener is bound.
        listener: A reference to the same object given to add_listener.

    Returns:
        bool: True if a listener was removed else False.

    This method only removes one listener at a time. If a listener is
    attached multiple times then this method must be called repeatedly.
    Additionally, this method removes listeners first from those
    registered with 'on' or 'add_listener'. If none are found it continues
    to remove afterwards from those added with 'once'.
    """
    with contextlib.suppress(ValueError):
        self._listeners[event].remove(listener)
        return True

    with contextlib.suppress(ValueError):
        self._once[event].remove(listener)
        return True

    return False
python
{ "resource": "" }
q263987
EventEmitter._dispatch_coroutine
validation
def _dispatch_coroutine(self, event, listener, *args, **kwargs):
    """Schedule a coroutine for execution.

    Args:
        event (str): The name of the event that triggered this call.
        listener (async def): The async def that needs to be executed.
        *args: Any number of positional arguments.
        **kwargs: Any number of keyword arguments.

    The values of *args and **kwargs are passed, unaltered, to the async
    def when generating the coro. If there is an exception generating the
    coro, such as the wrong number of arguments, the emitter's error event
    is triggered. If the triggering event _is_ the emitter's error event
    then the exception is reraised. The reraised exception may show in
    debug mode for the event loop but is otherwise silently dropped.
    """
    try:
        coro = listener(*args, **kwargs)
    except Exception as exc:
        if event == self.LISTENER_ERROR_EVENT:
            raise
        return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)

    asyncio.ensure_future(
        _try_catch_coro(self, event, listener, coro),
        loop=self._loop,
    )
python
{ "resource": "" }
q263988
EventEmitter._dispatch_function
validation
def _dispatch_function(self, event, listener, *args, **kwargs):
    """Execute a sync function.

    Args:
        event (str): The name of the event that triggered this call.
        listener (def): The def that needs to be executed.
        *args: Any number of positional arguments.
        **kwargs: Any number of keyword arguments.

    The values of *args and **kwargs are passed, unaltered, to the def
    when executing. If there is an exception executing the def, such as
    the wrong number of arguments, the emitter's error event is triggered.
    If the triggering event _is_ the emitter's error event then the
    exception is reraised. The reraised exception may show in debug mode
    for the event loop but is otherwise silently dropped.
    """
    try:
        return listener(*args, **kwargs)
    except Exception as exc:
        if event == self.LISTENER_ERROR_EVENT:
            raise
        return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)
python
{ "resource": "" }
q263989
EventEmitter._dispatch
validation
def _dispatch(self, event, listener, *args, **kwargs):
    """Dispatch an event to a listener.

    Args:
        event (str): The name of the event that triggered this call.
        listener (def or async def): The listener to trigger.
        *args: Any number of positional arguments.
        **kwargs: Any number of keyword arguments.

    This method inspects the listener. If it is a def it dispatches the
    listener to a method that will execute that def. If it is an async def
    it dispatches it to a method that will schedule the resulting coro
    with the event loop.
    """
    if (asyncio.iscoroutinefunction(listener) or
            (isinstance(listener, functools.partial) and
             asyncio.iscoroutinefunction(listener.func))):
        return self._dispatch_coroutine(event, listener, *args, **kwargs)

    return self._dispatch_function(event, listener, *args, **kwargs)
python
{ "resource": "" }
q263990
EventEmitter.emit
validation
def emit(self, event, *args, **kwargs):
    """Call each listener for the event with the given arguments.

    Args:
        event (str): The event to trigger listeners on.
        *args: Any number of positional arguments.
        **kwargs: Any number of keyword arguments.

    This method passes all arguments other than the event name directly
    to the listeners. If a listener raises an exception for any reason
    the 'listener-error', or current value of LISTENER_ERROR_EVENT, is
    emitted. Listeners to this event are given the event name, listener
    object, and the exception raised. If an error listener fails it does
    so silently.

    All event listeners are fired in a deferred way so this method
    returns immediately. The calling coro must yield at some point for
    the event to propagate to the listeners.
    """
    listeners = self._listeners[event]
    listeners = itertools.chain(listeners, self._once[event])
    self._once[event] = []
    for listener in listeners:
        self._loop.call_soon(
            functools.partial(
                self._dispatch,
                event,
                listener,
                *args,
                **kwargs,
            )
        )

    return self
python
{ "resource": "" }
q263991
EventEmitter.count
validation
def count(self, event):
    """Get the number of listeners for the event.

    Args:
        event (str): The event for which to count all listeners.

    The resulting count is a combination of listeners added using
    'on'/'add_listener' and 'once'.
    """
    return len(self._listeners[event]) + len(self._once[event])
python
{ "resource": "" }
q263992
genPNGs
validation
def genPNGs(folder,files=None):
    """Convert each TIF to PNG. Return filenames of the TIFs that were converted."""
    if files is None:
        files=glob.glob(folder+"/*.*")
    new=[]
    for fname in files:
        ext=os.path.basename(fname).split(".")[-1].lower()
        if ext in ['tif','tiff']:
            if not os.path.exists(fname+".png"):
                print(" -- converting %s to PNG..."%os.path.basename(fname))
                cm.image_convert(fname) # fancy burn-in of image data
                new.append(fname)
            else:
                pass # already converted this TIF to PNG
    return new
python
{ "resource": "" }
q263993
htmlABF
validation
def htmlABF(ID,group,d,folder,overwrite=False):
    """given an ID and the dict of files, generate a static html for that abf."""
    fname=folder+"/swhlab4/%s_index.html"%ID
    if overwrite is False and os.path.exists(fname):
        return
    html=TEMPLATES['abf']
    html=html.replace("~ID~",ID)
    html=html.replace("~CONTENT~",htmlABFcontent(ID,group,d))
    print(" <- writing [%s]"%os.path.basename(fname))
    with open(fname,'w') as f:
        f.write(html)
    return
python
{ "resource": "" }
q263994
genIndex
validation
def genIndex(folder,forceIDs=[]):
    """expects a folder of ABFs."""
    if not os.path.exists(folder+"/swhlab4/"):
        print(" !! cannot index if no /swhlab4/")
        return
    timestart=cm.timethis()
    files=glob.glob(folder+"/*.*") # ABF folder
    files.extend(glob.glob(folder+"/swhlab4/*.*"))
    print(" -- indexing glob took %.02f ms"%(cm.timethis(timestart)*1000))
    files.extend(genPNGs(folder,files))
    files=sorted(files)
    timestart=cm.timethis()
    d=cm.getIDfileDict(files) # TODO: this is really slow
    print(" -- filedict length:",len(d))
    print(" -- generating ID dict took %.02f ms"%(cm.timethis(timestart)*1000))
    groups=cm.getABFgroups(files)
    print(" -- groups length:",len(groups))
    for ID in sorted(list(groups.keys())):
        overwrite=False
        for abfID in groups[ID]:
            if abfID in forceIDs:
                overwrite=True
        try:
            htmlABF(ID,groups[ID],d,folder,overwrite)
        except:
            print("~~ HTML GENERATION FAILED!!!")
    menu=expMenu(groups,folder)
    makeSplash(menu,folder)
    makeMenu(menu,folder)
    htmlFrames(d,folder)
    makeMenu(menu,folder)
    makeSplash(menu,folder)
python
{ "resource": "" }
q263995
plotAllSweeps
validation
def plotAllSweeps(abfFile):
    """simple example of how to load an ABF file and plot every sweep."""
    r = io.AxonIO(filename=abfFile)
    bl = r.read_block(lazy=False, cascade=True)
    print(abfFile+"\nplotting %d sweeps..."%len(bl.segments))
    plt.figure(figsize=(12,10))
    plt.title(abfFile)
    for sweep in range(len(bl.segments)):
        trace = bl.segments[sweep].analogsignals[0]
        plt.plot(trace.times-trace.times[0],trace.magnitude,alpha=.5)
    plt.ylabel(trace.dimensionality)
    plt.xlabel("seconds")
    plt.show()
    plt.close()
python
{ "resource": "" }
q263996
plot_shaded_data
validation
def plot_shaded_data(X,Y,variances,varianceX):
    """plot X and Y data, then shade its background by variance."""
    plt.plot(X,Y,color='k',lw=2)
    nChunks=int(len(Y)/CHUNK_POINTS)
    for i in range(0,100,PERCENT_STEP):
        varLimitLow=np.percentile(variances,i)
        varLimitHigh=np.percentile(variances,i+PERCENT_STEP)
        varianceIsAboveMin=np.where(variances>=varLimitLow)[0]
        varianceIsBelowMax=np.where(variances<=varLimitHigh)[0]
        varianceIsRange=[chunkNumber for chunkNumber in range(nChunks) \
                         if chunkNumber in varianceIsAboveMin \
                         and chunkNumber in varianceIsBelowMax]
        for chunkNumber in varianceIsRange:
            t1=chunkNumber*CHUNK_POINTS/POINTS_PER_SEC
            t2=t1+CHUNK_POINTS/POINTS_PER_SEC
            plt.axvspan(t1,t2,alpha=.3,color=COLORMAP(i/100),lw=0)
python
{ "resource": "" }
q263997
show_variances
validation
def show_variances(Y,variances,varianceX,logScale=False):
    """create some fancy graphs to show color-coded variances."""
    plt.figure(1,figsize=(10,7))
    plt.figure(2,figsize=(10,7))
    varSorted=sorted(variances)

    plt.figure(1)
    plt.subplot(211)
    plt.grid()
    plt.title("chronological variance")
    plt.ylabel("original data")
    plot_shaded_data(X,Y,variances,varianceX)
    plt.margins(0,.1)
    plt.subplot(212)
    plt.ylabel("variance (pA) (log%s)"%str(logScale))
    plt.xlabel("time in sweep (sec)")
    plt.plot(varianceX,variances,'k-',lw=2)

    plt.figure(2)
    plt.ylabel("variance (pA) (log%s)"%str(logScale))
    plt.xlabel("chunk number")
    plt.title("sorted variance")
    plt.plot(varSorted,'k-',lw=2)

    for i in range(0,100,PERCENT_STEP):
        varLimitLow=np.percentile(variances,i)
        varLimitHigh=np.percentile(variances,i+PERCENT_STEP)
        label="%2d-%d percentile"%(i,i+PERCENT_STEP)
        color=COLORMAP(i/100)
        print("%s: variance = %.02f - %.02f"%(label,varLimitLow,varLimitHigh))
        plt.figure(1)
        plt.axhspan(varLimitLow,varLimitHigh,alpha=.5,lw=0,color=color,label=label)
        plt.figure(2)
        chunkLow=np.where(varSorted>=varLimitLow)[0][0]
        chunkHigh=np.where(varSorted>=varLimitHigh)[0][0]
        plt.axvspan(chunkLow,chunkHigh,alpha=.5,lw=0,color=color,label=label)

    for fignum in [1,2]:
        plt.figure(fignum)
        if logScale:
            plt.semilogy()
        plt.margins(0,0)
        plt.grid()
        if fignum == 2:
            plt.legend(fontsize=10,loc='upper left',shadow=True)
        plt.tight_layout()
        plt.savefig('2016-12-15-variance-%d-log%s.png'%(fignum,str(logScale)))
    plt.show()
python
{ "resource": "" }
q263998
AP.ensureDetection
validation
def ensureDetection(self):
    """
    run this before analysis. Checks if event detection occurred.
    If not, runs AP detection on all sweeps.
    """
    if self.APs==False:
        self.log.debug("analysis attempted before event detection...")
        self.detect()
python
{ "resource": "" }
q263999
AP.detect
validation
def detect(self):
    """runs AP detection on every sweep."""
    self.log.info("initializing AP detection on all sweeps...")
    t1=cm.timeit()
    for sweep in range(self.abf.sweeps):
        self.detectSweep(sweep)
    self.log.info("AP analysis of %d sweeps found %d APs (completed in %s)",
                  self.abf.sweeps,len(self.APs),cm.timeit(t1))
python
{ "resource": "" }