| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q263800 | CIDR.set_ip | validation | def set_ip(self, ip):
"""Change the current IP."""
| python | {
"resource": ""
} |
q263801 | CIDR.set_netmask | validation | def set_netmask(self, netmask):
"""Change the current netmask."""
| python | {
"resource": ""
} |
q263802 | CIDR.is_valid_ip | validation | def is_valid_ip(self, ip):
"""Return true if the given address in amongst the usable addresses,
or if the given CIDR is contained in this one."""
if not isinstance(ip, (IPv4Address, CIDR)):
if str(ip).find('/') == -1:
ip = IPv4Address(ip)
else:
# Support for CIDR strings/objects, an idea of Nicola Novello.
ip = CIDR(ip)
if isinstance(ip, IPv4Address):
if ip < self._first_ip or ip > self._last_ip:
| python | {
"resource": ""
} |
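The sample above ends inside the `IPv4Address` branch. A minimal sketch of the remaining logic, assuming iplib-style `IPv4Address`/`CIDR` objects with `_first_ip`/`_last_ip` bounds (the CIDR-containment branch in particular is an assumption):

```python
def is_valid_ip(self, ip):
    """Return true if the given address is amongst the usable addresses,
    or if the given CIDR is contained in this one."""
    if not isinstance(ip, (IPv4Address, CIDR)):
        if str(ip).find('/') == -1:
            ip = IPv4Address(ip)
        else:
            # Support for CIDR strings/objects, an idea of Nicola Novello.
            ip = CIDR(ip)
    if isinstance(ip, IPv4Address):
        # A single address must fall within the usable range.
        if ip < self._first_ip or ip > self._last_ip:
            return False
    else:
        # A CIDR is contained if both of its ends fall within our range.
        if ip._first_ip < self._first_ip or ip._last_ip > self._last_ip:
            return False
    return True
```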
q263803 | S3tools.upload_file | validation | async def upload_file(self, bucket, file, uploadpath=None, key=None,
ContentType=None, **kw):
"""Upload a file to S3 possibly using the multi-part uploader
Return the key uploaded
"""
is_filename = False
if hasattr(file, 'read'):
if hasattr(file, 'seek'):
file.seek(0)
file = file.read()
size = len(file)
elif key:
size = len(file)
else:
is_filename = True
size = os.stat(file).st_size
key = os.path.basename(file)
assert key, 'key not available'
if not ContentType:
ContentType, _ = mimetypes.guess_type(key)
if uploadpath:
if not uploadpath.endswith('/'):
uploadpath = '%s/' % uploadpath
key = '%s%s' % (uploadpath, key)
params = dict(Bucket=bucket, Key=key)
if not ContentType:
ContentType = 'application/octet-stream'
params['ContentType'] = ContentType
| python | {
"resource": ""
} |
q263804 | S3tools.copy_storage_object | validation | async def copy_storage_object(self, source_bucket, source_key,
bucket, key):
"""Copy a file from one bucket into another
"""
info = await self.head_object(Bucket=source_bucket, Key=source_key)
size = info['ContentLength']
if size > MULTI_PART_SIZE:
result = await _multipart_copy(self, source_bucket, source_key,
| python | {
"resource": ""
} |
q263805 | S3tools.upload_folder | validation | def upload_folder(self, bucket, folder, key=None, skip=None,
content_types=None):
"""Recursively upload a ``folder`` into a backet.
:param bucket: bucket where to upload the folder to
:param folder: the folder location in the local file system
:param key: Optional key where the folder is uploaded
:param skip: Optional list of files to skip
| python | {
"resource": ""
} |
q263806 | FolderUploader._upload_file | validation | async def _upload_file(self, full_path):
"""Coroutine for uploading a single file
"""
rel_path = os.path.relpath(full_path, self.folder)
key = s3_key(os.path.join(self.key, rel_path))
ct = self.content_types.get(key.split('.')[-1])
with open(full_path, 'rb') as fp:
file = fp.read()
try:
await self.botocore.upload_file(self.bucket, file, key=key,
ContentType=ct)
except Exception as exc:
LOGGER.error('Could not | python | {
"resource": ""
} |
q263807 | PusherChannel.trigger | validation | async def trigger(self, event, data=None, socket_id=None):
'''Trigger an ``event`` on this channel
'''
json_data = json.dumps(data, cls=self.pusher.encoder)
query_string = self.signed_query(event, json_data, socket_id)
signed_path = "%s?%s" % (self.path, query_string)
pusher = self.pusher
absolute_url = pusher.get_absolute_path(signed_path) | python | {
"resource": ""
} |
q263808 | Pusher.connect | validation | async def connect(self):
'''Connect to a Pusher websocket
'''
if not self._consumer:
waiter = self._waiter = asyncio.Future()
try:
address = self._websocket_host()
self.logger.info('Connect to %s', address)
| python | {
"resource": ""
} |
q263809 | Pusher.on_message | validation | def on_message(self, websocket, message):
'''Handle websocket incoming messages
'''
waiter = self._waiter
self._waiter = None
encoded = json.loads(message)
event = encoded.get('event')
channel = encoded.get('channel')
data = json.loads(encoded.get('data'))
try:
if event == PUSHER_ERROR:
raise PusherError(data['message'], data['code'])
elif event == PUSHER_CONNECTION:
| python | {
"resource": ""
} |
q263810 | const_equal | validation | def const_equal(str_a, str_b):
'''Constant time string comparison'''
if len(str_a) != len(str_b):
return False
result = True | python | {
"resource": ""
} |
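The comparison loop is truncated right after `result = True`. A completion consistent with that prefix accumulates the per-character comparison instead of returning early, so the running time depends only on the length, not on where the strings differ (in modern Python, `hmac.compare_digest` does this for you):

```python
def const_equal(str_a, str_b):
    '''Constant time string comparison'''
    if len(str_a) != len(str_b):
        return False
    result = True
    for a, b in zip(str_a, str_b):
        # Accumulate rather than short-circuit so every character is
        # always compared, regardless of where a mismatch occurs.
        result &= (a == b)
    return result
```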
q263811 | decode_html_entities | validation | def decode_html_entities(html):
"""
Decodes a limited set of HTML entities.
"""
if not html:
return html
for | python | {
"resource": ""
} |
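The entity loop is cut off at `for`. The exact entity table is not visible in the sample; the mapping below is an illustrative assumption (note `&amp;` is decoded last so it cannot create new entities):

```python
def decode_html_entities(html):
    """
    Decodes a limited set of HTML entities.
    """
    if not html:
        return html
    for entity, char in (('&lt;', '<'), ('&gt;', '>'),
                         ('&quot;', '"'), ('&#39;', "'"),
                         ('&amp;', '&')):  # ampersand last, on purpose
        html = html.replace(entity, char)
    return html
```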
q263812 | EncryptedPickle.set_signature_passphrases | validation | def set_signature_passphrases(self, signature_passphrases):
'''Set signature passphrases'''
self.signature_passphrases = self._update_dict(signature_passphrases,
| python | {
"resource": ""
} |
q263813 | EncryptedPickle.set_encryption_passphrases | validation | def set_encryption_passphrases(self, encryption_passphrases):
'''Set encryption passphrases'''
self.encryption_passphrases = self._update_dict(encryption_passphrases, | python | {
"resource": ""
} |
q263814 | EncryptedPickle.set_algorithms | validation | def set_algorithms(self, signature=None, encryption=None,
serialization=None, compression=None):
'''Set algorithms used for sealing. Defaults can not be overridden.'''
self.signature_algorithms = \
self._update_dict(signature, self.DEFAULT_SIGNATURE)
self.encryption_algorithms = \
self._update_dict(encryption, self.DEFAULT_ENCRYPTION)
| python | {
"resource": ""
} |
q263815 | EncryptedPickle.get_algorithms | validation | def get_algorithms(self):
'''Get algorithms used for sealing'''
return {
'signature': self.signature_algorithms,
'encryption': self.encryption_algorithms,
| python | {
"resource": ""
} |
q263816 | EncryptedPickle._set_options | validation | def _set_options(self, options):
'''Private function for setting options used for sealing'''
if not options:
return self.options.copy()
options = options.copy()
if 'magic' in options:
self.set_magic(options['magic'])
del(options['magic'])
if 'flags' in options:
flags = options['flags']
del(options['flags'])
for key, value in flags.iteritems():
| python | {
"resource": ""
} |
q263817 | EncryptedPickle.verify_signature | validation | def verify_signature(self, data):
'''Verify sealed data signature'''
data = self._remove_magic(data)
data = urlsafe_nopadding_b64decode(data)
| python | {
"resource": ""
} |
q263818 | EncryptedPickle._encode | validation | def _encode(self, data, algorithm, key=None):
'''Encode data with specific algorithm'''
if algorithm['type'] == 'hmac':
return data + self._hmac_generate(data, algorithm, key)
elif algorithm['type'] == 'aes':
return self._aes_encrypt(data, algorithm, key)
| python | {
"resource": ""
} |
q263819 | EncryptedPickle._decode | validation | def _decode(self, data, algorithm, key=None):
'''Decode data with specific algorithm'''
if algorithm['type'] == 'hmac':
verify_signature = data[-algorithm['hash_size']:]
data = data[:-algorithm['hash_size']]
signature = self._hmac_generate(data, algorithm, key)
if not const_equal(verify_signature, signature):
raise Exception('Invalid signature')
return data
elif algorithm['type'] == 'aes':
return self._aes_decrypt(data, algorithm, key)
elif algorithm['type'] == 'no-serialization':
return data
| python | {
"resource": ""
} |
q263820 | EncryptedPickle._sign_data | validation | def _sign_data(self, data, options):
'''Add signature to data'''
if options['signature_algorithm_id'] not in self.signature_algorithms:
raise Exception('Unknown signature algorithm id: %d'
% options['signature_algorithm_id'])
signature_algorithm = \
self.signature_algorithms[options['signature_algorithm_id']]
algorithm = self._get_algorithm_info(signature_algorithm)
| python | {
"resource": ""
} |
q263821 | EncryptedPickle._unsign_data | validation | def _unsign_data(self, data, options):
'''Verify and remove signature'''
if options['signature_algorithm_id'] not in self.signature_algorithms:
raise Exception('Unknown signature algorithm id: %d'
% options['signature_algorithm_id'])
signature_algorithm = \
self.signature_algorithms[options['signature_algorithm_id']]
algorithm = self._get_algorithm_info(signature_algorithm)
key_salt = ''
if algorithm['salt_size']:
| python | {
"resource": ""
} |
q263822 | EncryptedPickle._remove_magic | validation | def _remove_magic(self, data):
'''Verify and remove magic'''
if not self.magic:
return data
magic_size = len(self.magic)
magic = data[:magic_size]
| python | {
"resource": ""
} |
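The sample stops right after slicing off the magic prefix. The natural completion compares it against the expected value and strips it; this tail is an assumption:

```python
def _remove_magic(self, data):
    '''Verify and remove magic'''
    if not self.magic:
        return data
    magic_size = len(self.magic)
    magic = data[:magic_size]
    if magic != self.magic:
        raise Exception('Invalid magic')
    # Strip the verified magic prefix and hand back the payload.
    return data[magic_size:]
```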
q263823 | EncryptedPickle._add_header | validation | def _add_header(self, data, options):
'''Add header to data'''
# pylint: disable=W0142
version_info = self._get_version_info(options['version'])
flags = options['flags']
header_flags = dict(
(i, str(int(j))) for i, j in options['flags'].iteritems())
header_flags = ''.join(version_info['flags'](**header_flags))
header_flags = int(header_flags, 2)
options['flags'] = header_flags
header = version_info['header']
| python | {
"resource": ""
} |
q263824 | EncryptedPickle._read_header | validation | def _read_header(self, data):
'''Read header from data'''
# pylint: disable=W0212
version = self._read_version(data)
version_info = self._get_version_info(version)
header_data = data[:version_info['header_size']]
header = version_info['header']
header = header._make(
unpack(version_info['header_format'], header_data))
header = dict(header._asdict())
flags = list("{0:0>8b}".format(header['flags']))
flags = dict(version_info['flags']._make(flags)._asdict())
flags = dict((i, bool(int(j))) for i, j in flags.iteritems())
header['flags'] = flags
timestamp = None
if flags['timestamp']: | python | {
"resource": ""
} |
q263825 | EncryptedPickle._remove_header | validation | def _remove_header(self, data, options):
'''Remove header from data'''
version_info = self._get_version_info(options['version'])
header_size = version_info['header_size']
| python | {
"resource": ""
} |
q263826 | EncryptedPickle._read_version | validation | def _read_version(self, data):
'''Read header version from data'''
version = ord(data[0])
if version not in self.VERSIONS:
| python | {
"resource": ""
} |
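Only the lookup guard survives in the sample; a plausible completion raises on unknown versions and otherwise returns the parsed byte (this is Python 2-era code, hence `ord(data[0])`):

```python
def _read_version(self, data):
    '''Read header version from data'''
    version = ord(data[0])
    if version not in self.VERSIONS:
        raise Exception('Version not defined: %d' % version)
    return version
```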
q263827 | EncryptedPickle._get_algorithm_info | validation | def _get_algorithm_info(self, algorithm_info):
'''Get algorithm info'''
if algorithm_info['algorithm'] not in self.ALGORITHMS:
raise Exception('Algorithm not supported: %s'
% algorithm_info['algorithm'])
| python | {
"resource": ""
} |
q263828 | EncryptedPickle._generate_key | validation | def _generate_key(pass_id, passphrases, salt, algorithm):
'''Generate and return PBKDF2 key'''
if pass_id not in passphrases:
raise Exception('Passphrase not defined for id: %d' % pass_id)
passphrase = passphrases[pass_id]
if len(passphrase) < 32:
raise Exception('Passphrase less than 32 characters long')
| python | {
"resource": ""
} |
q263829 | EncryptedPickle._update_dict | validation | def _update_dict(data, default_data, replace_data=False):
'''Update algorithm definition type dictionaries'''
if not data:
data = default_data.copy()
return data
if not isinstance(data, dict):
raise TypeError('Value not dict type')
if len(data) > 255:
raise ValueError('More than 255 values defined')
for i in data.keys():
if not isinstance(i, int):
| python | {
"resource": ""
} |
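The validation loop is cut off mid-check. A sketch of the remainder, shown as a standalone function; the extra range check and the merge direction (user values layered over the defaults unless `replace_data` is set) are assumptions:

```python
def _update_dict(data, default_data, replace_data=False):
    '''Update algorithm definition type dictionaries'''
    if not data:
        data = default_data.copy()
        return data
    if not isinstance(data, dict):
        raise TypeError('Value not dict type')
    if len(data) > 255:
        raise ValueError('More than 255 values defined')
    for i in data.keys():
        if not isinstance(i, int):
            raise TypeError('Index not int type')
        if i < 0 or i > 255:
            raise ValueError('Index value out of range')
    if replace_data:
        return data
    # Layer the supplied entries over the defaults.
    merged = default_data.copy()
    merged.update(data)
    return merged
```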
q263830 | RemoteZip.getTableOfContents | validation | def getTableOfContents(self):
"""
This function populates the internal tableOfContents list with the contents
of the zip file TOC. If the server does not support ranged requests, this will raise
an exception. It will also throw an exception if the TOC cannot be found.
"""
self.directory_size = self.getDirectorySize()
if self.directory_size > 65536:
self.directory_size += 2
self.requestContentDirectory()
# and find the offset from start of file where it can be found
directory_start = unpack("i", self.raw_bytes[self.directory_end + 16: self.directory_end + 20])[0]
# find the data in the raw_bytes
self.raw_bytes = self.raw_bytes
current_start = directory_start - self.start
filestart = 0
compressedsize = 0
tableOfContents = []
try:
while True:
# get file name size (n), extra len (m) and comm len (k)
zip_n = unpack("H", self.raw_bytes[current_start + 28: current_start + 28 + 2])[0]
zip_m = unpack("H", self.raw_bytes[current_start + 30: current_start + 30 + 2])[0]
zip_k = unpack("H", self.raw_bytes[current_start + 32: current_start + 32 + 2])[0]
filename = self.raw_bytes[current_start + 46: current_start + 46 + zip_n]
| python | {
"resource": ""
} |
q263831 | RemoteZip.extractFile | validation | def extractFile(self, filename):
"""
This function will extract a single file from the remote zip without downloading
the entire zip file. The filename argument should match whatever is in the 'filename'
key of the tableOfContents.
"""
files = [x for x in self.tableOfContents if x['filename'] == filename]
if len(files) == 0:
raise FileNotFoundException()
fileRecord = files[0]
# got here? need to fetch the file size
metaheadroom = 1024 # should be enough
request = urllib2.Request(self.zipURI)
start = fileRecord['filestart']
end = fileRecord['filestart'] + fileRecord['compressedsize'] + metaheadroom
request.headers['Range'] = "bytes=%s-%s" % (start, end)
handle = urllib2.urlopen(request)
# make sure the response is ranged
return_range = handle.headers.get('Content-Range')
if return_range != "bytes %d-%d/%s" % (start, end, self.filesize):
raise Exception("Ranged requests are not supported for this URI")
filedata = handle.read()
# find start of raw file data
zip_n = unpack("H", filedata[26:28])[0]
zip_m = unpack("H", filedata[28:30])[0]
# check compressed size
has_data_descriptor = bool(unpack("H", filedata[6:8])[0] & 8)
comp_size = unpack("I", filedata[18:22])[0]
if comp_size == 0 and has_data_descriptor:
| python | {
"resource": ""
} |
q263832 | star.do_photometry | validation | def do_photometry(self):
"""
Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data
in each orientation. This function is called by other functions and generally the user will not need
to interact with it directly.
"""
std_f = np.zeros(4)
data_save = np.zeros_like(self.postcard)
self.obs_flux = np.zeros_like(self.reference_flux)
for i in range(4):
g = np.where(self.qs == i)[0]
wh = np.where(self.times[g] > 54947)
data_save[g] = np.roll(self.postcard[g], int(self.roll_best[i,0]), axis=1)
data_save[g] = np.roll(data_save[g], int(self.roll_best[i,1]), axis=2)
self.target_flux_pixels = data_save[:,self.targets == 1]
| python | {
"resource": ""
} |
q263833 | star.generate_panel | validation | def generate_panel(self, img):
"""
Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions
and generally not called by the user directly.
Args:
img: The data frame to be passed through to be plotted. A cutout of the ``integrated_postcard``
"""
plt.figure(figsize=(14,6))
ax = plt.gca()
fig = plt.gcf()
plt.subplot(122)
data_save = np.zeros_like(self.postcard)
self.roll_best = np.zeros((4,2))
for i in range(4):
g = np.where(self.qs == i)[0]
wh = np.where(self.times[g] > 54947)
self.roll_best[i] = self.do_rolltest(g, wh)
self.do_photometry()
for i in range(4):
g = np.where(self.qs == i)[0]
| python | {
"resource": ""
} |
q263834 | star.calc_centroids | validation | def calc_centroids(self):
"""
Identify the centroid positions for the target star at all epochs. Useful for verifying that there is
no correlation between flux and position, as might be expected for high proper motion stars.
"""
self.cm = np.zeros((len(self.postcard), 2))
| python | {
"resource": ""
} |
q263835 | star.define_spotsignal | validation | def define_spotsignal(self):
"""
Identify the "expected" flux value at the time of each observation based on the
Kepler long-cadence data, to ensure variations observed are not the effects of a single
large starspot. Only works if the target star was targeted for long or short cadence
observations during the primary mission.
"""
client = kplr.API()
star = client.star(self.kic)
lcs = star.get_light_curves(short_cadence=False)
time, flux, ferr, qual = [], [], [], []
for lc in lcs:
with lc.open() as f:
hdu_data = f[1].data
time.append(hdu_data["time"])
flux.append(hdu_data["pdcsap_flux"])
ferr.append(hdu_data["pdcsap_flux_err"])
qual.append(hdu_data["sap_quality"])
tout = np.array([])
fout = np.array([])
eout = np.array([])
for i in range(len(flux)):
t = time[i][qual[i] == 0]
f = flux[i][qual[i] == 0]
| python | {
"resource": ""
} |
q263836 | star.model_uncert | validation | def model_uncert(self):
"""
Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper.
Based on the kepcal package of Dan Foreman-Mackey.
"""
Y = self.photometry_array.T
Y /= np.median(Y, axis=1)[:, None]
C = np.median(Y, axis=0)
nstars, nobs = np.shape(Y)
Z | python | {
"resource": ""
} |
q263837 | Pbd._dump_field | validation | def _dump_field(self, fd):
"""Dump single field.
"""
v = {}
v['label'] = Pbd.LABELS[fd.label]
v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type]
v['name'] = fd.name
v['number'] = fd.number
v['default'] = '[default = {}]'.format(fd.default_value) if len(fd.default_value) > 0 else ''
| python | {
"resource": ""
} |
q263838 | Pbd.disassemble | validation | def disassemble(self):
"""Disassemble serialized protocol buffers file.
"""
ser_pb = open(self.input_file, 'rb').read() # Read serialized pb file
fd = FileDescriptorProto()
fd.ParseFromString(ser_pb)
self.name = fd.name
self._print('// Reversed by pbd (https://github.com/rsc-dev/pbd)')
self._print('syntax = "proto2";')
| python | {
"resource": ""
} |
q263839 | Pbd.find_imports | validation | def find_imports(self, pbds):
"""Find all missing imports in list of Pbd instances.
"""
# List of types used, but not defined
imports = list(set(self.uses).difference(set(self.defines)))
# Clumsy, but enough for now
for imp in imports:
for p in pbds:
if imp in p.defines:
| python | {
"resource": ""
} |
q263840 | fasta_dict_to_file | validation | def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None):
"""Write fasta_dict to fasta_file
:param fasta_dict: returned by fasta_file_to_dict
:param fasta_file: output file can be a string path or a file object
:param line_char_limit: None = no limit (default)
:return: None
"""
fasta_fp = fasta_file
if isinstance(fasta_file, str):
fasta_fp = | python | {
"resource": ""
} |
q263841 | Gff3.add_line_error | validation | def add_line_error(self, line_data, error_info, log_level=logging.ERROR):
"""Helper function to record and log an error message
:param line_data: dict
:param error_info: dict
:param logger:
:param log_level: int
:return:
"""
if not error_info: return
try:
line_data['line_errors'].append(error_info)
except KeyError:
line_data['line_errors'] = [error_info]
except TypeError: # no line_data
pass
| python | {
"resource": ""
} |
q263842 | Gff3.check_parent_boundary | validation | def check_parent_boundary(self):
"""
checks whether child features are within the coordinate boundaries of parent features
:return:
"""
for line in self.lines:
for parent_feature in line['parents']:
ok = False
for parent_line in parent_feature:
if parent_line['start'] <= line['start'] and line['end'] <= parent_line['end']:
ok = True
break
if | python | {
"resource": ""
} |
q263843 | Gff3.check_phase | validation | def check_phase(self):
"""
1. get a list of CDS with the same parent
2. sort according to strand
3. calculate and validate phase
"""
plus_minus = set(['+', '-'])
for k, g in groupby(sorted([line for line in self.lines if line['line_type'] == 'feature' and line['type'] == 'CDS' and 'Parent' in line['attributes']], key=lambda x: x['attributes']['Parent']), key=lambda x: x['attributes']['Parent']):
cds_list = list(g)
strand_set = list(set([line['strand'] for line in cds_list]))
if len(strand_set) != 1:
for line in cds_list:
self.add_line_error(line, {'message': 'Inconsistent CDS strand with parent: {0:s}'.format(k), 'error_type': 'STRAND'})
continue
if len(cds_list) == 1:
if cds_list[0]['phase'] != 0:
self.add_line_error(cds_list[0], {'message': 'Wrong phase {0:d}, should be {1:d}'.format(cds_list[0]['phase'], 0), 'error_type': 'PHASE'})
continue
strand = strand_set[0]
if strand not in plus_minus:
| python | {
"resource": ""
} |
q263844 | Gff3.adopt | validation | def adopt(self, old_parent, new_parent):
"""
Transfer children from old_parent to new_parent
:param old_parent: feature_id(str) or line_index(int) or line_data(dict) or feature
:param new_parent: feature_id(str) or line_index(int) or line_data(dict)
:return: List of children transferred
"""
try: # assume line_data(dict)
old_id = old_parent['attributes']['ID']
except TypeError:
try: # assume line_index(int)
old_id = self.lines[old_parent]['attributes']['ID']
except TypeError: # assume feature_id(str)
old_id = old_parent
old_feature = self.features[old_id]
old_indexes = [ld['line_index'] for ld in old_feature]
try: # assume line_data(dict)
new_id = new_parent['attributes']['ID']
except TypeError:
try: # assume line_index(int)
new_id = self.lines[new_parent]['attributes']['ID']
except TypeError: # assume feature_id(str)
new_id = new_parent
new_feature = self.features[new_id]
new_indexes = [ld['line_index'] for ld in new_feature]
# build a list of children to be moved
# add the child to the new parent's children list if it's not already there
# update the child's parent list and parent attribute
| python | {
"resource": ""
} |
q263845 | Gff3.remove | validation | def remove(self, line_data, root_type=None):
"""
Marks line_data and all of its associated feature's 'line_status' as 'removed'; it does not actually remove the line_data from the data structure.
The write function checks the 'line_status' when writing the gff file.
Find the root parent of line_data of type root_type, remove all of its descendants.
If the root parent has a parent with no children after the remove, remove the root parent's parent recursively.
:param line_data:
:param root_type:
:return:
"""
roots = [ld for ld in self.ancestors(line_data) if (root_type and ld['line_type'] == root_type) or (not root_type and not ld['parents'])] or [line_data]
for root in roots:
root['line_status'] = 'removed'
| python | {
"resource": ""
} |
q263846 | abfIDfromFname | validation | def abfIDfromFname(fname):
"""given a filename, return the ABFs ID string."""
fname=os.path.abspath(fname)
| python | {
"resource": ""
} |
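The body ends right after normalising the path; the ID is presumably the file name without its extension, sketched below:

```python
import os

def abfIDfromFname(fname):
    """given a filename, return the ABF's ID string."""
    fname = os.path.abspath(fname)
    basename = os.path.basename(fname)
    # e.g. "C:/data/17713001.abf" -> "17713001"
    return os.path.splitext(basename)[0]
```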
q263847 | abfProtocol | validation | def abfProtocol(fname):
"""Determine the protocol used to record an ABF file"""
f=open(fname,'rb')
raw=f.read(30*1000) #it should be in the first 30k of the file
f.close()
raw=raw.decode("utf-8","ignore")
raw=raw.split("Clampex")[1].split(".pro")[0]
| python | {
"resource": ""
} |
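After isolating the protocol path between "Clampex" and ".pro", the sample is truncated. Returning the path's basename as the protocol name is an assumption:

```python
import os

def abfProtocol(fname):
    """Determine the protocol used to record an ABF file"""
    with open(fname, 'rb') as f:
        raw = f.read(30 * 1000)  # the protocol path lives in the first 30 kB
    raw = raw.decode("utf-8", "ignore")
    raw = raw.split("Clampex")[1].split(".pro")[0]
    # raw now holds the protocol file path minus its extension;
    # keep just the file name as the protocol identifier.
    return os.path.basename(raw)
```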
q263848 | headerHTML | validation | def headerHTML(header,fname):
"""given the bytestring ABF header, make and launch HTML."""
html="<html><body><code>"
html+="<h2>%s</h2>"%(fname)
html+=pprint.pformat(header, indent=1)
html=html.replace("\n",'<br>').replace(" "," ")
| python | {
"resource": ""
} |
q263849 | ABF.setsweeps | validation | def setsweeps(self):
"""iterate over every sweep"""
for sweep in range(self.sweeps):
| python | {
"resource": ""
} |
q263850 | ABF.comments_load | validation | def comments_load(self):
"""read the header and populate self with information about comments"""
self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[]
self.comments=0 # will be >0 if comments exist
self.comment_text=""
try:
# this used to work
self.comment_tags = list(self.ABFblock.segments[0].eventarrays[0].annotations['comments'])
self.comment_times = list(self.ABFblock.segments[0].eventarrays[0].times/self.trace.itemsize)
self.comment_sweeps = list(self.comment_times)
except:
# now this notation seems to work
for events in self.ABFblock.segments[0].events: # this should only happen once actually
| python | {
"resource": ""
} |
q263851 | ABF.get_protocol_sequence | validation | def get_protocol_sequence(self,sweep):
"""
given a sweep, return the protocol as condensed sequence.
This is better for comparing similarities and determining steps.
There should be no | python | {
"resource": ""
} |
q263852 | ABF.average | validation | def average(self,t1=0,t2=None,setsweep=False):
"""return the average of part of the current sweep."""
if setsweep:
self.setsweep(setsweep)
if t2 is None or t2>self.sweepLength:
t2=self.sweepLength
self.log.debug("resetting t2 to [%f]",t2)
t1=max(t1,0)
if t1>t2:
| python | {
"resource": ""
} |
q263853 | ABF.averageSweep | validation | def averageSweep(self,sweepFirst=0,sweepLast=None):
"""
Return a sweep which is the average of multiple sweeps.
For now, standard deviation is lost.
"""
if sweepLast is None:
sweepLast=self.sweeps-1
nSweeps=sweepLast-sweepFirst+1
runningSum=np.zeros(len(self.sweepY))
self.log.debug("averaging sweep %d to %d",sweepFirst,sweepLast)
| python | {
"resource": ""
} |
q263854 | ABF.kernel_gaussian | validation | def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):
"""create kernel based on this ABF info."""
sigmaMS=sizeMS/10 if sigmaMS is None else sigmaMS
| python | {
"resource": ""
} |
q263855 | ABF.sweepYfiltered | validation | def sweepYfiltered(self):
"""
Get the filtered sweepY of the current sweep.
Only works if self.kernel has been generated.
"""
| python | {
"resource": ""
} |
q263856 | dictFlat | validation | def dictFlat(l):
"""Given a list of list of dicts, return just the dicts."""
if type(l) is dict:
return [l]
if "numpy" in str(type(l)):
return l
dicts=[]
for item in l:
if type(item)==dict:
| python | {
"resource": ""
} |
q263857 | matrixValues | validation | def matrixValues(matrix,key):
"""given a key, return a list of values from the matrix with that key."""
assert key in matrix.dtype.names
col=matrix.dtype.names.index(key)
| python | {
"resource": ""
} |
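With the column index in hand, the truncated tail presumably gathers that column from each record; a minimal sketch:

```python
def matrixValues(matrix, key):
    """given a key, return a list of values from the matrix with that key."""
    assert key in matrix.dtype.names
    col = matrix.dtype.names.index(key)
    return [row[col] for row in matrix]  # one value per record
```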
q263858 | matrixToDicts | validation | def matrixToDicts(data):
"""given a recarray, return it as a list of dicts."""
# 1D array
if "float" in str(type(data[0])):
d={}
for x in range(len(data)):
d[data.dtype.names[x]]=data[x]
return d
# 2D array
l=[] | python | {
"resource": ""
} |
q263859 | html_temp_launch | validation | def html_temp_launch(html):
"""given text, make it a temporary HTML file and launch it."""
fname = tempfile.gettempdir()+"/swhlab/temp.html"
| python | {
"resource": ""
} |
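The sample breaks off after building the temp path. A sketch of the rest: write the text and hand it to the default browser (the directory-creation step is an assumption):

```python
import os
import tempfile
import webbrowser

def html_temp_launch(html):
    """given text, make it a temporary HTML file and launch it."""
    fname = tempfile.gettempdir() + "/swhlab/temp.html"
    os.makedirs(os.path.dirname(fname), exist_ok=True)  # assumption: folder may not exist
    with open(fname, 'w') as f:
        f.write(html)
    webbrowser.open(fname)
```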
q263860 | checkOut | validation | def checkOut(thing,html=True):
"""show everything we can about an object's projects and methods."""
msg=""
for name in sorted(dir(thing)):
if not "__" in name:
msg+="<b>%s</b>\n"%name
try:
msg+=" ^-VALUE: %s\n"%getattr(thing,name)()
except:
pass
if html: | python | {
"resource": ""
} |
q263861 | matrixToHTML | validation | def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None):
"""Put 2d numpy data into a temporary HTML file."""
if not names:
names=[""]*len(data[0])
if data.dtype.names:
names=list(data.dtype.names)
if not units:
units=[""]*len(data[0])
for i in range(len(units)):
if names[i] in UNITS.keys():
units[i]=UNITS[names[i]]
if 'recarray' in str(type(data)): #make it a regular array
data=data.view(float).reshape(data.shape + (-1,))
if xCol and xCol in names:
xCol=names.index(xCol)
names.insert(0,names[xCol])
units.insert(0,units[xCol])
data=np.insert(data,0,data[:,xCol],1)
htmlFname = tempfile.gettempdir()+"/swhlab/WKS-%s.%s.html"%(bookName,sheetName)
html="""<body>
<style>
body {
background-color: #ababab;
padding:20px;
}
table {
font-size:12px;
border-spacing: 0;
border-collapse: collapse;
//border:2px solid #000000;
}
.name {background-color:#fafac8;text-align:center;}
.units {background-color:#fafac8;text-align:center;}
| python | {
"resource": ""
} |
q263862 | XMLtoPython | validation | def XMLtoPython(xmlStr=r"C:\Apps\pythonModules\GSTemp.xml"):
"""
given a string or a path to an XML file, return an XML object.
"""
#TODO: this absolute file path crazy stuff needs to stop!
| python | {
"resource": ""
} |
q263863 | algo_exp | validation | def algo_exp(x, m, t, b):
"""mono-exponential curve."""
| python | {
"resource": ""
} |
q263864 | where_cross | validation | def where_cross(data,threshold):
"""return a list of Is where the data first crosses above threshold."""
Is=np.where(data>threshold)[0]
| python | {
"resource": ""
} |
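Only the `np.where` call survives; the docstring asks for the *first* index of each crossing, which suggests dropping indices immediately preceded by another above-threshold sample. The exact original logic is an assumption:

```python
import numpy as np

def where_cross(data, threshold):
    """return a list of Is where the data first crosses above threshold."""
    Is = np.where(data > threshold)[0]
    if len(Is) == 0:
        return Is
    # Keep an index only if the previous sample was below threshold,
    # i.e. the first point of each contiguous above-threshold run.
    return np.concatenate(([Is[0]], Is[1:][np.diff(Is) > 1]))
```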
q263865 | originFormat | validation | def originFormat(thing):
"""Try to format anything as a 2D matrix with column names."""
if type(thing) is list and type(thing[0]) is dict:
return originFormat_listOfDicts(thing)
if type(thing) is list and type(thing[0]) is list:
| python | {
"resource": ""
} |
q263866 | pickle_save | validation | def pickle_save(thing,fname):
"""save something to a pickle file"""
| python | {
"resource": ""
} |
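The one missing line is almost certainly a `pickle.dump`; a minimal completion:

```python
import pickle

def pickle_save(thing, fname):
    """save something to a pickle file"""
    with open(fname, 'wb') as f:
        pickle.dump(thing, f)
    return thing  # returning the object is an assumption, for chaining
```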
q263867 | msgDict | validation | def msgDict(d,matching=None,sep1="=",sep2="\n",sort=True,cantEndWith=None):
"""convert a dictionary to a pretty formatted string."""
msg=""
if "record" in str(type(d)):
keys=d.dtype.names
else:
keys=d.keys()
if sort:
keys=sorted(keys)
for key in keys:
if key[0]=="_":
continue
if matching:
if not key in matching:
continue
| python | {
"resource": ""
} |
q263868 | determineProtocol | validation | def determineProtocol(fname):
"""determine the comment cooked in the protocol."""
f=open(fname,'rb')
raw=f.read(5000) #it should be in the first 5k of the file
f.close()
protoComment="unknown"
| python | {
"resource": ""
} |
q263869 | scanABFfolder | validation | def scanABFfolder(abfFolder):
"""
scan an ABF directory and its subdirectories. Try to do this just once.
Returns ABF files, SWHLab files, and groups.
"""
assert os.path.isdir(abfFolder)
filesABF=forwardSlash(sorted(glob.glob(abfFolder+"/*.*")))
filesSWH=[]
if os.path.exists(abfFolder+"/swhlab4/"):
| python | {
"resource": ""
} |
q263870 | getParent | validation | def getParent(abfFname):
"""given an ABF file name, return the ABF of its parent."""
child=os.path.abspath(abfFname)
files=sorted(glob.glob(os.path.dirname(child)+"/*.*"))
parentID=abfFname #its | python | {
"resource": ""
} |
q263871 | getParent2 | validation | def getParent2(abfFname,groups):
"""given an ABF and the groups dict, return the ID of its parent."""
if ".abf" in abfFname:
| python | {
"resource": ""
} |
q263872 | getNotesForABF | validation | def getNotesForABF(abfFile):
"""given an ABF, find the parent, return that line of experiments.txt"""
parent=getParent(abfFile)
parent=os.path.basename(parent).replace(".abf","")
expFile=os.path.dirname(abfFile)+"/experiment.txt"
if not os.path.exists(expFile):
return "no experiment file"
with open(expFile) as f:
raw=f.readlines()
for line in raw:
if line[0]=='~':
line=line[1:].strip()
if line.startswith(parent):
| python | {
"resource": ""
} |
q263873 | getIDsFromFiles | validation | def getIDsFromFiles(files):
"""given a path or list of files, return ABF IDs."""
if type(files) is str:
files=glob.glob(files+"/*.*")
| python | {
"resource": ""
} |
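The loop that actually extracts the IDs is truncated. Assuming an ID is an .abf file name without its extension (consistent with `abfIDfromFname` above):

```python
import glob
import os

def getIDsFromFiles(files):
    """given a path or list of files, return ABF IDs."""
    if type(files) is str:
        files = glob.glob(files + "/*.*")
    IDs = []
    for fname in files:
        if fname.lower().endswith(".abf"):
            IDs.append(os.path.basename(fname)[:-4])  # drop ".abf"
    return sorted(IDs)
```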
q263874 | inspectABF | validation | def inspectABF(abf=exampleABF,saveToo=False,justPlot=False):
"""May be given an ABF object or filename."""
pylab.close('all')
print(" ~~ inspectABF()")
if type(abf) is str:
abf=swhlab.ABF(abf)
swhlab.plot.new(abf,forceNewFigure=True)
if abf.sweepInterval*abf.sweeps<60*5: #shorter than 5 minutes
pylab.subplot(211)
pylab.title("%s [%s]"%(abf.ID,abf.protoComment))
swhlab.plot.sweep(abf,'all')
| python | {
"resource": ""
} |
q263875 | ftp_login | validation | def ftp_login(folder=None):
"""return an "FTP" object after logging in."""
pwDir=os.path.realpath(__file__)
for i in range(3):
pwDir=os.path.dirname(pwDir)
pwFile = os.path.join(pwDir,"passwd.txt")
print(" -- looking for login information in:\n [%s]"%pwFile)
try:
with open(pwFile) as f:
lines=f.readlines()
username=lines[0].strip()
password=lines[1].strip()
print(" -- found a valid username/password")
except:
print(" -- password lookup FAILED.")
username=TK_askPassword("FTP LOGIN","enter FTP username")
password=TK_askPassword("FTP LOGIN","enter password for %s"%username)
if not username or not password:
print(" !! failed getting | python | {
"resource": ""
} |
q263876 | ftp_folder_match | validation | def ftp_folder_match(ftp,localFolder,deleteStuff=True):
"""upload everything from localFolder into the current FTP folder."""
| python | {
"resource": ""
} |
q263877 | version_upload | validation | def version_upload(fname,username="nibjb"):
"""Only scott should do this. Upload new version to site."""
print("popping up pasword window...")
password=TK_askPassword("FTP LOGIN","enter password for %s"%username)
if not password:
return
print("username:",username)
| python | {
"resource": ""
} |
q263878 | TK_askPassword | validation | def TK_askPassword(title="input",msg="type here:"):
"""use the GUI to ask for a string."""
root = tkinter.Tk()
root.withdraw() #hide | python | {
"resource": ""
} |
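The sample ends while hiding the root window. `tkinter.simpledialog.askstring` is the obvious way to finish it; masking the input with `show='*'` is an assumption:

```python
import tkinter
from tkinter import simpledialog

def TK_askPassword(title="input", msg="type here:"):
    """use the GUI to ask for a string."""
    root = tkinter.Tk()
    root.withdraw()  # hide the empty root window
    root.attributes("-topmost", True)  # keep the dialog on top
    value = simpledialog.askstring(title, msg, show='*')
    root.destroy()
    return value
```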
q263879 | TK_message | validation | def TK_message(title,msg):
"""use the GUI to pop up a message."""
root = tkinter.Tk()
root.withdraw() #hide | python | {
"resource": ""
} |
q263880 | TK_ask | validation | def TK_ask(title,msg):
"""use the GUI to ask YES or NO."""
root = tkinter.Tk()
root.attributes("-topmost", True) | python | {
"resource": ""
} |
q263881 | processArgs | validation | def processArgs():
"""check out the arguments and figure out what to do."""
if len(sys.argv)<2:
print("\n\nERROR:")
print("this script requires arguments!")
print('try "python command.py info"')
return
if sys.argv[1]=='info':
print("import paths:\n ","\n ".join(sys.path))
print()
print("python version:",sys.version)
print("SWHLab path:",__file__)
print("SWHLab version:",swhlab.__version__)
return
if sys.argv[1]=='glanceFolder':
abfFolder=swhlab.common.gui_getFolder()
if not abfFolder or not os.path.isdir(abfFolder):
print("bad path")
| python | {
"resource": ""
} |
q263882 | stats_first | validation | def stats_first(abf):
"""provide all stats on the first AP."""
msg=""
for sweep in range(abf.sweeps):
for AP in abf.APs[sweep]:
for key in sorted(AP.keys()):
if key[-1] is "I" | python | {
"resource": ""
} |
q263883 | getAvgBySweep | validation | def getAvgBySweep(abf,feature,T0=None,T1=None):
"""return average of a feature divided by sweep."""
if T1 is None:
T1=abf.sweepLength
if T0 is None:
T0=0
data = [np.empty((0))]*abf.sweeps
for AP in cm.dictFlat(cm.matrixToDicts(abf.APs)):
if T0<AP['sweepT']<T1:
val=AP[feature]
data[int(AP['sweep'])]=np.concatenate((data[int(AP['sweep'])],[val]))
| python | {
"resource": ""
} |
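After collecting per-sweep values the sample is cut off; the name suggests collapsing each sweep's array to its mean. A sketch, assuming the `cm` helpers (`dictFlat`, `matrixToDicts`) shown earlier in this table and NaN for sweeps with no APs:

```python
import numpy as np

def getAvgBySweep(abf, feature, T0=None, T1=None):
    """return average of a feature divided by sweep."""
    if T1 is None:
        T1 = abf.sweepLength
    if T0 is None:
        T0 = 0
    data = [np.empty((0))] * abf.sweeps
    for AP in cm.dictFlat(cm.matrixToDicts(abf.APs)):
        if T0 < AP['sweepT'] < T1:
            val = AP[feature]
            data[int(AP['sweep'])] = np.concatenate((data[int(AP['sweep'])], [val]))
    # Collapse each sweep to its mean; empty sweeps become NaN.
    return [np.average(vals) if len(vals) else np.nan for vals in data]
```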
q263884 | lazygo | validation | def lazygo(watchFolder='../abfs/',reAnalyze=False,rebuildSite=False,
keepGoing=True,matching=False):
"""
continuously monitor a folder for new abfs and try to analyze them.
This is intended to watch only one folder, but can run multiple copies.
"""
abfsKnown=[]
while True:
print()
pagesNeeded=[]
for fname in glob.glob(watchFolder+"/*.abf"):
ID=os.path.basename(fname).replace(".abf","")
if not fname in abfsKnown:
if os.path.exists(fname.replace(".abf",".rsv")): #TODO: or something like this
continue
if matching and not matching in fname:
continue
abfsKnown.append(fname)
if os.path.exists(os.path.dirname(fname)+"/swhlab4/"+os.path.basename(fname).replace(".abf","_info.pkl")) and reAnalyze==False:
print("already analyzed",os.path.basename(fname))
| python | {
"resource": ""
} |
q263885 | gain | validation | def gain(abf):
"""easy way to plot a gain function."""
Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq'))
Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)])
| python | {
"resource": ""
} |
q263886 | comments | validation | def comments(abf,minutes=False):
"""draw vertical lines at comment points. Defaults to seconds."""
if not len(abf.commentTimes):
return
for i in range(len(abf.commentTimes)):
t,c = abf.commentTimes[i],abf.commentTags[i]
if minutes:
t=t/60
pylab.axvline(t,lw=1,color='r',ls="--",alpha=.5)
X1,X2,Y1,Y2=pylab.axis()
| python | {
"resource": ""
} |
q263887 | annotate | validation | def annotate(abf):
"""stamp the bottom with file info."""
msg="SWHLab %s "%str(swhlab.VERSION)
msg+="ID:%s "%abf.ID
msg+="CH:%d "%abf.channel
msg+="PROTOCOL:%s "%abf.protoComment
msg+="COMMAND: %d%s "%(abf.holding,abf.units)
msg+="GENERATED:%s "%'{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
pylab.annotate(msg,(.001,.001),xycoords='figure fraction',ha='left',
va='bottom',color='#999999',family='monospace',size=8,
| python | {
"resource": ""
} |
q263888 | new | validation | def new(ABF,forceNewFigure=False,title=None,xlabel=None,ylabel=None):
"""
makes a new matplotlib figure with default dims and DPI.
Also labels it with pA or mV depending on ABF.
"""
if len(pylab.get_fignums()) and forceNewFigure==False:
#print("adding to existing figure")
return
pylab.figure(figsize=(8,6))
pylab.grid(alpha=.5)
pylab.title(ABF.ID) | python | {
"resource": ""
} |
q263889 | save | validation | def save(abf,fname=None,tag=None,width=700,close=True,facecolor='w',
resize=True):
"""
Save the pylab figure somewhere.
If fname==False, show it instead.
Height force > dpi force
if a tag is given instead of a filename, save it alongside the ABF
"""
if len(pylab.gca().get_lines())==0:
print("can't save, no figure!")
return
if resize:
pylab.tight_layout()
pylab.subplots_adjust(bottom=.1)
annotate(abf)
if tag:
fname = abf.outpath+abf.ID+"_"+tag+".png"
inchesX,inchesY = pylab.gcf().get_size_inches()
dpi=width/inchesX
if fname:
if | python | {
"resource": ""
} |
q263890 | tryLoadingFrom | validation | def tryLoadingFrom(tryPath,moduleName='swhlab'):
"""if the module is in this path, load it from the local folder."""
if not 'site-packages' in swhlab.__file__:
print("loaded custom swhlab module from",
os.path.dirname(swhlab.__file__))
return # no need to warn if it's already outside.
while len(tryPath)>5:
sp=tryPath+"/swhlab/" # imaginary swhlab module path
if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"):
| python | {
"resource": ""
} |
q263891 | DynamicArgs.update | validation | def update(self, tids, info):
"""
Called to update the state of the iterator. This method
receives the set of task ids from the previous set of tasks
together with the launch information to allow the output
values to be parsed using the output_extractor. This data is then
used to determine the next desired point in the parameter
space by calling the _update_state method.
"""
outputs_dir = os.path.join(info['root_directory'], 'streams')
pattern = '%s_*_tid_*{tid}.o.{tid}*' % info['batch_name']
flist = os.listdir(outputs_dir)
try:
outputs = []
for tid in tids:
matches = fnmatch.filter(flist, pattern.format(tid=tid))
if len(matches) != 1:
self.warning("No unique output file for tid %d" % tid)
| python | {
"resource": ""
} |
q263892 | DynamicArgs.show | validation | def show(self):
"""
When dynamic, not all argument values may be available.
"""
copied = self.copy()
enumerated = [el for el in enumerate(copied)]
for (group_ind, specs) in enumerated:
if len(enumerated) > 1: print("Group %d" % group_ind)
ordering = self.constant_keys + self.varying_keys
# Ordered nicely by | python | {
"resource": ""
} |
q263893 | DynamicArgs._trace_summary | validation | def _trace_summary(self):
"""
Summarizes the trace of values used to update the DynamicArgs
and the arguments subsequently returned. May be used to
implement the summary method.
"""
for (i, (val, args)) in enumerate(self.trace):
if args is StopIteration:
info = "Terminated"
else:
pprint = ','.join('{' + ','.join('%s=%r' % (k,v)
| python | {
"resource": ""
} |
q263894 | SimpleGradientDescent._update_state | validation | def _update_state(self, vals):
"""
Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement.
"""
self._steps_complete += 1
if self._steps_complete == self.max_steps:
self._termination_info = (False, self._best_val, self._arg) | python | {
"resource": ""
} |
q263895 | analyze | validation | def analyze(fname=False,save=True,show=None):
"""given a filename or ABF object, try to analyze it."""
if fname and os.path.exists(fname.replace(".abf",".rst")):
print("SKIPPING DUE TO RST FILE")
return
swhlab.plotting.core.IMAGE_SAVE=save
if show is None:
if cm.isIpython():
swhlab.plotting.core.IMAGE_SHOW=True
else:
swhlab.plotting.core.IMAGE_SHOW=False
#swhlab.plotting.core.IMAGE_SHOW=show
abf=ABF(fname) # ensure it's a class
print(">>>>> PROTOCOL >>>>>",abf.protocomment)
runFunction="proto_unknown"
if "proto_"+abf.protocomment in globals():
| python | {
"resource": ""
} |
q263896 | frameAndSave | validation | def frameAndSave(abf,tag="",dataType="plot",saveAsFname=False,closeWhenDone=True):
"""
frame the current matplotlib plot with ABF info, and optionally save it.
Note that this is entirely independent of the ABFplot class object.
if saveAsFname is False, show it instead.
Datatype should be:
* plot
* experiment
"""
print("closeWhenDone",closeWhenDone)
plt.tight_layout()
plt.subplots_adjust(top=.93,bottom =.07)
plt.annotate(tag,(.01,.99),xycoords='figure fraction',ha='left',va='top',family='monospace',size=10,alpha=.5)
msgBot="%s [%s]"%(abf.ID,abf.protocomment)
plt.annotate(msgBot,(.01,.01),xycoords='figure fraction',ha='left',va='bottom',family='monospace',size=10,alpha=.5)
fname=tag.lower().replace(" ",'_')+".jpg"
| python | {
"resource": ""
} |
q263897 | ABFplot.figure | validation | def figure(self,forceNew=False):
"""make sure a figure is ready."""
if plt._pylab_helpers.Gcf.get_num_fig_managers()>0 and forceNew is False:
self.log.debug("figure already seen, not creating one.")
return
if self.subplot:
| python | {
"resource": ""
} |
q263898 | ABFplot.save | validation | def save(self,callit="misc",closeToo=True,fullpath=False):
"""save the existing figure. does not close it."""
if fullpath is False:
fname=self.abf.outPre+"plot_"+callit+".jpg"
else:
fname=callit
if not os.path.exists(os.path.dirname(fname)):
| python | {
"resource": ""
} |
q263899 | ABFplot.figure_sweeps | validation | def figure_sweeps(self, offsetX=0, offsetY=0):
"""plot every sweep of an ABF file."""
self.log.debug("creating overlayed sweeps plot")
self.figure()
for sweep in range(self.abf.sweeps):
self.abf.setsweep(sweep)
self.setColorBySweep()
| python | {
"resource": ""
} |