_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q263800
CIDR.set_ip
validation
def set_ip(self, ip):
    """Replace the address part of this CIDR, keeping the current netmask."""
    self.set(netmask=self._nm, ip=ip)
python
{ "resource": "" }
q263801
CIDR.set_netmask
validation
def set_netmask(self, netmask):
    """Replace the netmask of this CIDR, keeping the current address."""
    self.set(netmask=netmask, ip=self._ip)
python
{ "resource": "" }
q263802
CIDR.is_valid_ip
validation
def is_valid_ip(self, ip):
    """Return True if ``ip`` is among this CIDR's usable addresses, or if
    ``ip`` is itself a CIDR fully contained in this one.

    ``ip`` may be an IPv4Address, a CIDR, or a string; strings containing
    a '/' are parsed as CIDR, otherwise as a plain address.
    """
    if not isinstance(ip, (IPv4Address, CIDR)):
        if str(ip).find('/') == -1:
            ip = IPv4Address(ip)
        else:
            # Support for CIDR strings/objects, an idea of Nicola Novello.
            ip = CIDR(ip)
    if isinstance(ip, IPv4Address):
        # Single address: must lie within the usable host range.
        if ip < self._first_ip or ip > self._last_ip:
            return False
    elif isinstance(ip, CIDR):
        # NOTE: manage /31 networks; 127.0.0.1/31 is considered to
        # be included in 127.0.0.1/8.
        # For a /31 candidate inside a wider network, compare against the
        # full network/broadcast span instead of the usable-host span.
        if ip._nm._ip_dec == 0xFFFFFFFE \
           and self._nm._ip_dec != 0xFFFFFFFE:
            compare_to_first = self._net_ip._ip_dec
            compare_to_last = self._bc_ip._ip_dec
        else:
            compare_to_first = self._first_ip._ip_dec
            compare_to_last = self._last_ip._ip_dec
        if ip._first_ip._ip_dec < compare_to_first or \
           ip._last_ip._ip_dec > compare_to_last:
            return False
    return True
python
{ "resource": "" }
q263803
S3tools.upload_file
validation
async def upload_file(self, bucket, file, uploadpath=None, key=None,
                      ContentType=None, **kw):
    """Upload a file to S3 possibly using the multi-part uploader.

    ``file`` may be a file-like object (read in full), raw bytes (when
    ``key`` is given), or a filesystem path. Returns the response dict,
    guaranteed to carry ``Key`` and ``Bucket``.
    """
    is_filename = False
    if hasattr(file, 'read'):
        # File-like object: rewind if possible, then slurp contents.
        if hasattr(file, 'seek'):
            file.seek(0)
        file = file.read()
        size = len(file)
    elif key:
        # Raw body bytes with an explicit key.
        size = len(file)
    else:
        # Otherwise treat ``file`` as a path; key defaults to its basename.
        is_filename = True
        size = os.stat(file).st_size
        key = os.path.basename(file)
    assert key, 'key not available'
    if not ContentType:
        ContentType, _ = mimetypes.guess_type(key)
    if uploadpath:
        if not uploadpath.endswith('/'):
            uploadpath = '%s/' % uploadpath
        key = '%s%s' % (uploadpath, key)
    params = dict(Bucket=bucket, Key=key)
    if not ContentType:
        ContentType = 'application/octet-stream'
    params['ContentType'] = ContentType
    # Multi-part upload only for large on-disk files; everything else
    # goes through a single put_object call.
    if size > MULTI_PART_SIZE and is_filename:
        resp = await _multipart(self, file, params)
    elif is_filename:
        with open(file, 'rb') as fp:
            params['Body'] = fp.read()
        resp = await self.put_object(**params)
    else:
        params['Body'] = file
        resp = await self.put_object(**params)
    if 'Key' not in resp:
        resp['Key'] = key
    if 'Bucket' not in resp:
        resp['Bucket'] = bucket
    return resp
python
{ "resource": "" }
q263804
S3tools.copy_storage_object
validation
async def copy_storage_object(self, source_bucket, source_key, bucket, key):
    """Copy a file from one bucket into another.

    Uses the multi-part copy path when the source object exceeds
    MULTI_PART_SIZE; otherwise a single server-side copy_object call.
    """
    info = await self.head_object(Bucket=source_bucket, Key=source_key)
    size = info['ContentLength']
    if size > MULTI_PART_SIZE:
        result = await _multipart_copy(self, source_bucket, source_key,
                                       bucket, key, size)
    else:
        result = await self.copy_object(
            Bucket=bucket, Key=key,
            CopySource=_source_string(source_bucket, source_key)
        )
    return result
python
{ "resource": "" }
q263805
S3tools.upload_folder
validation
def upload_folder(self, bucket, folder, key=None, skip=None,
                  content_types=None):
    """Recursively upload a ``folder`` into a backet.

    :param bucket: bucket where to upload the folder to
    :param folder: the folder location in the local file system
    :param key: Optional key where the folder is uploaded
    :param skip: Optional list of files to skip
    :param content_types: Optional dictionary mapping suffixes to
        content types
    :return: a coroutine
    """
    worker = FolderUploader(self, bucket, folder, key, skip, content_types)
    return worker.start()
python
{ "resource": "" }
q263806
FolderUploader._upload_file
validation
async def _upload_file(self, full_path):
    """Coroutine for uploading a single file.

    On failure the file's size is recorded in ``self.failures``; on
    success in ``self.success``, and progress is logged.
    """
    rel_path = os.path.relpath(full_path, self.folder)
    key = s3_key(os.path.join(self.key, rel_path))
    # Content type looked up by file suffix (text after the last dot).
    ct = self.content_types.get(key.split('.')[-1])
    with open(full_path, 'rb') as fp:
        file = fp.read()
    try:
        await self.botocore.upload_file(self.bucket, file, key=key,
                                        ContentType=ct)
    except Exception as exc:
        LOGGER.error('Could not upload "%s": %s', key, exc)
        self.failures[key] = self.all.pop(full_path)
        return
    size = self.all.pop(full_path)
    self.success[key] = size
    self.total_size += size
    # Progress is the fraction of tracked files no longer pending.
    percentage = 100*(1 - len(self.all)/self.total_files)
    message = '{0:.0f}% completed - uploaded "{1}" - {2}'.format(
        percentage, key, convert_bytes(size))
    LOGGER.info(message)
python
{ "resource": "" }
q263807
PusherChannel.trigger
validation
async def trigger(self, event, data=None, socket_id=None):
    '''Trigger an ``event`` on this channel.

    The payload is JSON-encoded, the request signed, and POSTed to the
    Pusher REST endpoint. Returns True when Pusher accepts (HTTP 202).
    '''
    json_data = json.dumps(data, cls=self.pusher.encoder)
    query_string = self.signed_query(event, json_data, socket_id)
    signed_path = "%s?%s" % (self.path, query_string)
    pusher = self.pusher
    absolute_url = pusher.get_absolute_path(signed_path)
    response = await pusher.http.post(
        absolute_url, data=json_data,
        headers=[('Content-Type', 'application/json')])
    response.raise_for_status()
    return response.status_code == 202
python
{ "resource": "" }
q263808
Pusher.connect
validation
async def connect(self):
    '''Connect to a Pusher websocket.

    Idempotent: an existing consumer is returned as-is. Otherwise a
    Future is created which ``on_message`` resolves once the server
    confirms the connection (the 101 upgrade alone is not enough).
    '''
    if not self._consumer:
        waiter = self._waiter = asyncio.Future()
        try:
            address = self._websocket_host()
            self.logger.info('Connect to %s', address)
            self._consumer = await self.http.get(address)
            if self._consumer.status_code != 101:
                raise PusherError("Could not connect to websocket")
        except Exception as exc:
            # Propagate the failure to anyone awaiting the waiter too.
            waiter.set_exception(exc)
            raise
        else:
            # Wait for the pusher:connection_established message.
            await waiter
    return self._consumer
python
{ "resource": "" }
q263809
Pusher.on_message
validation
def on_message(self, websocket, message):
    '''Handle websocket incoming messages.

    Resolves the pending connect() waiter on the connection-established
    event, logs subscriptions, and dispatches channel events.
    '''
    # Consume the pending waiter (if any); it is resolved exactly once.
    waiter = self._waiter
    self._waiter = None
    encoded = json.loads(message)
    event = encoded.get('event')
    channel = encoded.get('channel')
    # Pusher double-encodes the payload: 'data' is itself a JSON string.
    data = json.loads(encoded.get('data'))
    try:
        if event == PUSHER_ERROR:
            raise PusherError(data['message'], data['code'])
        elif event == PUSHER_CONNECTION:
            self.socket_id = data.get('socket_id')
            self.logger.info('Succesfully connected on socket %s',
                             self.socket_id)
            waiter.set_result(self.socket_id)
        elif event == PUSHER_SUBSCRIBED:
            self.logger.info('Succesfully subscribed to %s',
                             encoded.get('channel'))
        elif channel:
            # Route any other channel event to the channel's handlers.
            self[channel]._event(event, data)
    except Exception as exc:
        if waiter:
            waiter.set_exception(exc)
        else:
            self.logger.exception('pusher error')
python
{ "resource": "" }
q263810
const_equal
validation
def const_equal(str_a, str_b):
    '''Compare two equal-length strings in constant time.

    Returns False immediately on a length mismatch; otherwise every
    character pair is examined so timing does not reveal the position
    of the first difference.
    '''
    if len(str_a) != len(str_b):
        return False
    equal = True
    for ch_a, ch_b in zip(str_a, str_b):
        equal &= (ch_a == ch_b)
    return equal
python
{ "resource": "" }
q263811
decode_html_entities
validation
def decode_html_entities(html):
    """
    Decodes a limited set of HTML entities.

    Only the entities listed in the module-level ``html_entity_map``
    are replaced; empty or falsy input is returned unchanged.
    """
    if not html:
        return html
    for entity, char in six.iteritems(html_entity_map):
        html = html.replace(entity, char)
    return html
python
{ "resource": "" }
q263812
EncryptedPickle.set_signature_passphrases
validation
def set_signature_passphrases(self, signature_passphrases):
    '''Set signature passphrases, replacing any previous mapping.'''
    validated = self._update_dict(signature_passphrases, {},
                                  replace_data=True)
    self.signature_passphrases = validated
python
{ "resource": "" }
q263813
EncryptedPickle.set_encryption_passphrases
validation
def set_encryption_passphrases(self, encryption_passphrases):
    '''Set encryption passphrases, replacing any previous mapping.'''
    validated = self._update_dict(encryption_passphrases, {},
                                  replace_data=True)
    self.encryption_passphrases = validated
python
{ "resource": "" }
q263814
EncryptedPickle.set_algorithms
validation
def set_algorithms(self, signature=None, encryption=None,
                   serialization=None, compression=None):
    '''Set algorithms used for sealing. Defaults can not be overridden.'''
    for attr, supplied, defaults in (
            ('signature_algorithms', signature, self.DEFAULT_SIGNATURE),
            ('encryption_algorithms', encryption, self.DEFAULT_ENCRYPTION),
            ('serialization_algorithms', serialization,
             self.DEFAULT_SERIALIZATION),
            ('compression_algorithms', compression,
             self.DEFAULT_COMPRESSION)):
        setattr(self, attr, self._update_dict(supplied, defaults))
python
{ "resource": "" }
q263815
EncryptedPickle.get_algorithms
validation
def get_algorithms(self):
    '''Return the currently configured sealing algorithms by category.'''
    algorithms = {}
    algorithms['signature'] = self.signature_algorithms
    algorithms['encryption'] = self.encryption_algorithms
    algorithms['serialization'] = self.serialization_algorithms
    algorithms['compression'] = self.compression_algorithms
    return algorithms
python
{ "resource": "" }
q263816
EncryptedPickle._set_options
validation
def _set_options(self, options):
    '''Private function for setting options used for sealing.

    Validates and merges ``options`` over the instance defaults and
    returns the merged dict; ``self.options`` itself is not reassigned.
    Flag values must be bool; other option values must be ints in 0-255.
    '''
    if not options:
        return self.options.copy()
    options = options.copy()
    if 'magic' in options:
        # 'magic' is applied immediately, not kept in the options dict.
        self.set_magic(options['magic'])
        del(options['magic'])
    if 'flags' in options:
        flags = options['flags']
        del(options['flags'])
        for key, value in flags.iteritems():
            if not isinstance(value, bool):
                raise TypeError('Invalid flag type for: %s' % key)
    else:
        flags = self.options['flags']
    if 'info' in options:
        # 'info' is read-only metadata; silently dropped here.
        del(options['info'])
    for key, value in options.iteritems():
        if not isinstance(value, int):
            raise TypeError('Invalid option type for: %s' % key)
        if value < 0 or value > 255:
            raise ValueError('Option value out of range for: %s' % key)
    new_options = self.options.copy()
    new_options.update(options)
    # NOTE(review): copy() is shallow, so new_options['flags'] aliases
    # self.options['flags']; this update also mutates the instance
    # defaults — confirm whether that is intended.
    new_options['flags'].update(flags)
    return new_options
python
{ "resource": "" }
q263817
EncryptedPickle.verify_signature
validation
def verify_signature(self, data):
    '''Verify sealed data signature.

    Strips the magic prefix, base64-decodes, reads the header to find
    the algorithm/passphrase options, then re-verifies the signature
    over the magic-prefixed payload. Raises on any mismatch; returns
    nothing on success.
    '''
    data = self._remove_magic(data)
    data = urlsafe_nopadding_b64decode(data)
    options = self._read_header(data)
    # The signature was computed over the magic-prefixed bytes, so the
    # magic is re-added before verification.
    data = self._add_magic(data)
    self._unsign_data(data, options)
python
{ "resource": "" }
q263818
EncryptedPickle._encode
validation
def _encode(self, data, algorithm, key=None): '''Encode data with specific algorithm''' if algorithm['type'] == 'hmac': return data + self._hmac_generate(data, algorithm, key) elif algorithm['type'] == 'aes': return self._aes_encrypt(data, algorithm, key) elif algorithm['type'] == 'no-serialization': return data elif algorithm['type'] == 'json': return json.dumps(data) elif algorithm['type'] == 'no-compression': return data elif algorithm['type'] == 'gzip': return self._zlib_compress(data, algorithm) else: raise Exception('Algorithm not supported: %s' % algorithm['type'])
python
{ "resource": "" }
q263819
EncryptedPickle._decode
validation
def _decode(self, data, algorithm, key=None): '''Decode data with specific algorithm''' if algorithm['type'] == 'hmac': verify_signature = data[-algorithm['hash_size']:] data = data[:-algorithm['hash_size']] signature = self._hmac_generate(data, algorithm, key) if not const_equal(verify_signature, signature): raise Exception('Invalid signature') return data elif algorithm['type'] == 'aes': return self._aes_decrypt(data, algorithm, key) elif algorithm['type'] == 'no-serialization': return data elif algorithm['type'] == 'json': return json.loads(data) elif algorithm['type'] == 'no-compression': return data elif algorithm['type'] == 'gzip': return self._zlib_decompress(data, algorithm) else: raise Exception('Algorithm not supported: %s' % algorithm['type'])
python
{ "resource": "" }
q263820
EncryptedPickle._sign_data
validation
def _sign_data(self, data, options):
    '''Add signature to data.

    Generates a fresh random salt, derives the signing key from the
    configured passphrase, appends the HMAC via _encode, and appends
    the salt so _unsign_data can re-derive the same key.
    '''
    if options['signature_algorithm_id'] not in self.signature_algorithms:
        raise Exception('Unknown signature algorithm id: %d'
                        % options['signature_algorithm_id'])
    signature_algorithm = \
        self.signature_algorithms[options['signature_algorithm_id']]
    algorithm = self._get_algorithm_info(signature_algorithm)
    key_salt = get_random_bytes(algorithm['salt_size'])
    key = self._generate_key(options['signature_passphrase_id'],
                             self.signature_passphrases, key_salt, algorithm)
    data = self._encode(data, algorithm, key)
    # Salt travels in the clear at the end of the signed blob.
    return data + key_salt
python
{ "resource": "" }
q263821
EncryptedPickle._unsign_data
validation
def _unsign_data(self, data, options):
    '''Verify and remove signature.

    Mirror of _sign_data: strips the trailing key salt, re-derives the
    key, and lets _decode verify and strip the HMAC. Returns the bare
    payload; raises on signature mismatch.
    '''
    if options['signature_algorithm_id'] not in self.signature_algorithms:
        raise Exception('Unknown signature algorithm id: %d'
                        % options['signature_algorithm_id'])
    signature_algorithm = \
        self.signature_algorithms[options['signature_algorithm_id']]
    algorithm = self._get_algorithm_info(signature_algorithm)
    key_salt = ''
    if algorithm['salt_size']:
        key_salt = data[-algorithm['salt_size']:]
        data = data[:-algorithm['salt_size']]
    key = self._generate_key(options['signature_passphrase_id'],
                             self.signature_passphrases, key_salt, algorithm)
    data = self._decode(data, algorithm, key)
    return data
python
{ "resource": "" }
q263822
EncryptedPickle._remove_magic
validation
def _remove_magic(self, data): '''Verify and remove magic''' if not self.magic: return data magic_size = len(self.magic) magic = data[:magic_size] if magic != self.magic: raise Exception('Invalid magic') data = data[magic_size:] return data
python
{ "resource": "" }
q263823
EncryptedPickle._add_header
validation
def _add_header(self, data, options): '''Add header to data''' # pylint: disable=W0142 version_info = self._get_version_info(options['version']) flags = options['flags'] header_flags = dict( (i, str(int(j))) for i, j in options['flags'].iteritems()) header_flags = ''.join(version_info['flags'](**header_flags)) header_flags = int(header_flags, 2) options['flags'] = header_flags header = version_info['header'] header = header(**options) header = pack(version_info['header_format'], *header) if 'timestamp' in flags and flags['timestamp']: timestamp = long(time()) timestamp = pack(version_info['timestamp_format'], timestamp) header = header + timestamp return header + data
python
{ "resource": "" }
q263824
EncryptedPickle._read_header
validation
def _read_header(self, data): '''Read header from data''' # pylint: disable=W0212 version = self._read_version(data) version_info = self._get_version_info(version) header_data = data[:version_info['header_size']] header = version_info['header'] header = header._make( unpack(version_info['header_format'], header_data)) header = dict(header._asdict()) flags = list("{0:0>8b}".format(header['flags'])) flags = dict(version_info['flags']._make(flags)._asdict()) flags = dict((i, bool(int(j))) for i, j in flags.iteritems()) header['flags'] = flags timestamp = None if flags['timestamp']: ts_start = version_info['header_size'] ts_end = ts_start + version_info['timestamp_size'] timestamp_data = data[ts_start:ts_end] timestamp = unpack( version_info['timestamp_format'], timestamp_data)[0] header['info'] = {'timestamp': timestamp} return header
python
{ "resource": "" }
q263825
EncryptedPickle._remove_header
validation
def _remove_header(self, data, options): '''Remove header from data''' version_info = self._get_version_info(options['version']) header_size = version_info['header_size'] if options['flags']['timestamp']: header_size += version_info['timestamp_size'] data = data[header_size:] return data
python
{ "resource": "" }
q263826
EncryptedPickle._read_version
validation
def _read_version(self, data): '''Read header version from data''' version = ord(data[0]) if version not in self.VERSIONS: raise Exception('Version not defined: %d' % version) return version
python
{ "resource": "" }
q263827
EncryptedPickle._get_algorithm_info
validation
def _get_algorithm_info(self, algorithm_info): '''Get algorithm info''' if algorithm_info['algorithm'] not in self.ALGORITHMS: raise Exception('Algorithm not supported: %s' % algorithm_info['algorithm']) algorithm = self.ALGORITHMS[algorithm_info['algorithm']] algorithm_info.update(algorithm) return algorithm_info
python
{ "resource": "" }
q263828
EncryptedPickle._generate_key
validation
def _generate_key(pass_id, passphrases, salt, algorithm):
    '''Generate and return PBKDF2 key.

    Derives ``algorithm['key_size']`` bytes from the passphrase with
    the given id, using the algorithm's PBKDF2 hash and iteration
    count. Passphrases shorter than 32 characters are rejected.
    '''
    if pass_id not in passphrases:
        raise Exception('Passphrase not defined for id: %d' % pass_id)
    passphrase = passphrases[pass_id]
    if len(passphrase) < 32:
        raise Exception('Passphrase less than 32 characters long')
    digestmod = EncryptedPickle._get_hashlib(algorithm['pbkdf2_algorithm'])
    encoder = PBKDF2(passphrase, salt,
                     iterations=algorithm['pbkdf2_iterations'],
                     digestmodule=digestmod)
    return encoder.read(algorithm['key_size'])
python
{ "resource": "" }
q263829
EncryptedPickle._update_dict
validation
def _update_dict(data, default_data, replace_data=False): '''Update algorithm definition type dictionaries''' if not data: data = default_data.copy() return data if not isinstance(data, dict): raise TypeError('Value not dict type') if len(data) > 255: raise ValueError('More than 255 values defined') for i in data.keys(): if not isinstance(i, int): raise TypeError('Index not int type') if i < 0 or i > 255: raise ValueError('Index value out of range') if not replace_data: data.update(default_data) return data
python
{ "resource": "" }
q263830
RemoteZip.getTableOfContents
validation
def getTableOfContents(self):
    """
    This function populates the internal tableOfContents list with the
    contents of the zip file TOC. If the server does not support ranged
    requests, this will raise and exception. It will also throw an exception
    if the TOC cannot be found.
    """
    self.directory_size = self.getDirectorySize()
    if self.directory_size > 65536:
        # Central directory larger than the initial fetch window:
        # re-fetch it in full before parsing.
        self.directory_size += 2
        self.requestContentDirectory()
    # and find the offset from start of file where it can be found
    directory_start = unpack("i", self.raw_bytes[self.directory_end + 16:
                                                 self.directory_end + 20])[0]
    # find the data in the raw_bytes
    self.raw_bytes = self.raw_bytes
    current_start = directory_start - self.start
    filestart = 0
    compressedsize = 0
    tableOfContents = []
    # NOTE(review): the bare except below is the loop's only exit — an
    # IndexError/struct.error past the last record ends the walk, but it
    # will also mask genuine parse errors.
    try:
        while True:
            # get file name size (n), extra len (m) and comm len (k)
            # Offsets follow the ZIP central-directory record layout.
            zip_n = unpack("H", self.raw_bytes[current_start + 28:
                                               current_start + 28 + 2])[0]
            zip_m = unpack("H", self.raw_bytes[current_start + 30:
                                               current_start + 30 + 2])[0]
            zip_k = unpack("H", self.raw_bytes[current_start + 32:
                                               current_start + 32 + 2])[0]
            filename = self.raw_bytes[current_start + 46:
                                      current_start + 46 + zip_n]
            # check if this is the index file
            filestart = unpack("I", self.raw_bytes[current_start + 42:
                                                   current_start + 42 + 4])[0]
            compressedsize = unpack("I", self.raw_bytes[current_start + 20:
                                                        current_start + 20 + 4])[0]
            uncompressedsize = unpack("I", self.raw_bytes[current_start + 24:
                                                          current_start + 24 + 4])[0]
            tableItem = {
                'filename': filename,
                'compressedsize': compressedsize,
                'uncompressedsize': uncompressedsize,
                'filestart': filestart
            }
            tableOfContents.append(tableItem)
            # not this file, move along
            current_start = current_start + 46 + zip_n + zip_m + zip_k
    except:
        pass
    self.tableOfContents = tableOfContents
    return tableOfContents
python
{ "resource": "" }
q263831
RemoteZip.extractFile
validation
def extractFile(self, filename):
    """
    This function will extract a single file from the remote zip
    without downloading the entire zip file. The filename argument
    should match whatever is in the 'filename' key of the
    tableOfContents.
    """
    files = [x for x in self.tableOfContents if x['filename'] == filename]
    if len(files) == 0:
        raise FileNotFoundException()
    fileRecord = files[0]
    # got here? need to fetch the file size
    # Extra bytes cover the local file header preceding the data.
    metaheadroom = 1024  # should be enough
    request = urllib2.Request(self.zipURI)
    start = fileRecord['filestart']
    end = fileRecord['filestart'] + fileRecord['compressedsize'] + metaheadroom
    request.headers['Range'] = "bytes=%s-%s" % (start, end)
    handle = urllib2.urlopen(request)
    # make sure the response is ranged
    return_range = handle.headers.get('Content-Range')
    if return_range != "bytes %d-%d/%s" % (start, end, self.filesize):
        raise Exception("Ranged requests are not supported for this URI")
    filedata = handle.read()
    # find start of raw file data
    zip_n = unpack("H", filedata[26:28])[0]
    zip_m = unpack("H", filedata[28:30])[0]
    # check compressed size
    has_data_descriptor = bool(unpack("H", filedata[6:8])[0] & 8)
    comp_size = unpack("I", filedata[18:22])[0]
    if comp_size == 0 and has_data_descriptor:
        # assume compressed size in the Central Directory is correct
        comp_size = fileRecord['compressedsize']
    elif comp_size != fileRecord['compressedsize']:
        raise Exception("Something went wrong. Directory and file header disagree of compressed file size")
    raw_zip_data = filedata[30 + zip_n + zip_m: 30 + zip_n + zip_m + comp_size]
    uncompressed_data = ""
    # can't decompress if stored without compression
    compression_method = unpack("H", filedata[8:10])[0]
    if compression_method == 0:
        return raw_zip_data
    # Raw deflate stream (negative wbits = no zlib header).
    dec = zlib.decompressobj(-zlib.MAX_WBITS)
    # NOTE(review): iterating raw_zip_data feeds the decompressor one
    # byte at a time — works, but is very slow for large members.
    for chunk in raw_zip_data:
        rv = dec.decompress(chunk)
        if rv:
            uncompressed_data = uncompressed_data + rv
    return uncompressed_data
python
{ "resource": "" }
q263832
star.do_photometry
validation
def do_photometry(self):
    """
    Does photometry and estimates uncertainties by calculating the
    scatter around a linear fit to the data in each orientation. This
    function is called by other functions and generally the user will
    not need to interact with it directly.
    """
    # One uncertainty per spacecraft orientation (4 quarters/rolls).
    std_f = np.zeros(4)
    data_save = np.zeros_like(self.postcard)
    self.obs_flux = np.zeros_like(self.reference_flux)
    for i in range(4):
        g = np.where(self.qs == i)[0]
        # Restrict the fit to epochs after t = 54947.
        wh = np.where(self.times[g] > 54947)
        # Shift frames by the best-fit roll offsets for this orientation.
        data_save[g] = np.roll(self.postcard[g], int(self.roll_best[i,0]), axis=1)
        data_save[g] = np.roll(data_save[g], int(self.roll_best[i,1]), axis=2)
        self.target_flux_pixels = data_save[:,self.targets == 1]
        self.target_flux = np.sum(self.target_flux_pixels, axis=1)
        # Normalize target flux by the reference ensemble, then by the
        # median within this orientation.
        self.obs_flux[g] = self.target_flux[g] / self.reference_flux[g]
        self.obs_flux[g] /= np.median(self.obs_flux[g[wh]])
        fitline = np.polyfit(self.times[g][wh], self.obs_flux[g][wh], 1)
        # Scatter about the linear trend, floored at 0.001.
        std_f[i] = np.max([np.std(self.obs_flux[g][wh]/(fitline[0]*self.times[g][wh]+fitline[1])), 0.001])
    self.flux_uncert = std_f
python
{ "resource": "" }
q263833
star.generate_panel
validation
def generate_panel(self, img):
    """
    Creates the figure shown in ``adjust_aperture`` for visualization
    purposes. Called by other functions and generally not called by the
    user directly.

    Args:
        img: The data frame to be passed through to be plotted. A
            cutout of the ``integrated_postcard``
    """
    plt.figure(figsize=(14,6))
    ax = plt.gca()
    fig = plt.gcf()
    plt.subplot(122)
    data_save = np.zeros_like(self.postcard)
    self.roll_best = np.zeros((4,2))
    # Find the best roll offsets per orientation, then redo photometry.
    for i in range(4):
        g = np.where(self.qs == i)[0]
        wh = np.where(self.times[g] > 54947)
        self.roll_best[i] = self.do_rolltest(g, wh)
    self.do_photometry()
    # Right panel: light curve with per-orientation uncertainties.
    for i in range(4):
        g = np.where(self.qs == i)[0]
        plt.errorbar(self.times[g], self.obs_flux[g],
                     yerr=self.flux_uncert[i], fmt=fmt[i])
    plt.xlabel('Time', fontsize=20)
    plt.ylabel('Relative Flux', fontsize=20)
    # Left panel: the image cutout; clicks are routed to self.onclick.
    plt.subplot(121)
    implot = plt.imshow(img, interpolation='nearest', cmap='gray',
                        vmin=98000*52, vmax=104000*52)
    cid = fig.canvas.mpl_connect('button_press_event', self.onclick)
    plt.show(block=True)
python
{ "resource": "" }
q263834
star.calc_centroids
validation
def calc_centroids(self):
    """
    Identify the centroid positions for the target star at all epochs.
    Useful for verifying that there is no correlation between flux and
    position, as might be expected for high proper motion stars.
    """
    # One (row, col) centroid per epoch, stored on self.cm.
    self.cm = np.zeros((len(self.postcard), 2))
    for i in range(len(self.postcard)):
        target = self.postcard[i]
        # Zero out everything outside the target aperture.
        # NOTE: this mutates self.postcard in place.
        target[self.targets != 1] = 0.0
        self.cm[i] = center_of_mass(target)
python
{ "resource": "" }
q263835
star.define_spotsignal
validation
def define_spotsignal(self):
    """
    Identify the "expected" flux value at the time of each observation
    based on the Kepler long-cadence data, to ensure variations
    observed are not the effects of a single large starspot. Only works
    if the target star was targeted for long or short cadence
    observations during the primary mission.
    """
    client = kplr.API()
    star = client.star(self.kic)
    lcs = star.get_light_curves(short_cadence=False)
    time, flux, ferr, qual = [], [], [], []
    # Collect time/flux/error/quality arrays from every light curve file.
    for lc in lcs:
        with lc.open() as f:
            hdu_data = f[1].data
            time.append(hdu_data["time"])
            flux.append(hdu_data["pdcsap_flux"])
            ferr.append(hdu_data["pdcsap_flux_err"])
            qual.append(hdu_data["sap_quality"])
    tout = np.array([])
    fout = np.array([])
    eout = np.array([])
    for i in range(len(flux)):
        # Keep only good-quality (flag == 0), finite-flux cadences.
        t = time[i][qual[i] == 0]
        f = flux[i][qual[i] == 0]
        e = ferr[i][qual[i] == 0]
        t = t[np.isfinite(f)]
        e = e[np.isfinite(f)]
        f = f[np.isfinite(f)]
        # Normalize flux and errors to the quarter median.
        e /= np.median(f)
        f /= np.median(f)
        # Drop the first 50 cadences; +54833 converts BKJD to the
        # time system used by self.times.
        tout = np.append(tout, t[50:]+54833)
        fout = np.append(fout, f[50:])
        eout = np.append(eout, e[50:])
    self.spot_signal = np.zeros(52)
    for i in range(len(self.times)):
        if self.times[i] < 55000:
            self.spot_signal[i] = 1.0
        else:
            # Use the Kepler flux at the nearest-in-time cadence.
            self.spot_signal[i] = fout[np.abs(self.times[i] - tout) ==
                                       np.min(np.abs(self.times[i] - tout))]
python
{ "resource": "" }
q263836
star.model_uncert
validation
def model_uncert(self):
    """
    Estimate the photometric uncertainties on each data point following
    Equation A.2 of The Paper. Based on the kepcal package of Dan
    Foreman-Mackey.
    """
    # Rows: stars, columns: observations; normalize each star's series.
    Y = self.photometry_array.T
    Y /= np.median(Y, axis=1)[:, None]
    C = np.median(Y, axis=0)
    nstars, nobs = np.shape(Y)
    # Per-star zero point for each of the 4 orientations.
    Z = np.empty((nstars, 4))
    qs = self.qs.astype(int)
    for s in range(4):
        Z[:, s] = np.median((Y / C)[:, qs == s], axis=1)
    resid2 = (Y - Z[:, qs] * C)**2
    z = Z[:, qs]
    trend = z * C[None, :]
    lnS = np.log(np.nanmedian(resid2, axis=0))
    jitter = np.log(0.1*np.nanmedian(np.abs(np.diff(Y, axis=1))))
    # NOTE(review): exp(2*(jitter/trend)) divides the log-jitter by the
    # trend inside the exponent — verify against Eq. A.2, which may
    # intend exp(2*jitter)/trend**2.
    cal_ferr = np.sqrt(np.exp(2*(jitter/trend))+z**2*np.exp(lnS)[None, :])
    self.modeled_uncert = cal_ferr
    # First row is assumed to be the target star.
    self.target_uncert = cal_ferr[0]
python
{ "resource": "" }
q263837
Pbd._dump_field
validation
def _dump_field(self, fd):
    """Dump single field.

    Renders one FieldDescriptorProto as a proto2 field line and records
    any referenced message/enum type in ``self.uses`` for later import
    resolution.
    """
    v = {}
    v['label'] = Pbd.LABELS[fd.label]
    # Named types carry a non-empty type_name; scalars map via TYPES.
    v['type'] = fd.type_name if len(fd.type_name) > 0 else Pbd.TYPES[fd.type]
    v['name'] = fd.name
    v['number'] = fd.number
    v['default'] = '[default = {}]'.format(fd.default_value) if len(fd.default_value) > 0 else ''
    f = '{label} {type} {name} = {number} {default};'.format(**v)
    # Collapse runs of whitespace left by empty placeholders.
    f = ' '.join(f.split())
    self._print(f)
    if len(fd.type_name) > 0:
        self.uses.append(fd.type_name)
python
{ "resource": "" }
q263838
Pbd.disassemble
validation
def disassemble(self):
    """Disassemble serialized protocol buffers file.

    Reads ``self.input_file``, parses it as a FileDescriptorProto and
    emits proto2 source via ``self._print``/``self._walk``. Sets
    ``self.name`` and, when present, ``self.package``.
    """
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(self.input_file, 'rb') as ser_fh:
        ser_pb = ser_fh.read()  # Read serialized pb file
    fd = FileDescriptorProto()
    fd.ParseFromString(ser_pb)
    self.name = fd.name
    self._print('// Reversed by pbd (https://github.com/rsc-dev/pbd)')
    self._print('syntax = "proto2";')
    self._print('')
    if len(fd.package) > 0:
        self._print('package {};'.format(fd.package))
        self.package = fd.package
    else:
        self._print('// Package not defined')
    self._walk(fd)
python
{ "resource": "" }
q263839
Pbd.find_imports
validation
def find_imports(self, pbds):
    """Find all missing imports in list of Pbd instances.

    Any type used here but not defined here is looked up in the other
    instances; the first definer's file name is recorded and an
    ``import`` line is inserted after the preamble.
    """
    # Types referenced but not defined in this file.
    undefined = set(self.uses) - set(self.defines)
    for type_name in undefined:
        for candidate in pbds:
            if type_name in candidate.defines:
                self.imports.append(candidate.name)
                break
    # De-duplicate accumulated import file names.
    self.imports = list(set(self.imports))
    for import_file in self.imports:
        self.lines.insert(2, 'import "{}";'.format(import_file))
python
{ "resource": "" }
q263840
fasta_dict_to_file
validation
def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None):
    """Write fasta_dict to fasta_file

    :param fasta_dict: returned by fasta_file_to_dict
    :param fasta_file: output file can be a string path or a file object
    :param line_char_limit: None = no limit (default)
    :return: None
    """
    # Open in text mode ('wb' + unicode writes fails on Python 3) and
    # remember whether we own the handle so we can close it — the
    # original opened the file and never closed it.
    opened_here = isinstance(fasta_file, str)
    fasta_fp = open(fasta_file, 'w') if opened_here else fasta_file
    try:
        for key in fasta_dict:
            seq = fasta_dict[key]['seq']
            if line_char_limit:
                # Wrap the sequence at line_char_limit characters.
                seq = '\n'.join(seq[i:i+line_char_limit]
                                for i in range(0, len(seq), line_char_limit))
            fasta_fp.write(u'{0:s}\n{1:s}\n'.format(
                fasta_dict[key]['header'], seq))
    finally:
        if opened_here:
            fasta_fp.close()
python
{ "resource": "" }
q263841
Gff3.add_line_error
validation
def add_line_error(self, line_data, error_info, log_level=logging.ERROR):
    """Helper function to record and log an error message

    :param line_data: dict
    :param error_info: dict
    :param logger:
    :param log_level: int
    :return:
    """
    if not error_info:
        return
    # EAFP: append to the line's error list, creating it on first use.
    try:
        line_data['line_errors'].append(error_info)
    except KeyError:
        line_data['line_errors'] = [error_info]
    except TypeError:  # no line_data
        pass
    # Logging is best-effort: a missing logger is silently ignored.
    try:
        self.logger.log(log_level, Gff3.error_format.format(
            current_line_num=line_data['line_index'] + 1,
            error_type=error_info['error_type'],
            message=error_info['message'],
            line=line_data['line_raw'].rstrip()))
    except AttributeError:  # no logger
        pass
python
{ "resource": "" }
q263842
Gff3.check_parent_boundary
validation
def check_parent_boundary(self):
    """
    checks whether child features are within the coordinate boundaries
    of parent features

    A line passes if at least one line of each parent feature spans it;
    otherwise a BOUNDS error is recorded on the child line.
    :return:
    """
    for line in self.lines:
        for parent_feature in line['parents']:
            ok = False
            for parent_line in parent_feature:
                # Child must be fully inside this parent line's span.
                if parent_line['start'] <= line['start'] and line['end'] <= parent_line['end']:
                    ok = True
                    break
            if not ok:
                self.add_line_error(line, {'message': 'This feature is not contained within the feature boundaries of parent: {0:s}: {1:s}'.format(
                    parent_feature[0]['attributes']['ID'],
                    ','.join(['({0:s}, {1:d}, {2:d})'.format(line['seqid'], line['start'], line['end']) for line in parent_feature])
                ), 'error_type': 'BOUNDS', 'location': 'parent_boundary'})
python
{ "resource": "" }
q263843
Gff3.check_phase
validation
def check_phase(self):
    """
    1. get a list of CDS with the same parent
    2. sort according to strand
    3. calculate and validate phase

    Records STRAND errors for mixed-strand CDS groups and PHASE errors
    where the stored phase disagrees with the computed one.
    """
    plus_minus = set(['+', '-'])
    # Group CDS feature lines by their Parent attribute (groupby needs
    # the pre-sort on the same key).
    for k, g in groupby(sorted([line for line in self.lines if line['line_type'] == 'feature' and line['type'] == 'CDS' and 'Parent' in line['attributes']], key=lambda x: x['attributes']['Parent']), key=lambda x: x['attributes']['Parent']):
        cds_list = list(g)
        strand_set = list(set([line['strand'] for line in cds_list]))
        if len(strand_set) != 1:
            # Mixed strands within one parent: flag every CDS line.
            for line in cds_list:
                self.add_line_error(line, {'message': 'Inconsistent CDS strand with parent: {0:s}'.format(k), 'error_type': 'STRAND'})
            continue
        if len(cds_list) == 1:
            # A lone CDS must start at phase 0.
            if cds_list[0]['phase'] != 0:
                self.add_line_error(cds_list[0], {'message': 'Wrong phase {0:d}, should be {1:d}'.format(cds_list[0]['phase'], 0), 'error_type': 'PHASE'})
            continue
        strand = strand_set[0]
        if strand not in plus_minus:
            # don't process unknown strands
            continue
        if strand == '-':
            # sort end descending
            sorted_cds_list = sorted(cds_list, key=lambda x: x['end'], reverse=True)
        else:
            sorted_cds_list = sorted(cds_list, key=lambda x: x['start'])
        phase = 0
        for line in sorted_cds_list:
            if line['phase'] != phase:
                self.add_line_error(line, {'message': 'Wrong phase {0:d}, should be {1:d}'.format(line['phase'], phase), 'error_type': 'PHASE'})
            # Next phase per the GFF3 spec: bases carried over modulo 3.
            phase = (3 - ((line['end'] - line['start'] + 1 - phase) % 3)) % 3
python
{ "resource": "" }
q263844
Gff3.adopt
validation
def adopt(self, old_parent, new_parent):
    """
    Transfer children from old_parent to new_parent

    :param old_parent: feature_id(str) or line_index(int) or line_data(dict) or feature
    :param new_parent: feature_id(str) or line_index(int) or line_data(dict)
    :return: List of children transferred
    """
    # resolve old_parent to a feature id, whatever form was passed in
    try: # assume line_data(dict)
        old_id = old_parent['attributes']['ID']
    except TypeError:
        try: # assume line_index(int)
            old_id = self.lines[old_parent]['attributes']['ID']
        except TypeError: # assume feature_id(str)
            old_id = old_parent
    old_feature = self.features[old_id]
    old_indexes = [ld['line_index'] for ld in old_feature]
    # resolve new_parent the same way
    try: # assume line_data(dict)
        new_id = new_parent['attributes']['ID']
    except TypeError:
        try: # assume line_index(int)
            new_id = self.lines[new_parent]['attributes']['ID']
        except TypeError: # assume feature_id(str)
            new_id = new_parent
    new_feature = self.features[new_id]
    new_indexes = [ld['line_index'] for ld in new_feature]
    # build a list of children to be moved
    # add the child to the new parent's children list if its not already there
    # update the child's parent list and parent attribute
    # finally remove the old parent's children list
    children = old_feature[0]['children']
    new_parent_children_set = set([ld['line_index'] for ld in new_feature[0]['children']])
    for child in children:
        if child['line_index'] not in new_parent_children_set:
            new_parent_children_set.add(child['line_index'])
            for new_ld in new_feature:
                new_ld['children'].append(child)
            child['parents'].append(new_feature)
            child['attributes']['Parent'].append(new_id)
        # remove multiple, list.remove() only removes 1
        child['parents'] = [f for f in child['parents'] if f[0]['attributes']['ID'] != old_id]
        child['attributes']['Parent'] = [d for d in child['attributes']['Parent'] if d != old_id]
    for old_ld in old_feature:
        old_ld['children'] = []
    return children
python
{ "resource": "" }
q263845
Gff3.remove
validation
def remove(self, line_data, root_type=None):
    """
    Marks line_data and all of its associated feature's 'line_status' as 'removed', does not actually remove the line_data from the data structure.
    The write function checks the 'line_status' when writing the gff file.
    Find the root parent of line_data of type root_type, remove all of its descendants.
    If the root parent has a parent with no children after the remove, remove the root parent's parent recursively.

    :param line_data: the line to remove (soft-delete)
    :param root_type: optional line_type to treat as the removal root
    :return:
    """
    # choose the removal roots: ancestors matching root_type, or top-level
    # ancestors (no parents); fall back to line_data itself if none found
    roots = [ld for ld in self.ancestors(line_data) if (root_type and ld['line_type'] == root_type) or (not root_type and not ld['parents'])] or [line_data]
    for root in roots:
        root['line_status'] = 'removed'
        root_descendants = self.descendants(root)
        for root_descendant in root_descendants:
            root_descendant['line_status'] = 'removed'
        root_ancestors = self.ancestors(root) # BFS, so we will process closer ancestors first
        for root_ancestor in root_ancestors:
            if len([ld for ld in root_ancestor['children'] if ld['line_status'] != 'removed']) == 0: # if all children of a root_ancestor is removed
                # remove this root_ancestor
                root_ancestor['line_status'] = 'removed'
python
{ "resource": "" }
q263846
abfIDfromFname
validation
def abfIDfromFname(fname):
    """given a filename, return the ABFs ID string."""
    # resolve to an absolute path, then strip directory and extension
    base = os.path.basename(os.path.abspath(fname))
    stem, _ext = os.path.splitext(base)
    return stem
python
{ "resource": "" }
q263847
abfProtocol
validation
def abfProtocol(fname):
    """Determine the protocol used to record an ABF file"""
    # the Clampex protocol path is embedded near the start of the file;
    # 30 kB is plenty to find it
    with open(fname, 'rb') as fh:
        head = fh.read(30 * 1000)
    text = head.decode("utf-8", "ignore")
    protoPath = text.split("Clampex")[1].split(".pro")[0]
    protocol = os.path.basename(protoPath)  # the whole protocol filename
    return protocol.split(" ")[0]  # just the first number
python
{ "resource": "" }
q263848
headerHTML
validation
def headerHTML(header,fname): """given the bytestring ABF header, make and launch HTML.""" html="<html><body><code>" html+="<h2>%s</h2>"%(fname) html+=pprint.pformat(header, indent=1) html=html.replace("\n",'<br>').replace(" ","&nbsp;") html=html.replace(r"\x00","") html+="</code></body></html>" print("saving header file:",fname) f=open(fname,'w') f.write(html) f.close() webbrowser.open(fname)
python
{ "resource": "" }
q263849
ABF.setsweeps
validation
def setsweeps(self): """iterate over every sweep""" for sweep in range(self.sweeps): self.setsweep(sweep) yield self.sweep
python
{ "resource": "" }
q263850
ABF.comments_load
validation
def comments_load(self): """read the header and populate self with information about comments""" self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[] self.comments=0 # will be >0 if comments exist self.comment_text="" try: # this used to work self.comment_tags = list(self.ABFblock.segments[0].eventarrays[0].annotations['comments']) self.comment_times = list(self.ABFblock.segments[0].eventarrays[0].times/self.trace.itemsize) self.comment_sweeps = list(self.comment_times) except: # now this notation seems to work for events in self.ABFblock.segments[0].events: # this should only happen once actually self.comment_tags = events.annotations['comments'].tolist() self.comment_times = np.array(events.times.magnitude/self.trace.itemsize) self.comment_sweeps = self.comment_times/self.sweepInterval for i,c in enumerate(self.comment_tags): self.comment_tags[i]=c.decode("utf-8")
python
{ "resource": "" }
q263851
ABF.get_protocol_sequence
validation
def get_protocol_sequence(self,sweep): """ given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. """ self.setsweep(sweep) return list(self.protoSeqX),list(self.protoSeqY)
python
{ "resource": "" }
q263852
ABF.average
validation
def average(self,t1=0,t2=None,setsweep=False): """return the average of part of the current sweep.""" if setsweep: self.setsweep(setsweep) if t2 is None or t2>self.sweepLength: t2=self.sweepLength self.log.debug("resetting t2 to [%f]",t2) t1=max(t1,0) if t1>t2: self.log.error("t1 cannot be larger than t2") return False I1,I2=int(t1*self.pointsPerSec),int(t2*self.pointsPerSec) if I1==I2: return np.nan return np.average(self.sweepY[I1:I2])
python
{ "resource": "" }
q263853
ABF.averageSweep
validation
def averageSweep(self,sweepFirst=0,sweepLast=None): """ Return a sweep which is the average of multiple sweeps. For now, standard deviation is lost. """ if sweepLast is None: sweepLast=self.sweeps-1 nSweeps=sweepLast-sweepFirst+1 runningSum=np.zeros(len(self.sweepY)) self.log.debug("averaging sweep %d to %d",sweepFirst,sweepLast) for sweep in np.arange(nSweeps)+sweepFirst: self.setsweep(sweep) runningSum+=self.sweepY.flatten() average=runningSum/nSweeps #TODO: standard deviation? return average
python
{ "resource": "" }
q263854
ABF.kernel_gaussian
validation
def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False): """create kernel based on this ABF info.""" sigmaMS=sizeMS/10 if sigmaMS is None else sigmaMS size,sigma=sizeMS*self.pointsPerMs,sigmaMS*self.pointsPerMs self.kernel=swhlab.common.kernel_gaussian(size,sigma,forwardOnly) return self.kernel
python
{ "resource": "" }
q263855
ABF.sweepYfiltered
validation
def sweepYfiltered(self): """ Get the filtered sweepY of the current sweep. Only works if self.kernel has been generated. """ assert self.kernel is not None return swhlab.common.convolve(self.sweepY,self.kernel)
python
{ "resource": "" }
q263856
dictFlat
validation
def dictFlat(l):
    """Given a list of list of dicts, return just the dicts."""
    # a bare dict gets wrapped in a list; numpy arrays pass through untouched
    if type(l) is dict:
        return [l]
    if "numpy" in str(type(l)):
        return l
    flat = []
    for item in l:
        if type(item) is dict:
            flat.append(item)
        elif type(item) is list:
            # one level of nesting: splice the inner list's items in
            flat.extend(item)
    return flat
python
{ "resource": "" }
q263857
matrixValues
validation
def matrixValues(matrix, key):
    """given a key, return a list of values from the matrix with that key."""
    assert key in matrix.dtype.names
    col = matrix.dtype.names.index(key)
    # start from all-NaN so any untouched slot stays NaN
    values = np.full(len(matrix), np.nan)
    for row, record in enumerate(matrix):
        values[row] = record[col]
    return values
python
{ "resource": "" }
q263858
matrixToDicts
validation
def matrixToDicts(data): """given a recarray, return it as a list of dicts.""" # 1D array if "float" in str(type(data[0])): d={} for x in range(len(data)): d[data.dtype.names[x]]=data[x] return d # 2D array l=[] for y in range(len(data)): d={} for x in range(len(data[y])): d[data.dtype.names[x]]=data[y][x] l.append(d) return l
python
{ "resource": "" }
q263859
html_temp_launch
validation
def html_temp_launch(html): """given text, make it a temporary HTML file and launch it.""" fname = tempfile.gettempdir()+"/swhlab/temp.html" with open(fname,'w') as f: f.write(html) webbrowser.open(fname)
python
{ "resource": "" }
q263860
checkOut
validation
def checkOut(thing,html=True): """show everything we can about an object's projects and methods.""" msg="" for name in sorted(dir(thing)): if not "__" in name: msg+="<b>%s</b>\n"%name try: msg+=" ^-VALUE: %s\n"%getattr(thing,name)() except: pass if html: html='<html><body><code>'+msg+'</code></body></html>' html=html.replace(" ","&nbsp;").replace("\n","<br>") fname = tempfile.gettempdir()+"/swhlab/checkout.html" with open(fname,'w') as f: f.write(html) webbrowser.open(fname) print(msg.replace('<b>','').replace('</b>',''))
python
{ "resource": "" }
q263861
matrixToHTML
validation
def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None): """Put 2d numpy data into a temporary HTML file.""" if not names: names=[""]*len(data[0]) if data.dtype.names: names=list(data.dtype.names) if not units: units=[""]*len(data[0]) for i in range(len(units)): if names[i] in UNITS.keys(): units[i]=UNITS[names[i]] if 'recarray' in str(type(data)): #make it a regular array data=data.view(float).reshape(data.shape + (-1,)) if xCol and xCol in names: xCol=names.index(xCol) names.insert(0,names[xCol]) units.insert(0,units[xCol]) data=np.insert(data,0,data[:,xCol],1) htmlFname = tempfile.gettempdir()+"/swhlab/WKS-%s.%s.html"%(bookName,sheetName) html="""<body> <style> body { background-color: #ababab; padding:20px; } table { font-size:12px; border-spacing: 0; border-collapse: collapse; //border:2px solid #000000; } .name {background-color:#fafac8;text-align:center;} .units {background-color:#fafac8;text-align:center;} .data0 {background-color:#FFFFFF;font-family: monospace;text-align:center;} .data1 {background-color:#FAFAFA;font-family: monospace;text-align:center;} .labelRow {background-color:#e0dfe4; text-align:right;border:1px solid #000000;} .labelCol {background-color:#e0dfe4; text-align:center;border:1px solid #000000;} td { border:1px solid #c0c0c0; padding:5px; //font-family: Verdana, Geneva, sans-serif; font-family: Arial, Helvetica, sans-serif } </style> <html>""" html+="<h1>FauxRigin</h1>" if bookName or sheetName: html+='<code><b>%s / %s</b></code><br><br>'%(bookName,sheetName) html+="<table>" #cols=list(range(len(names))) colNames=[''] for i in range(len(units)): label="%s (%d)"%(chr(i+ord('A')),i) colNames.append(label) html+=htmlListToTR(colNames,'labelCol','labelCol') html+=htmlListToTR(['Long Name']+list(names),'name',td1Class='labelRow') html+=htmlListToTR(['Units']+list(units),'units',td1Class='labelRow') cutOff=False for y in range(len(data)): 
html+=htmlListToTR([y+1]+list(data[y]),trClass='data%d'%(y%2),td1Class='labelRow') if y>=200: cutOff=True break html+="</table>" html=html.replace(">nan<",">--<") html=html.replace(">None<","><") if cutOff: html+="<h3>... showing only %d of %d rows ...</h3>"%(y,len(data)) html+="</body></html>" with open(htmlFname,'w') as f: f.write(html) webbrowser.open(htmlFname) return
python
{ "resource": "" }
q263862
XMLtoPython
validation
def XMLtoPython(xmlStr=r"C:\Apps\pythonModules\GSTemp.xml"): """ given a string or a path to an XML file, return an XML object. """ #TODO: this absolute file path crazy stuff needs to stop! if os.path.exists(xmlStr): with open(xmlStr) as f: xmlStr=f.read() print(xmlStr) print("DONE") return
python
{ "resource": "" }
q263863
algo_exp
validation
def algo_exp(x, m, t, b):
    """mono-exponential curve."""
    # y = m * e^(-t*x) + b
    decay = np.exp(-t * x)
    return b + m * decay
python
{ "resource": "" }
q263864
where_cross
validation
def where_cross(data, threshold):
    """return a list of Is where the data first crosses above threshold."""
    above = np.where(data > threshold)[0]
    # prepend a sentinel so a crossing at index 0 counts as a run start,
    # then keep only the indices that begin a new contiguous run
    above = np.concatenate(([0], above))
    run_breaks = (above[1:] - above[:-1]) != 1
    return above[1:][run_breaks]
python
{ "resource": "" }
q263865
originFormat
validation
def originFormat(thing): """Try to format anything as a 2D matrix with column names.""" if type(thing) is list and type(thing[0]) is dict: return originFormat_listOfDicts(thing) if type(thing) is list and type(thing[0]) is list: return originFormat_listOfDicts(dictFlat(thing)) else: print(" !! I don't know how to format this object!") print(thing)
python
{ "resource": "" }
q263866
pickle_save
validation
def pickle_save(thing, fname):
    """Save `thing` to a pickle file at `fname` and return `thing` (fluent).

    :param thing: any picklable object
    :param fname: destination file path
    :return: `thing`, unchanged, so calls can be chained
    """
    # BUG FIX: the original passed an anonymous open() handle to pickle.dump
    # and never closed it; use a context manager so the handle is always
    # closed/flushed, even if pickling raises.
    with open(fname, "wb") as f:
        pickle.dump(thing, f, pickle.HIGHEST_PROTOCOL)
    return thing
python
{ "resource": "" }
q263867
msgDict
validation
def msgDict(d, matching=None, sep1="=", sep2="\n", sort=True, cantEndWith=None):
    """convert a dictionary to a pretty formatted string.

    :param d: dict or numpy record to render
    :param matching: if given, only keys contained in this collection are shown
    :param sep1: separator between key and value
    :param sep2: separator between entries
    :param sort: render keys in sorted order
    :param cantEndWith: suffix; keys ending with it are skipped
    :return: formatted string (trailing separator stripped)
    """
    msg = ""
    # numpy records expose their keys via dtype.names
    if "record" in str(type(d)):
        keys = d.dtype.names
    else:
        keys = d.keys()
    if sort:
        keys = sorted(keys)
    for key in keys:
        if key[0] == "_":  # private keys are hidden
            continue
        if matching:
            if not key in matching:
                continue
        # BUG FIX: the original compared the single character
        # key[-len(cantEndWith)] against the whole suffix string, which could
        # never match a multi-character suffix; use a slice for a real
        # endswith test.
        if cantEndWith and key[-len(cantEndWith):] == cantEndWith:
            continue
        if 'float' in str(type(d[key])):
            s = "%.02f" % d[key]
        else:
            s = str(d[key])
        if "object" in s:
            s = '<object>'
        msg += key + sep1 + s + sep2
    return msg.strip()
python
{ "resource": "" }
q263868
determineProtocol
validation
def determineProtocol(fname):
    """determine the comment cooked in the protocol."""
    # the tag lives within the first 5 kB of the file
    with open(fname, 'rb') as fh:
        raw = fh.read(5000)
    comment = "?"
    # check the newer tag first, then the legacy one
    for tag in (b"SWHLab4[", b"SWH["):
        if tag in raw:
            comment = raw.split(tag)[1].split(b"]", 1)[0]
            break
    if isinstance(comment, bytes):
        comment = comment.decode("utf-8")
    return comment
python
{ "resource": "" }
q263869
scanABFfolder
validation
def scanABFfolder(abfFolder): """ scan an ABF directory and subdirectory. Try to do this just once. Returns ABF files, SWHLab files, and groups. """ assert os.path.isdir(abfFolder) filesABF=forwardSlash(sorted(glob.glob(abfFolder+"/*.*"))) filesSWH=[] if os.path.exists(abfFolder+"/swhlab4/"): filesSWH=forwardSlash(sorted(glob.glob(abfFolder+"/swhlab4/*.*"))) groups=getABFgroups(filesABF) return filesABF,filesSWH,groups
python
{ "resource": "" }
q263870
getParent
validation
def getParent(abfFname): """given an ABF file name, return the ABF of its parent.""" child=os.path.abspath(abfFname) files=sorted(glob.glob(os.path.dirname(child)+"/*.*")) parentID=abfFname #its own parent for fname in files: if fname.endswith(".abf") and fname.replace(".abf",".TIF") in files: parentID=os.path.basename(fname).replace(".abf","") if os.path.basename(child) in fname: break return parentID
python
{ "resource": "" }
q263871
getParent2
validation
def getParent2(abfFname, groups):
    """given an ABF and the groups dict, return the ID of its parent."""
    # normalize a full path/filename down to a bare ABF ID
    if ".abf" in abfFname:
        abfFname = os.path.basename(abfFname).replace(".abf", "")
    # the parent is whichever group lists this ID among its children;
    # an orphan is its own parent
    matches = [pid for pid, children in groups.items() if abfFname in children]
    return matches[0] if matches else abfFname
python
{ "resource": "" }
q263872
getNotesForABF
validation
def getNotesForABF(abfFile): """given an ABF, find the parent, return that line of experiments.txt""" parent=getParent(abfFile) parent=os.path.basename(parent).replace(".abf","") expFile=os.path.dirname(abfFile)+"/experiment.txt" if not os.path.exists(expFile): return "no experiment file" with open(expFile) as f: raw=f.readlines() for line in raw: if line[0]=='~': line=line[1:].strip() if line.startswith(parent): while "\t\t" in line: line=line.replace("\t\t","\t") line=line.replace("\t","\n") return line return "experiment.txt found, but didn't contain %s"%parent
python
{ "resource": "" }
q263873
getIDsFromFiles
validation
def getIDsFromFiles(files):
    """given a path or list of files, return ABF IDs."""
    # a string argument is treated as a folder to scan
    if type(files) is str:
        files = glob.glob(files + "/*.*")
    ids = []
    for fname in files:
        if not fname[-4:].lower() == '.abf':
            continue
        # strip the extension exactly as it appears (case preserved)
        ext = fname.split('.')[-1]
        ids.append(os.path.basename(fname).replace('.' + ext, ''))
    return sorted(ids)
python
{ "resource": "" }
q263874
inspectABF
validation
def inspectABF(abf=exampleABF,saveToo=False,justPlot=False): """May be given an ABF object or filename.""" pylab.close('all') print(" ~~ inspectABF()") if type(abf) is str: abf=swhlab.ABF(abf) swhlab.plot.new(abf,forceNewFigure=True) if abf.sweepInterval*abf.sweeps<60*5: #shorter than 5 minutes pylab.subplot(211) pylab.title("%s [%s]"%(abf.ID,abf.protoComment)) swhlab.plot.sweep(abf,'all') pylab.subplot(212) swhlab.plot.sweep(abf,'all',continuous=True) swhlab.plot.comments(abf) else: print(" -- plotting as long recording") swhlab.plot.sweep(abf,'all',continuous=True,minutes=True) swhlab.plot.comments(abf,minutes=True) pylab.title("%s [%s]"%(abf.ID,abf.protoComment)) swhlab.plot.annotate(abf) if justPlot: return if saveToo: path=os.path.split(abf.fname)[0] basename=os.path.basename(abf.fname) pylab.savefig(os.path.join(path,"_"+basename.replace(".abf",".png"))) pylab.show() return
python
{ "resource": "" }
q263875
ftp_login
validation
def ftp_login(folder=None): """return an "FTP" object after logging in.""" pwDir=os.path.realpath(__file__) for i in range(3): pwDir=os.path.dirname(pwDir) pwFile = os.path.join(pwDir,"passwd.txt") print(" -- looking for login information in:\n [%s]"%pwFile) try: with open(pwFile) as f: lines=f.readlines() username=lines[0].strip() password=lines[1].strip() print(" -- found a valid username/password") except: print(" -- password lookup FAILED.") username=TK_askPassword("FTP LOGIN","enter FTP username") password=TK_askPassword("FTP LOGIN","enter password for %s"%username) if not username or not password: print(" !! failed getting login info. aborting FTP effort.") return print(" username:",username) print(" password:","*"*(len(password))) print(" -- logging in to FTP ...") try: ftp = ftplib.FTP("swharden.com") ftp.login(username, password) if folder: ftp.cwd(folder) return ftp except: print(" !! login failure !!") return False
python
{ "resource": "" }
q263876
ftp_folder_match
validation
def ftp_folder_match(ftp,localFolder,deleteStuff=True): """upload everything from localFolder into the current FTP folder.""" for fname in glob.glob(localFolder+"/*.*"): ftp_upload(ftp,fname) return
python
{ "resource": "" }
q263877
version_upload
validation
def version_upload(fname,username="nibjb"): """Only scott should do this. Upload new version to site.""" print("popping up pasword window...") password=TK_askPassword("FTP LOGIN","enter password for %s"%username) if not password: return print("username:",username) print("password:","*"*(len(password))) print("connecting...") ftp = ftplib.FTP("swharden.com") ftp.login(username, password) print("successful login!") ftp.cwd("/software/swhlab/versions") #IMMEDIATELY GO HERE!!! print("uploading",os.path.basename(fname)) ftp.storbinary("STOR " + os.path.basename(fname), open(fname, "rb"), 1024) #for binary files print("disconnecting...") ftp.quit()
python
{ "resource": "" }
q263878
TK_askPassword
validation
def TK_askPassword(title="input",msg="type here:"): """use the GUI to ask for a string.""" root = tkinter.Tk() root.withdraw() #hide tk window root.attributes("-topmost", True) #always on top root.lift() #bring to top value=tkinter.simpledialog.askstring(title,msg) root.destroy() return value
python
{ "resource": "" }
q263879
TK_message
validation
def TK_message(title,msg): """use the GUI to pop up a message.""" root = tkinter.Tk() root.withdraw() #hide tk window root.attributes("-topmost", True) #always on top root.lift() #bring to top tkinter.messagebox.showwarning(title, msg) root.destroy()
python
{ "resource": "" }
q263880
TK_ask
validation
def TK_ask(title,msg): """use the GUI to ask YES or NO.""" root = tkinter.Tk() root.attributes("-topmost", True) #always on top root.withdraw() #hide tk window result=tkinter.messagebox.askyesno(title,msg) root.destroy() return result
python
{ "resource": "" }
q263881
processArgs
validation
def processArgs(): """check out the arguments and figure out what to do.""" if len(sys.argv)<2: print("\n\nERROR:") print("this script requires arguments!") print('try "python command.py info"') return if sys.argv[1]=='info': print("import paths:\n ","\n ".join(sys.path)) print() print("python version:",sys.version) print("SWHLab path:",__file__) print("SWHLab version:",swhlab.__version__) return if sys.argv[1]=='glanceFolder': abfFolder=swhlab.common.gui_getFolder() if not abfFolder or not os.path.isdir(abfFolder): print("bad path") return fnames=sorted(glob.glob(abfFolder+"/*.abf")) outFolder=tempfile.gettempdir()+"/swhlab/" if os.path.exists(outFolder): shutil.rmtree(outFolder) os.mkdir(outFolder) outFile=outFolder+"/index.html" out='<html><body>' out+='<h2>%s</h2>'%abfFolder for i,fname in enumerate(fnames): print("\n\n### PROCESSING %d of %d"%(i,len(fnames))) saveAs=os.path.join(os.path.dirname(outFolder),os.path.basename(fname))+".png" out+='<br><br><br><code>%s</code><br>'%os.path.abspath(fname) out+='<a href="%s"><img src="%s"></a><br>'%(saveAs,saveAs) swhlab.analysis.glance.processAbf(fname,saveAs) out+='</body></html>' with open(outFile,'w') as f: f.write(out) webbrowser.open_new_tab(outFile) return print("\n\nERROR:\nI'm not sure how to process these arguments!") print(sys.argv)
python
{ "resource": "" }
q263882
stats_first
validation
def stats_first(abf):
    """provide all stats on the first AP.

    Iterates every AP dict in abf.APs (per sweep) and renders each key/value
    pair, skipping raw-index keys (those ending in 'I', 'I1', or 'I2').

    :param abf: object with `sweeps` (int) and `APs` (list of lists of dicts)
    :return: multi-line "key = value" string
    """
    msg = ""
    for sweep in range(abf.sweeps):
        for AP in abf.APs[sweep]:
            for key in sorted(AP.keys()):
                # BUG FIX: the original used `key[-1] is "I"`, an identity
                # comparison that only works by CPython string-interning
                # accident; compare by value instead.
                if key[-1] == "I" or key[-2:] in ["I1", "I2"]:
                    continue
                msg += "%s = %s\n" % (key, AP[key])
    return msg
python
{ "resource": "" }
q263883
getAvgBySweep
validation
def getAvgBySweep(abf,feature,T0=None,T1=None): """return average of a feature divided by sweep.""" if T1 is None: T1=abf.sweepLength if T0 is None: T0=0 data = [np.empty((0))]*abf.sweeps for AP in cm.dictFlat(cm.matrixToDicts(abf.APs)): if T0<AP['sweepT']<T1: val=AP[feature] data[int(AP['sweep'])]=np.concatenate((data[int(AP['sweep'])],[val])) for sweep in range(abf.sweeps): if len(data[sweep])>1 and np.any(data[sweep]): data[sweep]=np.nanmean(data[sweep]) elif len(data[sweep])==1: data[sweep]=data[sweep][0] else: data[sweep]=np.nan return data
python
{ "resource": "" }
q263884
lazygo
validation
def lazygo(watchFolder='../abfs/',reAnalyze=False,rebuildSite=False, keepGoing=True,matching=False): """ continuously monitor a folder for new abfs and try to analyze them. This is intended to watch only one folder, but can run multiple copies. """ abfsKnown=[] while True: print() pagesNeeded=[] for fname in glob.glob(watchFolder+"/*.abf"): ID=os.path.basename(fname).replace(".abf","") if not fname in abfsKnown: if os.path.exists(fname.replace(".abf",".rsv")): #TODO: or something like this continue if matching and not matching in fname: continue abfsKnown.append(fname) if os.path.exists(os.path.dirname(fname)+"/swhlab4/"+os.path.basename(fname).replace(".abf","_info.pkl")) and reAnalyze==False: print("already analyzed",os.path.basename(fname)) if rebuildSite: pagesNeeded.append(ID) else: handleNewABF(fname) pagesNeeded.append(ID) if len(pagesNeeded): print(" -- rebuilding index page") indexing.genIndex(os.path.dirname(fname),forceIDs=pagesNeeded) if not keepGoing: return for i in range(50): print('.',end='') time.sleep(.2)
python
{ "resource": "" }
q263885
gain
validation
def gain(abf): """easy way to plot a gain function.""" Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq')) Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)]) swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)", ylabel="average inst. freq. (Hz)") pylab.plot(Xs,Ys,'.-',ms=20,alpha=.5,color='b') pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--") pylab.margins(.1,.1)
python
{ "resource": "" }
q263886
comments
validation
def comments(abf,minutes=False): """draw vertical lines at comment points. Defaults to seconds.""" if not len(abf.commentTimes): return for i in range(len(abf.commentTimes)): t,c = abf.commentTimes[i],abf.commentTags[i] if minutes: t=t/60 pylab.axvline(t,lw=1,color='r',ls="--",alpha=.5) X1,X2,Y1,Y2=pylab.axis() Y2=Y2-abs(Y2-Y1)*.02 pylab.text(t,Y2,c,size=8,color='r',rotation='vertical', ha='right',va='top',weight='bold',alpha=.5) if minutes: pylab.xlabel("minutes") else: pylab.xlabel("seconds")
python
{ "resource": "" }
q263887
annotate
validation
def annotate(abf): """stamp the bottom with file info.""" msg="SWHLab %s "%str(swhlab.VERSION) msg+="ID:%s "%abf.ID msg+="CH:%d "%abf.channel msg+="PROTOCOL:%s "%abf.protoComment msg+="COMMAND: %d%s "%(abf.holding,abf.units) msg+="GENERATED:%s "%'{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()) pylab.annotate(msg,(.001,.001),xycoords='figure fraction',ha='left', va='bottom',color='#999999',family='monospace',size=8, weight='bold') if abf.nADC>1: msg="Ch %d/%d"%(abf.channel+1,abf.nADC) pylab.annotate(msg,(.01,.99),xycoords='figure fraction',ha='left', va='top',color='#FF0000',family='monospace',size=12, weight='bold')
python
{ "resource": "" }
q263888
new
validation
def new(ABF,forceNewFigure=False,title=None,xlabel=None,ylabel=None): """ makes a new matplotlib figure with default dims and DPI. Also labels it with pA or mV depending on ABF. """ if len(pylab.get_fignums()) and forceNewFigure==False: #print("adding to existing figure") return pylab.figure(figsize=(8,6)) pylab.grid(alpha=.5) pylab.title(ABF.ID) pylab.ylabel(ABF.units) pylab.xlabel("seconds") if xlabel: pylab.xlabel(xlabel) if ylabel: pylab.ylabel(ylabel) if title: pylab.title(title) annotate(ABF)
python
{ "resource": "" }
q263889
save
validation
def save(abf,fname=None,tag=None,width=700,close=True,facecolor='w', resize=True): """ Save the pylab figure somewhere. If fname==False, show it instead. Height force > dpi force if a tag is given instead of a filename, save it alongside the ABF """ if len(pylab.gca().get_lines())==0: print("can't save, no figure!") return if resize: pylab.tight_layout() pylab.subplots_adjust(bottom=.1) annotate(abf) if tag: fname = abf.outpath+abf.ID+"_"+tag+".png" inchesX,inchesY = pylab.gcf().get_size_inches() dpi=width/inchesX if fname: if not os.path.exists(abf.outpath): os.mkdir(abf.outpath) print(" <- saving [%s] at %d DPI (%dx%d)"%(os.path.basename(fname),dpi,inchesX*dpi,inchesY*dpi)) pylab.savefig(fname,dpi=dpi,facecolor=facecolor) else: pylab.show() if close: pylab.close()
python
{ "resource": "" }
q263890
tryLoadingFrom
validation
def tryLoadingFrom(tryPath,moduleName='swhlab'): """if the module is in this path, load it from the local folder.""" if not 'site-packages' in swhlab.__file__: print("loaded custom swhlab module from", os.path.dirname(swhlab.__file__)) return # no need to warn if it's already outside. while len(tryPath)>5: sp=tryPath+"/swhlab/" # imaginary swhlab module path if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"): if not os.path.dirname(tryPath) in sys.path: sys.path.insert(0,os.path.dirname(tryPath)) print("#"*80) print("# WARNING: using site-packages swhlab module") print("#"*80) tryPath=os.path.dirname(tryPath) return
python
{ "resource": "" }
q263891
DynamicArgs.update
validation
def update(self, tids, info): """ Called to update the state of the iterator. This methods receives the set of task ids from the previous set of tasks together with the launch information to allow the output values to be parsed using the output_extractor. This data is then used to determine the next desired point in the parameter space by calling the _update_state method. """ outputs_dir = os.path.join(info['root_directory'], 'streams') pattern = '%s_*_tid_*{tid}.o.{tid}*' % info['batch_name'] flist = os.listdir(outputs_dir) try: outputs = [] for tid in tids: matches = fnmatch.filter(flist, pattern.format(tid=tid)) if len(matches) != 1: self.warning("No unique output file for tid %d" % tid) contents = open(os.path.join(outputs_dir, matches[0]),'r').read() outputs.append(self.output_extractor(contents)) self._next_val = self._update_state(outputs) self.trace.append((outputs, self._next_val)) except: self.warning("Cannot load required output files. Cannot continue.") self._next_val = StopIteration
python
{ "resource": "" }
q263892
DynamicArgs.show
validation
def show(self): """ When dynamic, not all argument values may be available. """ copied = self.copy() enumerated = [el for el in enumerate(copied)] for (group_ind, specs) in enumerated: if len(enumerated) > 1: print("Group %d" % group_ind) ordering = self.constant_keys + self.varying_keys # Ordered nicely by varying_keys definition. spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering]) for s in specs] print('\n'.join(['%d: %s' % (i,l) for (i,l) in enumerate(spec_lines)])) print('Remaining arguments not available for %s' % self.__class__.__name__)
python
{ "resource": "" }
q263893
DynamicArgs._trace_summary
validation
def _trace_summary(self): """ Summarizes the trace of values used to update the DynamicArgs and the arguments subsequently returned. May be used to implement the summary method. """ for (i, (val, args)) in enumerate(self.trace): if args is StopIteration: info = "Terminated" else: pprint = ','.join('{' + ','.join('%s=%r' % (k,v) for (k,v) in arg.items()) + '}' for arg in args) info = ("exploring arguments [%s]" % pprint ) if i == 0: print("Step %d: Initially %s." % (i, info)) else: print("Step %d: %s after receiving input(s) %s." % (i, info.capitalize(), val))
python
{ "resource": "" }
q263894
SimpleGradientDescent._update_state
validation
def _update_state(self, vals): """ Takes as input a list or tuple of two elements. First the value returned by incrementing by 'stepsize' followed by the value returned after a 'stepsize' decrement. """ self._steps_complete += 1 if self._steps_complete == self.max_steps: self._termination_info = (False, self._best_val, self._arg) return StopIteration arg_inc, arg_dec = vals best_val = min(arg_inc, arg_dec, self._best_val) if best_val == self._best_val: self._termination_info = (True, best_val, self._arg) return StopIteration self._arg += self.stepsize if (arg_dec > arg_inc) else -self.stepsize self._best_val= best_val return [{self.key:self._arg+self.stepsize}, {self.key:self._arg-self.stepsize}]
python
{ "resource": "" }
q263895
analyze
validation
def analyze(fname=False,save=True,show=None): """given a filename or ABF object, try to analyze it.""" if fname and os.path.exists(fname.replace(".abf",".rst")): print("SKIPPING DUE TO RST FILE") return swhlab.plotting.core.IMAGE_SAVE=save if show is None: if cm.isIpython(): swhlab.plotting.core.IMAGE_SHOW=True else: swhlab.plotting.core.IMAGE_SHOW=False #swhlab.plotting.core.IMAGE_SHOW=show abf=ABF(fname) # ensure it's a class print(">>>>> PROTOCOL >>>>>",abf.protocomment) runFunction="proto_unknown" if "proto_"+abf.protocomment in globals(): runFunction="proto_"+abf.protocomment abf.log.debug("running %s()"%(runFunction)) plt.close('all') # get ready globals()[runFunction](abf) # run that function try: globals()[runFunction](abf) # run that function except: abf.log.error("EXCEPTION DURING PROTOCOL FUNCTION") abf.log.error(sys.exc_info()[0]) return "ERROR" plt.close('all') # clean up return "SUCCESS"
python
{ "resource": "" }
q263896
frameAndSave
validation
def frameAndSave(abf, tag="", dataType="plot", saveAsFname=False, closeWhenDone=True):
    """
    Frame the current matplotlib plot with ABF info, and optionally save it.

    Annotates the current figure with *tag* (top-left) and the ABF ID /
    protocol comment (bottom-left). The module-level IMAGE_SAVE and
    IMAGE_SHOW flags control saving and interactive display. Note that
    this is entirely independent of the ABFplot class object.

    dataType should be "plot" or "experiment" (used as a filename prefix).
    saveAsFname, when given, overrides the auto-generated output path.
    """
    plt.tight_layout()
    plt.subplots_adjust(top=.93, bottom=.07)
    plt.annotate(tag, (.01, .99), xycoords='figure fraction', ha='left',
                 va='top', family='monospace', size=10, alpha=.5)
    msgBot = "%s [%s]" % (abf.ID, abf.protocomment)
    plt.annotate(msgBot, (.01, .01), xycoords='figure fraction', ha='left',
                 va='bottom', family='monospace', size=10, alpha=.5)
    fname = dataType + "_" + tag.lower().replace(" ", '_') + ".jpg"
    if IMAGE_SAVE:
        abf.log.info("saving [%s]", fname)
        try:
            if saveAsFname:
                saveAs = os.path.abspath(saveAsFname)
            else:
                saveAs = os.path.abspath(abf.outPre + fname)
            if not os.path.exists(abf.outFolder):
                os.mkdir(abf.outFolder)
            plt.savefig(saveAs)
        except Exception as E:
            # matplotlib needs Pillow to write JPEG files
            abf.log.error("saving [%s] failed! 'pip install pillow'?", fname)
            print(E)
    if IMAGE_SHOW == True:
        if closeWhenDone == False:
            # BUGFIX: message used to say "closeWhenDone==True", contradicting
            # the condition being tested here.
            print("NOT SHOWING (because closeWhenDone==False and showing would mess things up)")
        else:
            abf.log.info("showing [%s]", fname)
            plt.show()
    if closeWhenDone:
        plt.close('all')
python
{ "resource": "" }
q263897
ABFplot.figure
validation
def figure(self, forceNew=False):
    """make sure a figure is ready."""
    # NOTE: uses matplotlib's private Gcf registry to count open figures
    anyFigureOpen = plt._pylab_helpers.Gcf.get_num_fig_managers() > 0
    if anyFigureOpen and forceNew is False:
        self.log.debug("figure already seen, not creating one.")
        return
    if self.subplot:
        self.log.debug("subplot mode enabled, not creating new figure")
        return
    self.log.debug("creating new figure")
    plt.figure(figsize=(self.figure_width, self.figure_height))
python
{ "resource": "" }
q263898
ABFplot.save
validation
def save(self, callit="misc", closeToo=True, fullpath=False):
    """Save the existing figure to disk. Does not modify it.

    callit: filename tag, or the complete output path when fullpath is True.
    closeToo: close the figure after saving.
    fullpath: treat callit as the full path instead of building one from
        the ABF's output prefix.
    """
    if fullpath is False:
        fname = self.abf.outPre + "plot_" + callit + ".jpg"
    else:
        fname = callit
    outDir = os.path.dirname(fname)
    if outDir:
        # makedirs creates nested directories and avoids the
        # exists-check/mkdir race that os.mkdir had
        os.makedirs(outDir, exist_ok=True)
    plt.savefig(fname)
    self.log.info("saved [%s]", os.path.basename(fname))
    if closeToo:
        plt.close()
python
{ "resource": "" }
q263899
ABFplot.figure_sweeps
validation
def figure_sweeps(self, offsetX=0, offsetY=0):
    """plot every sweep of an ABF file."""
    self.log.debug("creating overlayed sweeps plot")
    self.figure()
    for sweepNumber in range(self.abf.sweeps):
        self.abf.setsweep(sweepNumber)
        self.setColorBySweep()
        # shift each successive sweep by the requested X/Y offsets
        xs = self.abf.sweepX2 + sweepNumber * offsetX
        ys = self.abf.sweepY + sweepNumber * offsetY
        plt.plot(xs, ys, **self.kwargs)
    if offsetX:
        self.marginX = .05
    self.decorate()
python
{ "resource": "" }