text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def measureSize(self, diff, chunkSize):
    """ Spend some time to get an accurate size.

    Replays the btrfs send stream for *diff* into a counting sink (no data
    is stored), records the measured byte count on the diff, and appends an
    info line for it to every known info file of the target volume.
    """
    # Make sure on-disk state is settled before measuring the stream.
    self._fileSystemSync()
    sendContext = self.butter.send(
        self.getSendPath(diff.toVol),
        self.getSendPath(diff.fromVol),
        diff,
        showProgress=self.showProgress is not False,
        allowDryRun=False,
    )

    class _Measure(io.RawIOBase):
        # Write-only sink that counts bytes and optionally shows progress.

        def __init__(self, estimatedSize, showProgress):
            self.totalSize = None
            self.progress = progress.DisplayProgress(estimatedSize) if showProgress else None

        def __enter__(self):
            self.totalSize = 0
            if self.progress:
                self.progress.__enter__()
            return self

        def __exit__(self, exceptionType, exceptionValue, traceback):
            if self.progress:
                self.progress.__exit__(exceptionType, exceptionValue, traceback)
            return False  # Don't suppress exception

        def writable(self):
            return True

        def write(self, bytes):
            # NOTE(review): io.RawIOBase.write conventionally returns the
            # number of bytes written; this returns None — presumably
            # Store.transfer ignores the return value. TODO confirm.
            self.totalSize += len(bytes)
            if self.progress:
                self.progress.update(self.totalSize)

    logger.info("Measuring %s", diff)
    measure = _Measure(diff.size, self.showProgress is not False)
    Store.transfer(sendContext, measure, chunkSize)
    # Second arg False: the recorded size is exact, not an estimate.
    diff.setSize(measure.totalSize, False)

    # Persist the measured size in the info file next to each path.
    for path in self.getPaths(diff.toVol):
        path = self._fullPath(path) + Store.theInfoExtension
        with open(path, "a") as infoFile:
            diff.toVol.writeInfoLine(infoFile, diff.fromUUID, measure.totalSize)
[ "def", "measureSize", "(", "self", ",", "diff", ",", "chunkSize", ")", ":", "self", ".", "_fileSystemSync", "(", ")", "sendContext", "=", "self", ".", "butter", ".", "send", "(", "self", ".", "getSendPath", "(", "diff", ".", "toVol", ")", ",", "self", ...
34.346939
20.020408
def purge_queues(self, queues):
    """Purge all messages from one or more queues.

    :param list queues: A list of ('qname', 'vhost') tuples.
    :returns: True on success
    """
    for queue_name, vhost_name in queues:
        # URL-encode both path components (no safe characters).
        encoded_vhost = quote(vhost_name, '')
        encoded_name = quote(queue_name, '')
        path = Client.urls['purge_queue'] % (encoded_vhost, encoded_name)
        self._call(path, 'DELETE')
    return True
[ "def", "purge_queues", "(", "self", ",", "queues", ")", ":", "for", "name", ",", "vhost", "in", "queues", ":", "vhost", "=", "quote", "(", "vhost", ",", "''", ")", "name", "=", "quote", "(", "name", ",", "''", ")", "path", "=", "Client", ".", "ur...
30.214286
13.642857
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """Copy loaded weights into the model state dict by suffix matching.

    Model keys may carry extra prefixes (e.g. ``backbone[0].body.``) that a
    pre-trained checkpoint does not have, so each model key is matched to the
    loaded key that is its *longest* suffix (e.g. ``res2.conv1.weight`` wins
    over plain ``conv1.weight``). ``model_state_dict`` is updated in place;
    every successful match is logged with aligned columns.
    """
    own_keys = sorted(model_state_dict)
    ckpt_keys = sorted(loaded_state_dict)
    # scores[i, j] = len(ckpt_keys[j]) when it is a suffix of own_keys[i], else 0.
    scores = torch.as_tensor(
        [len(ck) if mk.endswith(ck) else 0 for mk in own_keys for ck in ckpt_keys]
    ).view(len(own_keys), len(ckpt_keys))
    best_len, best_idx = scores.max(1)
    # Mark rows with no suffix match at all.
    best_idx[best_len == 0] = -1

    # Column widths used only for pretty log formatting.
    width_own = max([len(key) for key in own_keys]) if own_keys else 1
    width_ckpt = max([len(key) for key in ckpt_keys]) if ckpt_keys else 1
    log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
    logger = logging.getLogger(__name__)
    for row, col in enumerate(best_idx.tolist()):
        if col == -1:
            continue
        own_key = own_keys[row]
        ckpt_key = ckpt_keys[col]
        model_state_dict[own_key] = loaded_state_dict[ckpt_key]
        logger.info(
            log_str_template.format(
                own_key,
                width_own,
                ckpt_key,
                width_ckpt,
                tuple(loaded_state_dict[ckpt_key].shape),
            )
        )
[ "def", "align_and_update_state_dicts", "(", "model_state_dict", ",", "loaded_state_dict", ")", ":", "current_keys", "=", "sorted", "(", "list", "(", "model_state_dict", ".", "keys", "(", ")", ")", ")", "loaded_keys", "=", "sorted", "(", "list", "(", "loaded_stat...
48.183673
23.367347
def read_manifest(self, encrypted=None):
    """Read an existing manifest file into ``self.manifest``.

    Ensures the ``env`` and ``services`` sections exist and, when the
    manifest is flagged as encrypted (``PREDIXPY_ENCRYPTED`` present in
    ``env``) or *encrypted* is truthy, decrypts each environment value
    with the manifest key before storing it in memory.

    :param encrypted: force decryption of env values even when the
        manifest is not flagged as encrypted.
    """
    with open(self.manifest_path, 'r') as input_file:
        self.manifest = yaml.safe_load(input_file)

        if 'env' not in self.manifest:
            self.manifest['env'] = {}
        if 'services' not in self.manifest:
            self.manifest['services'] = []

        # If manifest is encrypted, use manifest key to
        # decrypt each value before storing in memory.
        if 'PREDIXPY_ENCRYPTED' in self.manifest['env']:
            self.encrypted = True
        if encrypted or self.encrypted:
            key = predix.config.get_crypt_key(self.manifest_key)
            f = Fernet(key)
            for var in self.manifest['env'].keys():
                value = f.decrypt(bytes(self.manifest['env'][var], 'utf-8'))
                self.manifest['env'][var] = value.decode('utf-8')

        self.app_name = self.manifest['applications'][0]['name']
        # Fix: removed redundant input_file.close() -- the ``with``
        # statement already closes the file on exit.
[ "def", "read_manifest", "(", "self", ",", "encrypted", "=", "None", ")", ":", "with", "open", "(", "self", ".", "manifest_path", ",", "'r'", ")", "as", "input_file", ":", "self", ".", "manifest", "=", "yaml", ".", "safe_load", "(", "input_file", ")", "...
37.178571
18.392857
def get_links(html, outformat):
    """Return a list of reference links from the html.

    Parameters
    ----------
    html : str
    outformat : int
        the output format of the citations

    Returns
    -------
    List[str]
        the links to the references

    Raises
    ------
    ValueError
        if *outformat* is not one of the known citation formats
    """
    # Fix: the BibTeX pattern was missing the closing '"' that the other
    # three patterns have; made consistent.
    if outformat == FORMAT_BIBTEX:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)"')
    elif outformat == FORMAT_ENDNOTE:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.enw\?[^"]*)"')
    elif outformat == FORMAT_REFMAN:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ris\?[^"]*)"')
    elif outformat == FORMAT_WENXIANWANG:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ral\?[^"]*)"')
    else:
        # Fix: previously an unknown format fell through and raised a
        # confusing NameError on the unbound ``refre``.
        raise ValueError("Unknown citation output format: %r" % (outformat,))
    reflist = refre.findall(html)
    # escape html entities
    reflist = [re.sub('&(%s);' % '|'.join(name2codepoint),
                      lambda m: chr(name2codepoint[m.group(1)]), s)
               for s in reflist]
    return reflist
[ "def", "get_links", "(", "html", ",", "outformat", ")", ":", "if", "outformat", "==", "FORMAT_BIBTEX", ":", "refre", "=", "re", ".", "compile", "(", "r'<a href=\"https://scholar.googleusercontent.com(/scholar\\.bib\\?[^\"]*)'", ")", "elif", "outformat", "==", "FORMAT_...
36.857143
24.25
def scheduleMeasurement(self, measurementId, duration, start):
    """
    Schedules the requested measurement session with all INITIALISED devices.
    :param measurementId:
    :param duration:
    :param start:
    :return: a dict of device vs status.
    """
    # TODO subtract 1s from start and format
    results = {}
    for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name):
        service_url = device.payload['serviceURL']
        logger.info('Sending measurement ' + measurementId + ' to ' + service_url)
        try:
            resp = self.httpclient.put(
                service_url + '/measurements/' + measurementId,
                json={'duration': duration,
                      'at': start.strftime(DATETIME_FORMAT)})
            logger.info('Response for ' + measurementId + ' from ' + service_url
                        + ' is ' + str(resp.status_code))
            results[device] = resp.status_code
        except Exception as e:
            # A failed call counts as an HTTP 500 for that device.
            logger.exception(e)
            results[device] = 500
    return results
[ "def", "scheduleMeasurement", "(", "self", ",", "measurementId", ",", "duration", ",", "start", ")", ":", "# TODO subtract 1s from start and format", "results", "=", "{", "}", "for", "device", "in", "self", ".", "getDevices", "(", "RecordingDeviceStatus", ".", "IN...
50.227273
23.954545
def delete_customer(self, handle):
    """Delete the customer identified by *handle*."""
    request_body = E.deleteCustomerRequest(E.handle(handle))
    self.request(request_body)
    return True
[ "def", "delete_customer", "(", "self", ",", "handle", ")", ":", "self", ".", "request", "(", "E", ".", "deleteCustomerRequest", "(", "E", ".", "handle", "(", "handle", ")", ")", ")", "return", "True" ]
24.666667
21.666667
def commit(self):
    """ Put the document into the new state. """
    if self.after is not None:
        # Apply the stored 'after' state, preserving the cursor position.
        cursor_pos = self.qteWidget.textCursor().position()
        self.qteWidget.setHtml(self.after)
        self.placeCursor(cursor_pos)
    else:
        # First commit: only snapshot the current document state,
        # do not modify anything.
        self.after = self.qteWidget.toHtml()
[ "def", "commit", "(", "self", ")", ":", "if", "self", ".", "after", "is", "None", ":", "# If this is the first 'commit' call then do not make", "# any changes but store the current document state.", "self", ".", "after", "=", "self", ".", "qteWidget", ".", "toHtml", "...
37.692308
12.461538
def _escape(s): """ Helper method that escapes parameters to a SQL query. """ e = s e = e.replace('\\', '\\\\') e = e.replace('\n', '\\n') e = e.replace('\r', '\\r') e = e.replace("'", "\\'") e = e.replace('"', '\\"') return e
[ "def", "_escape", "(", "s", ")", ":", "e", "=", "s", "e", "=", "e", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "e", "=", "e", ".", "replace", "(", "'\\n'", ",", "'\\\\n'", ")", "e", "=", "e", ".", "replace", "(", "'\\r'", ",", "'\...
27.777778
15
def ConfigureRequest(self, upload_config, http_request, url_builder): """Configure the request and url for this upload.""" # Validate total_size vs. max_size if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size): raise exceptions.InvalidUserInputError( 'Upload too big: %s larger than max size %s' % ( self.total_size, upload_config.max_size)) # Validate mime type if not util.AcceptableMimeType(upload_config.accept, self.mime_type): raise exceptions.InvalidUserInputError( 'MIME type %s does not match any accepted MIME ranges %s' % ( self.mime_type, upload_config.accept)) self.__SetDefaultUploadStrategy(upload_config, http_request) if self.strategy == SIMPLE_UPLOAD: url_builder.relative_path = upload_config.simple_path if http_request.body: url_builder.query_params['uploadType'] = 'multipart' self.__ConfigureMultipartRequest(http_request) else: url_builder.query_params['uploadType'] = 'media' self.__ConfigureMediaRequest(http_request) # Once the entire body is written, compress the body if configured # to. Both multipart and media request uploads will read the # entire stream into memory, which means full compression is also # safe to perform. Because the strategy is set to SIMPLE_UPLOAD, # StreamInChunks throws an exception, meaning double compression # cannot happen. if self.__gzip_encoded: http_request.headers['Content-Encoding'] = 'gzip' # Turn the body into a stream so that we can compress it, then # read the compressed bytes. In the event of a retry (e.g. if # our access token has expired), we need to be able to re-read # the body, which we can't do with a stream. So, we consume the # bytes from the stream now and store them in a re-readable # bytes container. http_request.body = ( compression.CompressStream( six.BytesIO(http_request.body))[0].read()) else: url_builder.relative_path = upload_config.resumable_path url_builder.query_params['uploadType'] = 'resumable' self.__ConfigureResumableRequest(http_request)
[ "def", "ConfigureRequest", "(", "self", ",", "upload_config", ",", "http_request", ",", "url_builder", ")", ":", "# Validate total_size vs. max_size", "if", "(", "self", ".", "total_size", "and", "upload_config", ".", "max_size", "and", "self", ".", "total_size", ...
57.113636
22.795455
def output_kernels(gandi, flavor, name_list, justify=14):
    """ Helper to output kernel flavor versions."""
    output_line(gandi, 'flavor', flavor, justify)
    for version in name_list:
        output_line(gandi, 'version', version, justify)
[ "def", "output_kernels", "(", "gandi", ",", "flavor", ",", "name_list", ",", "justify", "=", "14", ")", ":", "output_line", "(", "gandi", ",", "'flavor'", ",", "flavor", ",", "justify", ")", "for", "name", "in", "name_list", ":", "output_line", "(", "gan...
47
10.4
def generate_metimage_file(platform_name):
    """Retrieve original RSR data and convert to internal hdf5 format.
    """
    import h5py
    bandnames = METIMAGE_BAND_NAMES
    instr = MetImageRSR(bandnames[0], platform_name)
    instr_name = instr.instrument.replace('/', '')
    filename = os.path.join(instr.output_dir,
                            "rsr_{0}_{1}.h5".format(instr_name, platform_name))

    with h5py.File(filename, "w") as h5f:
        h5f.attrs['description'] = ('Relative Spectral Responses for ' +
                                    instr.instrument.upper())
        h5f.attrs['platform_name'] = platform_name
        h5f.attrs['band_names'] = bandnames

        for chname in bandnames:
            metimage = MetImageRSR(chname, platform_name)
            grp = h5f.create_group(chname)
            grp.attrs['number_of_detectors'] = len(metimage.rsr.keys())
            # Loop over each detector to check if the sampling wavelengths are
            # identical:
            det_names = list(metimage.rsr.keys())
            wvl = metimage.rsr[det_names[0]]['wavelength']
            wvl, idx = np.unique(wvl, return_index=True)
            wvl_is_constant = True
            for det in det_names[1:]:
                det_wvl = np.unique(metimage.rsr[det]['wavelength'])
                if not np.alltrue(wvl == det_wvl):
                    LOG.warning(
                        "Wavelngth arrays are not the same among detectors!")
                    wvl_is_constant = False

            # All detectors share one grid: store the wavelength dataset once
            # at the channel level.
            if wvl_is_constant:
                arr = wvl
                dset = grp.create_dataset('wavelength', arr.shape, dtype='f')
                dset.attrs['unit'] = 'm'
                dset.attrs['scale'] = 1e-06
                dset[...] = arr

            # Loop over each detector:
            for det in metimage.rsr:
                det_grp = grp.create_group(det)
                # Drop samples whose wavelength is NaN, then de-duplicate the
                # grid and take the matching responses.
                wvl = metimage.rsr[det]['wavelength'][
                    ~np.isnan(metimage.rsr[det]['wavelength'])]
                rsp = metimage.rsr[det]['response'][
                    ~np.isnan(metimage.rsr[det]['wavelength'])]
                wvl, idx = np.unique(wvl, return_index=True)
                rsp = np.take(rsp, idx)
                LOG.debug("wvl.shape: %s", str(wvl.shape))
                det_grp.attrs[
                    'central_wavelength'] = get_central_wave(wvl, rsp)
                # Per-detector wavelength grid only when grids differ.
                if not wvl_is_constant:
                    arr = wvl
                    dset = det_grp.create_dataset(
                        'wavelength', arr.shape, dtype='f')
                    dset.attrs['unit'] = 'm'
                    dset.attrs['scale'] = 1e-06
                    dset[...] = arr

                dset = det_grp.create_dataset('response', rsp.shape, dtype='f')
                dset[...] = rsp
[ "def", "generate_metimage_file", "(", "platform_name", ")", ":", "import", "h5py", "bandnames", "=", "METIMAGE_BAND_NAMES", "instr", "=", "MetImageRSR", "(", "bandnames", "[", "0", "]", ",", "platform_name", ")", "instr_name", "=", "instr", ".", "instrument", "....
43.609375
15.75
def query_search_api(min_lat, max_lat, min_lon, max_lon, max_results): ''' Send query to the search API and get dict with image data. ''' # Create URL params = urllib.urlencode(zip( ['client_id', 'bbox', 'per_page'], [CLIENT_ID, ','.join([str(min_lon), str(min_lat), str( max_lon), str(max_lat)]), str(max_results)] )) print(MAPILLARY_API_IM_SEARCH_URL + params) # Get data from server, then parse JSON query = urllib2.urlopen(MAPILLARY_API_IM_SEARCH_URL + params).read() query = json.loads(query)['features'] print("Result: {0} images in area.".format(len(query))) return query
[ "def", "query_search_api", "(", "min_lat", ",", "max_lat", ",", "min_lon", ",", "max_lon", ",", "max_results", ")", ":", "# Create URL", "params", "=", "urllib", ".", "urlencode", "(", "zip", "(", "[", "'client_id'", ",", "'bbox'", ",", "'per_page'", "]", ...
33.631579
22.473684
def logfbank(signal, samplerate=16000, winlen=0.025, winstep=0.01,
             nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97,
             winfunc=lambda x: numpy.ones((x,))):
    """Compute log Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the sample rate of the signal we are working with, in Hz.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param winfunc: the analysis window to apply to each frame. By default no window is applied.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    # Delegate to fbank() and take the elementwise log of the filter energies;
    # the total frame energy it also returns is not needed here.
    filter_energies, _total_energy = fbank(signal, samplerate, winlen, winstep,
                                           nfilt, nfft, lowfreq, highfreq,
                                           preemph, winfunc)
    return numpy.log(filter_energies)
[ "def", "logfbank", "(", "signal", ",", "samplerate", "=", "16000", ",", "winlen", "=", "0.025", ",", "winstep", "=", "0.01", ",", "nfilt", "=", "26", ",", "nfft", "=", "512", ",", "lowfreq", "=", "0", ",", "highfreq", "=", "None", ",", "preemph", "...
74.157895
39.947368
def init_from_files(
    vocab_file, files, target_vocab_size, threshold, min_count=None,
    file_byte_limit=1e6, reserved_tokens=None):
    """Create subtoken vocabulary based on files, and save vocab to file.

    Args:
      vocab_file: String name of vocab file to store subtoken vocabulary.
      files: List of file paths that will be used to generate vocabulary.
      target_vocab_size: target vocabulary size to generate.
      threshold: int threshold of vocabulary size to accept.
      min_count: int minimum count to use for generating the vocabulary. If
        None, the value is found by binary search.
      file_byte_limit: (Default 1e6) Maximum number of bytes of sample text
        drawn from the files.
      reserved_tokens: List of string tokens guaranteed to be at the beginning
        of the subtoken vocabulary list.

    Returns:
      Subtokenizer object
    """
    if reserved_tokens is None:
        reserved_tokens = RESERVED_TOKENS

    # Reuse an existing vocab file unchanged.
    if tf.gfile.Exists(vocab_file):
        tf.logging.info("Vocab file already exists (%s)" % vocab_file)
        return Subtokenizer(vocab_file)

    tf.logging.info("Begin steps to create subtoken vocabulary...")
    token_counts = _count_tokens(files, file_byte_limit)
    alphabet = _generate_alphabet_dict(token_counts)
    subtoken_list = _generate_subtokens_with_target_vocab_size(
        token_counts, alphabet, target_vocab_size, threshold, min_count,
        reserved_tokens)
    tf.logging.info("Generated vocabulary with %d subtokens." %
                    len(subtoken_list))
    mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
                                 value=len(subtoken_list))
    _save_vocab_file(vocab_file, subtoken_list)
    return Subtokenizer(vocab_file)
[ "def", "init_from_files", "(", "vocab_file", ",", "files", ",", "target_vocab_size", ",", "threshold", ",", "min_count", "=", "None", ",", "file_byte_limit", "=", "1e6", ",", "reserved_tokens", "=", "None", ")", ":", "if", "reserved_tokens", "is", "None", ":",...
47.325
21.575
def getKwdArgs(self, flatten=False):
    """ Return a dict of all normal dict parameters - that is, all parameters
    NOT marked with "pos=N" in the .cfgspc file. This will also exclude all
    hidden parameters (metadata, rules, etc).
    """
    # Work on a full deep copy; values may themselves be dicts
    # (sub-sections), arbitrarily nested.
    dcopy = self.dict()  # ConfigObj docs say this is a deep-copy

    # Strip all positional args, wherever they live in the tree.
    for idx, scope, name in self._posArgs:
        container, _val = findScopedPar(dcopy, scope, name)
        # 'container' may be dcopy itself, or a dict nested under it
        container.pop(name)

    # Strip all hidden items ('_item_name_') at the top level.
    for key in list(dcopy.keys()):
        if isHiddenName(key):
            dcopy.pop(key)

    if not flatten:
        return dcopy

    # Flattening pulls every parameter up to the top level, even those in
    # sub-sections; flattenDictTree raises on resulting name collisions.
    return flattenDictTree(dcopy)
[ "def", "getKwdArgs", "(", "self", ",", "flatten", "=", "False", ")", ":", "# Start with a full deep-copy. What complicates this method is the", "# idea of sub-sections. This dict can have dicts as values, and so on.", "dcopy", "=", "self", ".", "dict", "(", ")", "# ConfigObj ...
46.137931
22.827586
def getRloc(self):
    """get router locator unicast IPv6 address"""
    # NOTE: Python 2 syntax (print statements).
    print '%s call getRloc' % self.port
    # Mesh-local prefix comes back as '<prefix>/<len>'; keep only the prefix.
    prefix = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v IPv6:MeshLocalPrefix')[0])
    mlprefix = prefix.split('/')[0]
    # NOTE(review): lstrip('0x') strips ANY leading '0'/'x' characters, not
    # just the literal '0x' prefix, so leading zeros of the RLOC16 are also
    # dropped -- presumably __padIp6Addr compensates; confirm.
    rloc16 = self.__sendCommand(WPANCTL_CMD + 'getprop -v Thread:RLOC16')[0].lstrip('0x')
    print "prefix: " + prefix
    print "mlprefix: " + mlprefix
    print "rloc16: " + rloc16
    # RLOC = <mesh-local prefix> + '00ff:fe00:' + <RLOC16>, zero-padded.
    rloc = self.__padIp6Addr(mlprefix + '00ff:fe00:' + rloc16)
    print "rloc: " + rloc
    return rloc
[ "def", "getRloc", "(", "self", ")", ":", "print", "'%s call getRloc'", "%", "self", ".", "port", "prefix", "=", "self", ".", "__stripValue", "(", "self", ".", "__sendCommand", "(", "WPANCTL_CMD", "+", "'getprop -v IPv6:MeshLocalPrefix'", ")", "[", "0", "]", ...
43.769231
20
def on_change(self, value):
    '''Calls the modifier on the instance with the passed value.'''
    self._modifier(self.inst, self.prop, value)
[ "def", "on_change", "(", "self", ",", "value", ")", ":", "self", ".", "_modifier", "(", "self", ".", "inst", ",", "self", ".", "prop", ",", "value", ")" ]
45.666667
14.333333
def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
    """
    Hessian at y, given link(f), w.r.t link(f), for the Student-t likelihood.
    The hessian is diagonal (0 unless i == j) because the likelihood
    factorizes over cases.

    .. math::
        \\frac{d^{2} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{2}\\lambda(f)} = \\frac{(v+1)((y_{i}-\\lambda(f_{i}))^{2} - \\sigma^{2}v)}{((y_{i}-\\lambda(f_{i}))^{2} + \\sigma^{2}v)^{2}}

    :param inv_link_f: latent variables inv_link(f)
    :type inv_link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: not used in the student t distribution
    :returns: Diagonal of hessian matrix (second derivative of likelihood
        evaluated at points f)
    :rtype: Nx1 array
    """
    residual = y - inv_link_f
    numerator = (self.v + 1) * (residual ** 2 - self.v * self.sigma2)
    denominator = (self.sigma2 * self.v + residual ** 2) ** 2
    return numerator / denominator
[ "def", "d2logpdf_dlink2", "(", "self", ",", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "e", "=", "y", "-", "inv_link_f", "hess", "=", "(", "(", "self", ".", "v", "+", "1", ")", "*", "(", "e", "**", "2", "-", "self", ".", ...
48.833333
32.333333
def parse_compound(s, global_compartment=None):
    """Parse a compound specification.

    If no compartment is specified in the string, the global compartment
    will be used.
    """
    # Strip optional '|...|' delimiters around the whole spec.
    delimited = re.match(r'^\|(.*)\|$', s)
    if delimited:
        s = delimited.group(1)
    # A trailing '[compartment]' overrides the global compartment.
    located = re.match(r'^(.+)\[(\S+)\]$', s)
    if located:
        compound_id, compartment = located.group(1), located.group(2)
    else:
        compound_id, compartment = s, global_compartment
    return Compound(compound_id, compartment=compartment)
[ "def", "parse_compound", "(", "s", ",", "global_compartment", "=", "None", ")", ":", "m", "=", "re", ".", "match", "(", "r'^\\|(.*)\\|$'", ",", "s", ")", "if", "m", ":", "s", "=", "m", ".", "group", "(", "1", ")", "m", "=", "re", ".", "match", ...
25.631579
18.421053
def m_b(mbmb, scale, f, alphasMZ=0.1185, loop=3):
    r"""Get running b quark mass in the MSbar scheme at the scale `scale`
    in the theory with `f` dynamical quark flavours starting from $m_b(m_b)$"""
    if scale == mbmb and f == 5:
        return mbmb  # nothing to do
    _sane(scale, f)
    alphas_mb = alpha_s(mbmb, 5, alphasMZ=alphasMZ, loop=loop)
    crd = rundec.CRunDec()
    if f == 5:
        # Same flavour number: plain MSbar-to-MSbar running.
        alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
        return crd.mMS2mMS(mbmb, alphas_mb, alphas_scale, f, loop)
    elif f == 4:
        # Cross the b-quark threshold (at 4.8 GeV) downwards: 5 -> 4 flavours.
        crd.nfMmu.Mth = 4.8
        crd.nfMmu.muth = 4.8
        crd.nfMmu.nf = 5
        return crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
    elif f == 3:
        # Two downward crossings: first the b threshold (5 -> 4) down to the
        # charm scale, then the charm threshold (4 -> 3) down to `scale`.
        mc = 1.3
        crd.nfMmu.Mth = 4.8
        crd.nfMmu.muth = 4.8
        crd.nfMmu.nf = 5
        mbmc = crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, mc, loop)
        crd.nfMmu.Mth = mc
        crd.nfMmu.muth = mc
        crd.nfMmu.nf = 4
        alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)
        return crd.mH2mL(mbmc, alphas_mc, mc, crd.nfMmu, scale, loop)
    elif f == 6:
        # Cross the top threshold (at 170 GeV) upwards: 5 -> 6 flavours.
        crd.nfMmu.Mth = 170
        crd.nfMmu.muth = 170
        crd.nfMmu.nf = 6
        return crd.mL2mH(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
    else:
        raise ValueError("Invalid input: f={}, scale={}".format(f, scale))
[ "def", "m_b", "(", "mbmb", ",", "scale", ",", "f", ",", "alphasMZ", "=", "0.1185", ",", "loop", "=", "3", ")", ":", "if", "scale", "==", "mbmb", "and", "f", "==", "5", ":", "return", "mbmb", "# nothing to do", "_sane", "(", "scale", ",", "f", ")"...
39.117647
18.529412
def load_data_with_word2vec(word2vec_list):
    """Loads and preprocessed data for the MR dataset.
    Returns input vectors, labels, vocabulary, and inverse vocabulary.
    """
    # Load raw sentences/labels and pad every sentence to a common length.
    raw_sentences, labels = load_data_and_labels()
    padded_sentences = pad_sentences(raw_sentences)
    # vocabulary, vocabulary_inv = build_vocab(padded_sentences)
    return build_input_data_with_word2vec(padded_sentences, labels,
                                          word2vec_list)
[ "def", "load_data_with_word2vec", "(", "word2vec_list", ")", ":", "# Load and preprocess data", "sentences", ",", "labels", "=", "load_data_and_labels", "(", ")", "sentences_padded", "=", "pad_sentences", "(", "sentences", ")", "# vocabulary, vocabulary_inv = build_vocab(sent...
49.222222
13.555556
def p_string_expr_lp(p):
    """ string : LP expr RP substr """
    # NOTE: the docstring above is the PLY grammar production -- do not edit.
    # The parenthesized expression must be string-typed to be sliceable.
    if p[2].type_ != TYPE.string:
        syntax_error(p.lexer.lineno, "Expected a string type expression. "
                     "Got %s type instead" % TYPE.to_string(p[2].type_))
        p[0] = None
    else:
        # p[4] is the (lower, upper) bound pair produced by the substr rule.
        p[0] = make_strslice(p.lexer.lineno, p[2], p[4][0], p[4][1])
[ "def", "p_string_expr_lp", "(", "p", ")", ":", "if", "p", "[", "2", "]", ".", "type_", "!=", "TYPE", ".", "string", ":", "syntax_error", "(", "p", ".", "lexer", ".", "lineno", ",", "\"Expected a string type expression. \"", "\"Got %s type instead\"", "%", "T...
36
15.7
def store_archiver_index(config, archiver, index):
    """
    Store an archiver's index record for faster search.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`
    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``
    :param index: The archiver index record
    :type index: ``dict``
    :rtype: `pathlib.Path`
    """
    root = pathlib.Path(config.cache_path) / archiver.name
    if not root.exists():
        root.mkdir()
        logger.debug("Created archiver directory")

    # Newest revisions first so searches hit recent data sooner.
    index = sorted(index, key=lambda k: k["date"], reverse=True)

    filename = root / "index.json"
    with open(filename, "w") as out:
        out.write(json.dumps(index, indent=2))
    # Fix: was f"Created index output" -- an f-string with no placeholders.
    logger.debug("Created index output")
    return filename
[ "def", "store_archiver_index", "(", "config", ",", "archiver", ",", "index", ")", ":", "root", "=", "pathlib", ".", "Path", "(", "config", ".", "cache_path", ")", "/", "archiver", ".", "name", "if", "not", "root", ".", "exists", "(", ")", ":", "root", ...
28.142857
18.071429
def get_trans(self, out_vec=None):
    """Return the translation portion of the matrix as a vector.

    If out_vec is provided, the translation is written into it via its
    ``set`` method and that result is returned; otherwise a new Vec3 is
    created.
    """
    # Translation lives in the first three components of row 3.
    tx, ty, tz = self.data[3][:3]
    if out_vec:
        return out_vec.set(tx, ty, tz)
    return Vec3(tx, ty, tz)
[ "def", "get_trans", "(", "self", ",", "out_vec", "=", "None", ")", ":", "if", "out_vec", ":", "return", "out_vec", ".", "set", "(", "*", "self", ".", "data", "[", "3", "]", "[", ":", "3", "]", ")", "return", "Vec3", "(", "*", "self", ".", "data...
29.9
19.8
def savestate(self, state, chain=-1):
    """Store a dictionary containing the state of the Model and its
    StepMethods."""
    cur_chain = self._chains[chain]
    if hasattr(cur_chain, '_state_'):
        # A state array already exists: overwrite the stored state in place.
        cur_chain._state_[0] = state
    else:
        # First save for this chain: create a variable-length array holding
        # a single pickled state object.
        s = self._h5file.create_vlarray(
            cur_chain,
            '_state_',
            tables.ObjectAtom(),
            title='The saved state of the sampler',
            filters=self.filter)
        s.append(state)
    self._h5file.flush()
[ "def", "savestate", "(", "self", ",", "state", ",", "chain", "=", "-", "1", ")", ":", "cur_chain", "=", "self", ".", "_chains", "[", "chain", "]", "if", "hasattr", "(", "cur_chain", ",", "'_state_'", ")", ":", "cur_chain", ".", "_state_", "[", "0", ...
36.2
7.466667
def print_results(results):
    """Print `results` (the results of validation) to stdout.

    Args:
        results: A list of FileValidationResults or ObjectValidationResults
            instances, or a single instance.

    Raises:
        ValueError: If an item does not expose a ``log`` method.
    """
    items = results if isinstance(results, list) else [results]
    for item in items:
        try:
            item.log()
        except AttributeError:
            raise ValueError('Argument to print_results() must be a list of '
                             'FileValidationResults or ObjectValidationResults.')
[ "def", "print_results", "(", "results", ")", ":", "if", "not", "isinstance", "(", "results", ",", "list", ")", ":", "results", "=", "[", "results", "]", "for", "r", "in", "results", ":", "try", ":", "r", ".", "log", "(", ")", "except", "AttributeErro...
30
22.588235
def patch_get_deferred_fields(model):
    """
    Django >= 1.8: patch detecting deferred fields. Crucial for only/defer
    to work.
    """
    if not hasattr(model, 'get_deferred_fields'):
        return

    wrapped = model.get_deferred_fields

    def patched_get_deferred_fields(self):
        # Merge any fields recorded as originally deferred into the result.
        deferred = wrapped(self)
        if hasattr(self, '_fields_were_deferred'):
            deferred.update(self._fields_were_deferred)
        return deferred

    model.get_deferred_fields = patched_get_deferred_fields
[ "def", "patch_get_deferred_fields", "(", "model", ")", ":", "if", "not", "hasattr", "(", "model", ",", "'get_deferred_fields'", ")", ":", "return", "old_get_deferred_fields", "=", "model", ".", "get_deferred_fields", "def", "new_get_deferred_fields", "(", "self", ")...
36.142857
14.142857
def atomic(self, func):
    """A decorator that wraps a function in an atomic block.

    Example::

      db = CustomSQLAlchemy()

      @db.atomic
      def f():
          write_to_db('a message')
          return 'OK'

      assert f() == 'OK'

    This code defines the function ``f``, which is wrapped in an
    atomic block. Wrapping a function in an atomic block gives several
    guarantees:

    1. The database transaction will be automatically committed if the
       function returns normally, and automatically rolled back if the
       function raises unhandled exception.

    2. When the transaction is committed, all objects in ``db.session``
       will be expunged. This means that no lazy loading will be
       performed on them.

    3. If a transaction serialization error occurs during the execution
       of the function, the function will be re-executed. (It might be
       re-executed several times.)

    Atomic blocks can be nested, but in this case the outermost block
    takes full control of transaction's life-cycle, and inner blocks
    do nothing.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        session = self.session
        session_info = session.info
        # Nested atomic block: the outermost wrapper already owns the
        # transaction, so just run the function.
        if session_info.get(_ATOMIC_FLAG_SESSION_INFO_KEY):
            return func(*args, **kwargs)
        # Re-execute on transaction serialization errors (deadlocks).
        f = retry_on_deadlock(session)(func)
        session_info[_ATOMIC_FLAG_SESSION_INFO_KEY] = True
        try:
            result = f(*args, **kwargs)
            # Expunge BEFORE commit so no lazy loading happens afterwards.
            session.flush()
            session.expunge_all()
            session.commit()
            return result
        except Exception:
            session.rollback()
            raise
        finally:
            # Always clear the flag so later calls start a fresh transaction.
            session_info[_ATOMIC_FLAG_SESSION_INFO_KEY] = False
    return wrapper
[ "def", "atomic", "(", "self", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "session", "=", "self", ".", "session", "session_info", "=", "session", ".", "info", "if", ...
33.052632
20.438596
def convert_to_shape(self, origin_x=0, origin_y=0):
    """Return a new freeform shape positioned at the given slide offset.

    *origin_x* and *origin_y* are the slide coordinates (EMU) of the local
    coordinate system's origin; |Length| values are convenient here. This
    method may be called repeatedly to stamp shapes of the same geometry
    at several locations on the slide.
    """
    freeform_sp = self._add_freeform_sp(origin_x, origin_y)
    drawing_path = self._start_path(freeform_sp)
    for operation in self:
        operation.apply_operation_to(drawing_path)
    return self._shapes._shape_factory(freeform_sp)
[ "def", "convert_to_shape", "(", "self", ",", "origin_x", "=", "0", ",", "origin_y", "=", "0", ")", ":", "sp", "=", "self", ".", "_add_freeform_sp", "(", "origin_x", ",", "origin_y", ")", "path", "=", "self", ".", "_start_path", "(", "sp", ")", "for", ...
46.066667
18.6
def import_image_tags(self, name, tags, repository, insecure=False):
    """Import image tags from specified container repository.

    :param name: str, name of ImageStream object
    :param tags: iterable, tags to be imported
    :param repository: str, remote location of container image
                       in the format <registry>/<repository>
    :param insecure: bool, indicates whenever registry is secure
    :return: bool, whether tags were imported
    """
    # Load the ImageStreamImport template shipped with the build JSON store.
    template_path = os.path.join(self.os_conf.get_build_json_store(),
                                 'image_stream_import.json')
    with open(template_path) as template_file:
        stream_import = json.load(template_file)
    return self.os.import_image_tags(name, stream_import, tags,
                                     repository, insecure)
[ "def", "import_image_tags", "(", "self", ",", "name", ",", "tags", ",", "repository", ",", "insecure", "=", "False", ")", ":", "stream_import_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "os_conf", ".", "get_build_json_store", "(", ")", ...
50.411765
20.058824
def unregister_controllers(self):
    """Destroy all controller objects and clear the internal registry.

    This will be called after each test class.
    """
    # TODO(xpconanfan): actually record these errors instead of just
    # logging them.
    for module_name, controller_module in self._controller_modules.items():
        logging.debug('Destroying %s.', module_name)
        with expects.expect_no_raises(
                'Exception occurred destroying %s.' % module_name):
            controller_module.destroy(self._controller_objects[module_name])
    self._controller_objects = collections.OrderedDict()
    self._controller_modules = {}
[ "def", "unregister_controllers", "(", "self", ")", ":", "# TODO(xpconanfan): actually record these errors instead of just", "# logging them.", "for", "name", ",", "module", "in", "self", ".", "_controller_modules", ".", "items", "(", ")", ":", "logging", ".", "debug", ...
45
14.785714
def init_app(self, app, env_file=None, verbose_mode=False):
    """Import variables from a .env file for the given app."""
    if self.app is None:
        self.app = app
    self.verbose_mode = verbose_mode

    # Default to a ".env" next to the current working directory.
    dotenv_path = os.path.join(os.getcwd(), ".env") if env_file is None else env_file

    if os.path.exists(dotenv_path):
        self.__import_vars(dotenv_path)
    else:
        warnings.warn("can't read {0} - it doesn't exist".format(dotenv_path))
[ "def", "init_app", "(", "self", ",", "app", ",", "env_file", "=", "None", ",", "verbose_mode", "=", "False", ")", ":", "if", "self", ".", "app", "is", "None", ":", "self", ".", "app", "=", "app", "self", ".", "verbose_mode", "=", "verbose_mode", "if"...
36.75
14.916667
def mark_rewrite(self, *names):
    """Mark import names as needing to be re-written.

    The named module or package, and any modules nested inside it, will
    be re-written on import.
    """
    imported_already = set(names) & set(sys.modules)
    for module_name in imported_already:
        # Warn once per module that was imported before rewriting kicked in.
        if module_name not in self._rewritten_names:
            self._warn_already_imported(module_name)
    self._must_rewrite.update(names)
[ "def", "mark_rewrite", "(", "self", ",", "*", "names", ")", ":", "already_imported", "=", "set", "(", "names", ")", ".", "intersection", "(", "set", "(", "sys", ".", "modules", ")", ")", "if", "already_imported", ":", "for", "name", "in", "already_import...
40.333333
12.833333
def get_field_list(fields, schema):
    """Expand a field-list spec into a concrete list of field names.

    For tables, only top-level non-RECORD fields are returned, as Google
    charts can't handle nested data.
    """
    # Derive the full set of chartable fields from the schema, if given.
    all_fields = None
    if schema:
        all_fields = [f['name'] for f in schema._bq_schema if f['type'] != 'RECORD']

    def _validated(names):
        # Validate the requested names against the schema, when we have one.
        if schema:
            for name in names:
                if name not in all_fields:
                    raise Exception('Cannot find field %s in given schema' % name)
        return names

    if isinstance(fields, list):
        return _validated(fields)
    if isinstance(fields, basestring) and fields != '*':
        return _validated(fields.split(','))
    return all_fields if schema else []
[ "def", "get_field_list", "(", "fields", ",", "schema", ")", ":", "# If the fields weren't supplied get them from the schema.", "if", "schema", ":", "all_fields", "=", "[", "f", "[", "'name'", "]", "for", "f", "in", "schema", ".", "_bq_schema", "if", "f", "[", ...
32.888889
19.518519
def GetUnreachableHosts(hostnames, ssh_key):
    """Return the subset of *hostnames* that are unreachable via ssh.

    :param hostnames: list of host names to probe.
    :param ssh_key: ssh key used for the reachability check.
    :return: list of host names that did not respond to ssh.
    :raises RuntimeError: if the probe returns a status list whose length
        does not match ``hostnames``.
    """
    ssh_status = AreHostsReachable(hostnames, ssh_key)
    # An ``assert`` here would be stripped under ``python -O``; fail loudly
    # with a real exception instead.
    if len(hostnames) != len(ssh_status):
        raise RuntimeError('Host/status length mismatch: %d vs %d'
                           % (len(hostnames), len(ssh_status)))
    return [host for host, ssh_ok in zip(hostnames, ssh_status) if not ssh_ok]
[ "def", "GetUnreachableHosts", "(", "hostnames", ",", "ssh_key", ")", ":", "ssh_status", "=", "AreHostsReachable", "(", "hostnames", ",", "ssh_key", ")", "assert", "(", "len", "(", "hostnames", ")", "==", "len", "(", "ssh_status", ")", ")", "nonresponsive_hostn...
49.714286
10.285714
def init_app(self, app, **kwargs):
    """Initialize the Flask-Bouncer extension for the given application.

    :param app: The application.
    :param kwargs: pass ``ensure_authorization=True`` to also register an
        after-request authorization check.
    """
    self.app = app
    self._init_extension()
    app.before_request(self.check_implicit_rules)
    ensure_auth = kwargs.get('ensure_authorization', False)
    if ensure_auth:
        app.after_request(self.check_authorization)
[ "def", "init_app", "(", "self", ",", "app", ",", "*", "*", "kwargs", ")", ":", "self", ".", "app", "=", "app", "self", ".", "_init_extension", "(", ")", "self", ".", "app", ".", "before_request", "(", "self", ".", "check_implicit_rules", ")", "if", "...
29.692308
19.153846
def get_logs(self):
    """Print logs from the pod.

    :return: str or None
    """
    try:
        pod_log = self.core_api.read_namespaced_pod_log(self.name, self.namespace)
    except ApiException as e:
        # no reason to throw exception when logs cannot be obtain, just notify user
        logger.debug("Cannot get pod logs because of "
                     "exception during calling Kubernetes API %s\n", e)
        return None
    logger.debug("Logs from pod: %s in namespace: %s", self.name, self.namespace)
    for log_line in pod_log.split('\n'):
        logger.debug(log_line)
    return pod_log
[ "def", "get_logs", "(", "self", ")", ":", "try", ":", "api_response", "=", "self", ".", "core_api", ".", "read_namespaced_pod_log", "(", "self", ".", "name", ",", "self", ".", "namespace", ")", "logger", ".", "debug", "(", "\"Logs from pod: %s in namespace: %s...
39.647059
21.529412
def postinit(self, exc=None, cause=None):
    """Do some setup after initialisation.

    :param exc: What is being raised.
    :type exc: NodeNG or None

    :param cause: The exception being used to raise this one.
    :type cause: NodeNG or None
    """
    self.exc, self.cause = exc, cause
[ "def", "postinit", "(", "self", ",", "exc", "=", "None", ",", "cause", "=", "None", ")", ":", "self", ".", "exc", "=", "exc", "self", ".", "cause", "=", "cause" ]
29.090909
13.727273
def matchmaker_matches(institute_id, case_name): """Show all MatchMaker matches for a given case""" # check that only authorized users can access MME patients matches user_obj = store.user(current_user.email) if 'mme_submitter' not in user_obj['roles']: flash('unauthorized request', 'warning') return redirect(request.referrer) # Required params for getting matches from MME server: mme_base_url = current_app.config.get('MME_URL') mme_token = current_app.config.get('MME_TOKEN') if not mme_base_url or not mme_token: flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger') return redirect(request.referrer) institute_obj, case_obj = institute_and_case(store, institute_id, case_name) data = controllers.mme_matches(case_obj, institute_obj, mme_base_url, mme_token) if data and data.get('server_errors'): flash('MatchMaker server returned error:{}'.format(data['server_errors']), 'danger') return redirect(request.referrer) elif not data: data = { 'institute' : institute_obj, 'case' : case_obj } return data
[ "def", "matchmaker_matches", "(", "institute_id", ",", "case_name", ")", ":", "# check that only authorized users can access MME patients matches", "user_obj", "=", "store", ".", "user", "(", "current_user", ".", "email", ")", "if", "'mme_submitter'", "not", "in", "user...
48.958333
17.791667
def _is_robot():
    """Return ``True`` if the current visitor looks like a robot or spider.

    The request's User-Agent header is matched against a regular
    expression configurable via the ``SPLIT_ROBOT_REGEX`` setting.
    """
    pattern = current_app.config['SPLIT_ROBOT_REGEX']
    agent = request.headers.get('User-Agent', '')
    return re.search(pattern, agent, flags=re.VERBOSE)
[ "def", "_is_robot", "(", ")", ":", "robot_regex", "=", "current_app", ".", "config", "[", "'SPLIT_ROBOT_REGEX'", "]", "user_agent", "=", "request", ".", "headers", ".", "get", "(", "'User-Agent'", ",", "''", ")", "return", "re", ".", "search", "(", "robot_...
38.916667
19.083333
def delete_db_entry(self, release):
    """Delete the db entries for releasefile and comment of the given release

    :param release: the release with the releasefile and comment db entries
    :type release: :class:`Release`
    :returns: an action status
    :rtype: :class:`ActionStatus`
    :raises: None
    """
    # Remove both associated database entries, logging each step.
    for message, db_entry in (
            ("Delete database entry for file.", release._releasedbentry),
            ("Delete database entry for comment.", release._commentdbentry)):
        log.info(message)
        db_entry.delete()
    return ActionStatus(ActionStatus.SUCCESS,
                        msg="Deleted database entries for releasefile and comment")
[ "def", "delete_db_entry", "(", "self", ",", "release", ")", ":", "log", ".", "info", "(", "\"Delete database entry for file.\"", ")", "release", ".", "_releasedbentry", ".", "delete", "(", ")", "log", ".", "info", "(", "\"Delete database entry for comment.\"", ")"...
43.866667
12.933333
def log_stats(self):
    """Output the stats to the LOGGER."""
    if not self.stats.get('counts'):
        # Nothing collected; only worth mentioning if children exist.
        if self.consumers:
            LOGGER.info('Did not receive any stats data from children')
        return

    # Call out any child processes that failed to report back.
    if self.poll_data['processes']:
        LOGGER.warning('%i process(es) did not respond with stats: %r',
                       len(self.poll_data['processes']),
                       self.poll_data['processes'])

    # Aggregate totals are only interesting with more than one process.
    if self.stats['counts']['processes'] > 1:
        LOGGER.info('%i consumers processed %i messages with %i errors',
                    self.stats['counts']['processes'],
                    self.stats['counts']['processed'],
                    self.stats['counts']['failed'])

    # Per-consumer breakdown.
    for key in self.stats['consumers'].keys():
        LOGGER.info('%i %s %s processed %i messages with %i errors',
                    self.stats['consumers'][key]['processes'], key,
                    self.consumer_keyword(self.stats['consumers'][key]),
                    self.stats['consumers'][key]['processed'],
                    self.stats['consumers'][key]['failed'])
[ "def", "log_stats", "(", "self", ")", ":", "if", "not", "self", ".", "stats", ".", "get", "(", "'counts'", ")", ":", "if", "self", ".", "consumers", ":", "LOGGER", ".", "info", "(", "'Did not receive any stats data from children'", ")", "return", "if", "se...
47.958333
22.166667
def normaliseWV(wV, normFac=1.0):
    """Scale the weight vector so its elements sum to ``normFac``."""
    scale = sum(wV) / normFac
    return [weight / scale for weight in wV]
[ "def", "normaliseWV", "(", "wV", ",", "normFac", "=", "1.0", ")", ":", "f", "=", "sum", "(", "wV", ")", "/", "normFac", "return", "[", "i", "/", "f", "for", "i", "in", "wV", "]" ]
23
6
def dn(self, fraction, n=None):
    r'''Computes the diameter at which a specified `fraction`
    of the distribution falls under. Utilizes a bounded solver to search
    for the desired diameter.

    Parameters
    ----------
    fraction : float
        Fraction of the distribution which should be under the calculated
        diameter, [-]
    n : int, optional
        None (for the `order` specified when the distribution was created),
        0 (number), 1 (length), 2 (area), 3 (volume/mass), or any integer,
        [-]

    Returns
    -------
    d : float
        Particle size diameter, [m]

    Examples
    --------
    >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
    >>> psd.dn(.5)
    5e-06

    >>> psd.dn(1)
    0.00029474365335233776

    >>> psd.dn(0)
    0.0
    '''
    if fraction == 1.0:
        # Avoid returning the maximum value of the search interval
        fraction = 1.0 - epsilon
    if fraction < 0:
        raise ValueError('Fraction must be more than 0')
    elif fraction == 0:  # pragma: no cover
        if self.truncated:
            return self.d_min
        return 0.0
        # Solve to float prevision limit - works well, but is there a real
        # point when with mpmath it would never happen?
        # dist.cdf(dist.dn(0)-1e-35) == 0
        # dist.cdf(dist.dn(0)-1e-36) == input
        # dn(0) == 1.9663615597466143e-20
        # def err(d):
        #     cdf = self.cdf(d, n=n)
        #     if cdf == 0:
        #         cdf = -1
        #     return cdf
        # return brenth(err, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
    elif fraction > 1:
        # BUG FIX: the original message read 'Fraction less than 1',
        # which stated the constraint backwards.
        raise ValueError('Fraction must be less than or equal to 1')
    # As the dn may be incredibly small, it is required for the absolute
    # tolerance to not be happy - it needs to continue iterating as long
    # as necessary to pin down the answer
    return brenth(lambda d: self.cdf(d, n=n) - fraction,
                  self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
[ "def", "dn", "(", "self", ",", "fraction", ",", "n", "=", "None", ")", ":", "if", "fraction", "==", "1.0", ":", "# Avoid returning the maximum value of the search interval", "fraction", "=", "1.0", "-", "epsilon", "if", "fraction", "<", "0", ":", "raise", "V...
37.448276
20.931034
def to_tuple(param, low=None, bias=None):
    """Convert an input argument to a (min, max) tuple.

    Args:
        param (scalar, tuple or list of 2+ elements): Input value. A scalar
            expands to ``(-param, param)``, or is ordered against ``low``
            when that is supplied.
        low: Optional second element for the scalar case.
        bias: An offset factor added to each element of the result.

    Raises:
        ValueError: if both ``low`` and ``bias`` are given, or if ``param``
            is neither a scalar nor a list/tuple.
    """
    if low is not None and bias is not None:
        raise ValueError('Arguments low and bias are mutually exclusive')

    if param is None:
        return None

    if isinstance(param, (int, float)):
        if low is None:
            result = (-param, +param)
        elif low < param:
            result = (low, param)
        else:
            result = (param, low)
    elif isinstance(param, (list, tuple)):
        result = tuple(param)
    else:
        raise ValueError('Argument param must be either scalar (int,float) or tuple')

    if bias is None:
        return tuple(result)
    return tuple(bias + x for x in result)
[ "def", "to_tuple", "(", "param", ",", "low", "=", "None", ",", "bias", "=", "None", ")", ":", "if", "low", "is", "not", "None", "and", "bias", "is", "not", "None", ":", "raise", "ValueError", "(", "'Arguments low and bias are mutually exclusive'", ")", "if...
36.344828
21.758621
def _submit(self, client, config, osutil, request_executor, io_executor,
            transfer_future, bandwidth_limiter=None):
    """
    :param client: The client associated with the transfer manager

    :type config: s3transfer.manager.TransferConfig
    :param config: The transfer config associated with the transfer
        manager

    :type osutil: s3transfer.utils.OSUtil
    :param osutil: The os utility associated to the transfer manager

    :type request_executor: s3transfer.futures.BoundedExecutor
    :param request_executor: The request executor associated with the
        transfer manager

    :type io_executor: s3transfer.futures.BoundedExecutor
    :param io_executor: The io executor associated with the
        transfer manager

    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future associated with the
        transfer request that tasks are being submitted for

    :type bandwidth_limiter: s3transfer.bandwidth.BandwidthLimiter
    :param bandwidth_limiter: The bandwidth limiter to use when
        downloading streams
    """
    if transfer_future.meta.size is None:
        # If a size was not provided figure out the size for the
        # user.
        response = client.head_object(
            Bucket=transfer_future.meta.call_args.bucket,
            Key=transfer_future.meta.call_args.key,
            **transfer_future.meta.call_args.extra_args
        )
        transfer_future.meta.provide_transfer_size(
            response['ContentLength'])

    # Pick the output manager appropriate for the destination (file,
    # seekable stream, etc.) of this particular transfer.
    download_output_manager = self._get_download_output_manager_cls(
        transfer_future, osutil)(osutil, self._transfer_coordinator,
                                 io_executor)

    # If it is greater than threshold do a ranged download, otherwise
    # do a regular GetObject download.
    if transfer_future.meta.size < config.multipart_threshold:
        self._submit_download_request(
            client, config, osutil, request_executor, io_executor,
            download_output_manager, transfer_future, bandwidth_limiter)
    else:
        self._submit_ranged_download_request(
            client, config, osutil, request_executor, io_executor,
            download_output_manager, transfer_future, bandwidth_limiter)
[ "def", "_submit", "(", "self", ",", "client", ",", "config", ",", "osutil", ",", "request_executor", ",", "io_executor", ",", "transfer_future", ",", "bandwidth_limiter", "=", "None", ")", ":", "if", "transfer_future", ".", "meta", ".", "size", "is", "None",...
45.226415
23.264151
def write_sources_file():
    """Write a sources.yaml file to current working dir."""
    # Default upstream locations for schemes and templates.
    content = (
        'schemes: '
        'https://github.com/chriskempson/base16-schemes-source.git\n'
        'templates: '
        'https://github.com/chriskempson/base16-templates-source.git'
    )
    with open(rel_to_cwd('sources.yaml'), 'w') as sources_file:
        sources_file.write(content)
[ "def", "write_sources_file", "(", ")", ":", "file_content", "=", "(", "'schemes: '", "'https://github.com/chriskempson/base16-schemes-source.git\\n'", "'templates: '", "'https://github.com/chriskempson/base16-templates-source.git'", ")", "file_path", "=", "rel_to_cwd", "(", "'sourc...
36.454545
16.181818
def get_new_document(self, cursor_pos=None):
    """Create a `Document` instance containing the resulting text.

    The selected history lines are spliced between the text before and
    after the cursor of the original document.
    """
    parts = []

    # Original text, before cursor.
    before = self.original_document.text_before_cursor
    if before:
        parts.append(before)

    # Selected entries from the history, in line order.
    parts.extend(self.history_lines[line_no]
                 for line_no in sorted(self.selected_lines))

    # Original text, after cursor.
    after = self.original_document.text_after_cursor
    if after:
        parts.append(after)

    # Clamp the cursor so it never points past the end of the text.
    text = '\n'.join(parts)
    if cursor_pos is not None and cursor_pos > len(text):
        cursor_pos = len(text)

    return Document(text, cursor_pos)
[ "def", "get_new_document", "(", "self", ",", "cursor_pos", "=", "None", ")", ":", "lines", "=", "[", "]", "# Original text, before cursor.", "if", "self", ".", "original_document", ".", "text_before_cursor", ":", "lines", ".", "append", "(", "self", ".", "orig...
36.782609
16.695652
def instruction_ASR_memory(self, opcode, ea, m):
    """Arithmetic shift memory right; return (address, shifted byte)."""
    shifted = self.ASR(m)
    # log.debug("$%x ASR memory value $%x >> 1 | Carry = $%x and write it to $%x \t| %s" % (
    #     self.program_counter,
    #     m, shifted, ea,
    #     self.cfg.mem_info.get_shortest(ea)
    # ))
    return ea, shifted & 0xff
[ "def", "instruction_ASR_memory", "(", "self", ",", "opcode", ",", "ea", ",", "m", ")", ":", "r", "=", "self", ".", "ASR", "(", "m", ")", "# log.debug(\"$%x ASR memory value $%x >> 1 | Carry = $%x and write it to $%x \\t| %s\" % (", "# self.program_counter,...
39.111111
17
def bytes2human(n, format="%(value).1f%(symbol)s"):
    """Convert a byte count to a human-readable string.

    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'

    :param n: number of bytes.
    :param format: %-style format string; may reference ``value``
        and ``symbol``.
    :return: formatted string such as ``'9.8K'``.

    Fixes: the original docstring examples ('9K', '95M') did not match
    the actual output of the default format, and the values were fed to
    the format string via the fragile ``locals()`` trick.
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Binary prefixes: 1K = 2**10, 1M = 2**20, ...
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols[1:])}
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            # Explicit mapping instead of locals().
            return format % dict(symbol=symbol, value=value)
    return format % dict(symbol=symbols[0], value=n)
[ "def", "bytes2human", "(", "n", ",", "format", "=", "\"%(value).1f%(symbol)s\"", ")", ":", "symbols", "=", "(", "'B'", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", ",", "'Y'", ")", "prefix", "=", "{", "}", "...
30.75
11
def analyze(self):
    """Return a list giving the parameters required by a query."""
    class MockBindings(dict):
        # Pretend every key is bound: __contains__ records the key with a
        # None placeholder so resolve() can proceed without real values.
        def __contains__(self, key):
            self[key] = None
            return True
    bindings = MockBindings()
    # ``used`` collects the parameter names actually touched while the
    # ancestor and filters resolve themselves.
    used = {}
    ancestor = self.ancestor
    if isinstance(ancestor, ParameterizedThing):
        ancestor = ancestor.resolve(bindings, used)
    filters = self.filters
    if filters is not None:
        filters = filters.resolve(bindings, used)
    return sorted(used)
[ "def", "analyze", "(", "self", ")", ":", "class", "MockBindings", "(", "dict", ")", ":", "def", "__contains__", "(", "self", ",", "key", ")", ":", "self", "[", "key", "]", "=", "None", "return", "True", "bindings", "=", "MockBindings", "(", ")", "use...
30
14.625
def apparent_temp(temp, rh, wind):
    """Compute apparent temperature (real feel), using formula from
    http://www.bom.gov.au/info/thermal_stress/
    """
    if temp is None or rh is None or wind is None:
        return None
    # Water vapour pressure (hPa) derived from relative humidity and
    # air temperature.
    vapour_pressure = (float(rh) / 100.0) * 6.105 * math.exp(
        17.27 * temp / (237.7 + temp))
    return temp + 0.33 * vapour_pressure - 0.70 * wind - 4.00
[ "def", "apparent_temp", "(", "temp", ",", "rh", ",", "wind", ")", ":", "if", "temp", "is", "None", "or", "rh", "is", "None", "or", "wind", "is", "None", ":", "return", "None", "vap_press", "=", "(", "float", "(", "rh", ")", "/", "100.0", ")", "*"...
37.5
11.9
def addClass(self, alias, klass):
    """Create and return a reference to C{class_def}.

    @param alias: C{ClassDefinition} instance.
    """
    ref = self.class_idx
    self.class_idx = ref + 1
    # Register the alias under both the numeric reference and the class.
    self.class_ref[ref] = alias
    self.classes[klass] = alias
    alias.reference = ref
    return ref
[ "def", "addClass", "(", "self", ",", "alias", ",", "klass", ")", ":", "ref", "=", "self", ".", "class_idx", "self", ".", "class_ref", "[", "ref", "]", "=", "alias", "cd", "=", "self", ".", "classes", "[", "klass", "]", "=", "alias", "cd", ".", "r...
20.1875
17.9375
def insert_before(self, parent, sibling, row=None):
    """insert_before(parent, sibling, row=None)

    :param parent: A valid :obj:`Gtk.TreeIter`, or :obj:`None`
    :type parent: :obj:`Gtk.TreeIter` or :obj:`None`
    :param sibling: A valid :obj:`Gtk.TreeIter`, or :obj:`None`
    :type sibling: :obj:`Gtk.TreeIter` or :obj:`None`
    :param row: a list of values to apply to the newly inserted row or
        :obj:`None`
    :type row: [:obj:`object`] or :obj:`None`
    :returns: a :obj:`Gtk.TreeIter` pointing to the new row
    :rtype: :obj:`Gtk.TreeIter`

    Inserts a new row before `sibling`. If `sibling` is :obj:`None`, the
    row is appended to `parent`'s children; if both are :obj:`None`, it is
    appended to the toplevel. When both are set, `parent` must be the
    parent of `sibling`. The new row is empty unless `row` supplies the
    values to fill it with (otherwise use :obj:`Gtk.TreeStore.set`\\() or
    :obj:`Gtk.TreeStore.set_value`\\()).
    """
    new_iter = Gtk.TreeStore.insert_before(self, parent, sibling)
    if row is None:
        return new_iter
    self.set_row(new_iter, row)
    return new_iter
[ "def", "insert_before", "(", "self", ",", "parent", ",", "sibling", ",", "row", "=", "None", ")", ":", "treeiter", "=", "Gtk", ".", "TreeStore", ".", "insert_before", "(", "self", ",", "parent", ",", "sibling", ")", "if", "row", "is", "not", "None", ...
40.621622
26.513514
def list_releases():
    """Print all releases of the package published on pypi."""
    response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME))
    if not response:
        print('Package "{}" not found on Pypi.org'.format(PYPI_PACKAGE_NAME))
        return
    releases = response.json().get('releases', {})
    if not releases:
        print('No releases found for {}'.format(PYPI_PACKAGE_NAME))
        return
    for version, release in releases.items():
        # Gather the distribution formats and the last seen upload time.
        formats = []
        published_on_date = None
        for file_info in release:
            formats.append(file_info.get('packagetype'))
            published_on_date = file_info.get('upload_time')
        print('{:<10}{:>15}{:>25}'.format(version, published_on_date,
                                          ' | '.join(formats)))
[ "def", "list_releases", "(", ")", ":", "response", "=", "requests", ".", "get", "(", "PYPI_URL", ".", "format", "(", "package", "=", "PYPI_PACKAGE_NAME", ")", ")", "if", "response", ":", "data", "=", "response", ".", "json", "(", ")", "releases_dict", "=...
40.090909
22.954545
def register_code_edit(cls, code_edit_class):
    """
    Register an additional code edit **class**

    .. warning: This method expect a class, not an instance!

    :param code_edit_class: code edit class to register.
    """
    if not inspect.isclass(code_edit_class):
        raise TypeError('must be a class, not an instance.')
    for mimetype in code_edit_class.mimetypes:
        if mimetype in cls.editors:
            # NOTE(review): despite the 'skipping' message, the assignment
            # below still OVERWRITES the previously registered editor for
            # this mimetype -- confirm whether skip or overwrite is the
            # intended behavior.
            _logger().warn('editor for mimetype already registered, '
                           'skipping')
        cls.editors[mimetype] = code_edit_class
    _logger().log(5, 'registered editors: %r', cls.editors)
[ "def", "register_code_edit", "(", "cls", ",", "code_edit_class", ")", ":", "if", "not", "inspect", ".", "isclass", "(", "code_edit_class", ")", ":", "raise", "TypeError", "(", "'must be a class, not an instance.'", ")", "for", "mimetype", "in", "code_edit_class", ...
41.9375
15.6875
def tenengrad(img, ksize=3):
    """'TENG' focus measure (Krotkov86): mean squared Sobel gradient."""
    grad_x = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    grad_y = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    focus_map = grad_x * grad_x + grad_y * grad_y
    mean_val = cv2.mean(focus_map)[0]
    if np.isnan(mean_val):
        # Fall back to a NaN-aware mean when cv2.mean yields NaN.
        return np.nanmean(focus_map)
    return mean_val
[ "def", "tenengrad", "(", "img", ",", "ksize", "=", "3", ")", ":", "Gx", "=", "cv2", ".", "Sobel", "(", "img", ",", "ddepth", "=", "cv2", ".", "CV_64F", ",", "dx", "=", "1", ",", "dy", "=", "0", ",", "ksize", "=", "ksize", ")", "Gy", "=", "c...
35.111111
17.333333
def erase_hardware_breakpoint(self, dwThreadId, address):
    """
    Erases the hardware breakpoint at the given address.

    @see:
        L{define_hardware_breakpoint},
        L{has_hardware_breakpoint},
        L{get_hardware_breakpoint},
        L{enable_hardware_breakpoint},
        L{enable_one_shot_hardware_breakpoint},
        L{disable_hardware_breakpoint}

    @type  dwThreadId: int
    @param dwThreadId: Thread global ID.

    @type  address: int
    @param address: Memory address of breakpoint.
    """
    bp = self.get_hardware_breakpoint(dwThreadId, address)
    # Make sure the breakpoint is disabled before removing it.
    if not bp.is_disabled():
        self.disable_hardware_breakpoint(dwThreadId, address)
    thread_bps = self.__hardwareBP[dwThreadId]
    thread_bps.remove(bp)
    # Drop the per-thread container entirely once it becomes empty.
    if not thread_bps:
        del self.__hardwareBP[dwThreadId]
[ "def", "erase_hardware_breakpoint", "(", "self", ",", "dwThreadId", ",", "address", ")", ":", "bp", "=", "self", ".", "get_hardware_breakpoint", "(", "dwThreadId", ",", "address", ")", "if", "not", "bp", ".", "is_disabled", "(", ")", ":", "self", ".", "dis...
34.2
13.72
async def get_all_tracks(self) -> List[PlaylistTrack]:
    """Get all playlist tracks from the playlist.

    Returns
    -------
    tracks : List[PlaylistTrack]
        The playlists tracks.
    """
    if isinstance(self._tracks, PartialTracks):
        return await self._tracks.build()

    _tracks = []
    offset = 0
    # NOTE(review): the loop condition reads ``self.tracks`` while the
    # fetched pages are accumulated into the *local* ``_tracks`` list,
    # which is never assigned back to ``self._tracks``. Confirm that
    # ``self.tracks`` is updated elsewhere; otherwise the pagination may
    # never advance and the accumulated pages are discarded.
    while len(self.tracks) < self.total_tracks:
        data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset)
        _tracks += [PlaylistTrack(self.__client, item) for item in data['items']]
        offset += 50

    self.total_tracks = len(self._tracks)
    return list(self._tracks)
[ "async", "def", "get_all_tracks", "(", "self", ")", "->", "List", "[", "PlaylistTrack", "]", ":", "if", "isinstance", "(", "self", ".", "_tracks", ",", "PartialTracks", ")", ":", "return", "await", "self", ".", "_tracks", ".", "build", "(", ")", "_tracks...
33.380952
21.380952
def layer(self, layer_name):
    """Get QGIS layer.

    :param layer_name: The name of the layer to fetch.
    :type layer_name: str

    :return: The QGIS layer, or ``False`` when the URI cannot be loaded
        as either a vector or a raster layer.
    :rtype: QgsMapLayer or bool

    .. versionadded:: 4.0
    """
    uri = self.layer_uri(layer_name)
    # Try loading as a vector layer first, then fall back to raster.
    layer = QgsVectorLayer(uri, layer_name, 'ogr')
    if not layer.isValid():
        layer = QgsRasterLayer(uri, layer_name)
        if not layer.isValid():
            # Neither loader accepted the URI; callers must check for False.
            return False
    monkey_patch_keywords(layer)
    return layer
[ "def", "layer", "(", "self", ",", "layer_name", ")", ":", "uri", "=", "self", ".", "layer_uri", "(", "layer_name", ")", "layer", "=", "QgsVectorLayer", "(", "uri", ",", "layer_name", ",", "'ogr'", ")", "if", "not", "layer", ".", "isValid", "(", ")", ...
25.47619
16.571429
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(OpenVPNCollector, self).get_default_config()
    # Collector-specific defaults layered on top of the base config.
    defaults = {
        'path': 'openvpn',
        'instances': 'file:///var/log/openvpn/status.log',
        'timeout': '10',
    }
    config.update(defaults)
    return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "OpenVPNCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'openvpn'", ",", "'instances'", ":", "'file:///var/...
31.363636
13.363636
def get_available_networks(self, **kwargs):
    """
    Retrieves the Ethernet networks, Fibre Channel networks and network
    sets that are available to a server profile, together with their
    respective ports.

    Args:
        enclosureGroupUri (str): URI of the associated enclosure group.
        functionType (str): Filter by function type (``Ethernet`` or
            ``FibreChannel``).
        serverHardwareTypeUri (str): URI of the associated server
            hardware type.
        serverHardwareUri (str): URI of the associated server hardware.
        view (str): Name of a predefined view restricting the attributes
            returned; the default expands all attributes and collections.
        profileUri (str): Restrict results to networks that share a scope
            with the given server profile.
        scopeUris (str): Expression restricting resources by assigned
            scopes.

    Returns:
        list: Available networks.
    """
    query_uri = self._helper.build_uri_with_query_string(
        kwargs, '/available-networks')
    return self._helper.do_get(query_uri)
[ "def", "get_available_networks", "(", "self", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_helper", ".", "build_uri_with_query_string", "(", "kwargs", ",", "'/available-networks'", ")", "return", "self", ".", "_helper", ".", "do_get", "(", "...
58.896552
35.724138
def _configure_users(self, site=None, full=0, only_data=0):
    """
    Installs and configures RabbitMQ users and vhosts.

    :param site: site whose user/vhost settings apply; defaults to ALL.
    :param full: when truthy, also install the required system packages.
    :param only_data: when truthy, skip the rabbitmqctl calls and only
        return the computed parameters.
    :returns: sorted list of (user, password, vhost) tuples.
    """
    site = site or ALL
    full = int(full)

    if full and not only_data:
        # Make sure the RabbitMQ packages themselves are installed first.
        packager = self.get_satchel('packager')
        packager.install_required(type=SYSTEM, service=self.name)

    r = self.local_renderer

    params = self.get_user_vhosts(site=site)  # [(user, password, vhost)]

    with settings(warn_only=True):
        self.add_admin_user()

    params = sorted(list(params))
    if not only_data:
        for user, password, vhost in params:
            r.env.broker_user = user
            r.env.broker_password = password
            r.env.broker_vhost = vhost
            with settings(warn_only=True):
                # warn_only tolerates "already exists" failures, making
                # the user/vhost creation effectively idempotent.
                r.sudo('rabbitmqctl add_user {broker_user} {broker_password}')
                r.sudo('rabbitmqctl add_vhost {broker_vhost}')
                r.sudo('rabbitmqctl set_permissions -p {broker_vhost} {broker_user} ".*" ".*" ".*"')
                r.sudo('rabbitmqctl set_permissions -p {broker_vhost} {admin_username} ".*" ".*" ".*"')

    return params
[ "def", "_configure_users", "(", "self", ",", "site", "=", "None", ",", "full", "=", "0", ",", "only_data", "=", "0", ")", ":", "site", "=", "site", "or", "ALL", "full", "=", "int", "(", "full", ")", "if", "full", "and", "not", "only_data", ":", "...
35.454545
22.121212
def throw_random_intervals(lengths, regions, save_interval_func=None, allow_overlap=False):
    """
    Generate a set of non-overlapping random intervals from a length distribution.

    `lengths`: list containing the length of each interval to be generated.
    We expect this to be sorted by decreasing length to minimize the chance
    of failure (MaxtriesException) and for some performance gains when
    allow_overlap==True and there are duplicate lengths.

    `regions`: A list of regions in which intervals can be placed. Elements
    are tuples or lists of the form (start, end, ...), where ... indicates
    any number of items (including zero).

    `save_interval_func`: A function accepting three arguments which will be
    passed the (start, stop, region) for each generated interval, where
    region is an entry in the regions list. If this is None, the generated
    intervals will be returned as a list of elements copied from the region
    with start and end modified.
    """
    # Decorate each region with (length, start, region) so sorting puts the
    # longest regions first after the reverse below.
    regions = [(x[1] - x[0], x[0], x) for x in regions]
    regions.sort()
    regions.reverse()
    # Caller supplied a sink: just throw and let it collect intervals.
    # (fixed: compare to None with `is not None`, not `!=`)
    if save_interval_func is not None:
        throw_random_private(lengths, regions, save_interval_func, allow_overlap)
        return
    # No sink supplied: collect intervals ourselves and return them.
    intervals = []

    def _collect(start, end, region):
        # Copy the region entry with its start/end replaced by the throw result.
        intervals.append(overwrite_start_end(start, end, region))

    throw_random_private(lengths, regions, _collect, allow_overlap)
    return intervals
[ "def", "throw_random_intervals", "(", "lengths", ",", "regions", ",", "save_interval_func", "=", "None", ",", "allow_overlap", "=", "False", ")", ":", "# Copy regions", "regions", "=", "[", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ",", "x", "[...
50.147059
27.264706
def from_filename(file_name):
    """
    Look up the BioPython file type corresponding to an input file name.

    Compression suffixes (as listed in COMPRESS_EXT) are stripped first so
    the inner extension determines the type.
    """
    root, extension = os.path.splitext(file_name)
    if extension in COMPRESS_EXT:
        # The outer suffix only marks compression; classify by the inner one.
        extension = os.path.splitext(root)[1]
    return from_extension(extension)
[ "def", "from_filename", "(", "file_name", ")", ":", "base", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "if", "extension", "in", "COMPRESS_EXT", ":", "# Compressed file", "extension", "=", "os", ".", "path", ".", "spli...
33.666667
9.222222
def http_post(url, data=None, opt=opt_default):
    """
    Shortcut for urlopen (POST) + read.

    We'll probably want to add a nice timeout here later too.
    """
    payload = _marshalled(data)
    return _http_request(url, method='POST', data=payload, opt=opt)
[ "def", "http_post", "(", "url", ",", "data", "=", "None", ",", "opt", "=", "opt_default", ")", ":", "return", "_http_request", "(", "url", ",", "method", "=", "'POST'", ",", "data", "=", "_marshalled", "(", "data", ")", ",", "opt", "=", "opt", ")" ]
39.666667
13.333333
def e161(number, alphabet=PHONE_E161_ALPHABET):
    '''
    Convert a printable phone number using the 26 Latin letters (A to Z)
    into its 12-key telephone keypad digits (ITU E.161 mapping).

    :param number: string
    :param alphabet: dict mapping character groups to keypad digits

    >>> print(e161('0800-PIZZA123'))
    080074992123
    >>> e161('0800^PIZZA123')
    Traceback (most recent call last):
        ...
    ValueError: Character "^" (0x5e) is not in the E.161 alphabet
    '''
    digits = []
    # Separators are stripped before translation; lookup is case-insensitive.
    for char in strip(number, '+-. ()').lower():
        for group, digit in alphabet.items():
            if char in group:
                digits.append(digit)
                break
        else:
            # No keypad group contained this character.
            raise ValueError(
                _('Character "%s" (0x%02x) is not in the E.161 alphabet')
                % (char, ord(char))
            )
    return u''.join(digits)
[ "def", "e161", "(", "number", ",", "alphabet", "=", "PHONE_E161_ALPHABET", ")", ":", "digits", "=", "[", "]", "for", "char", "in", "strip", "(", "number", ",", "'+-. ()'", ")", ".", "lower", "(", ")", ":", "length", "=", "len", "(", "digits", ")", ...
26.903226
20.709677
def run(self):
    """Starts the blotter

    Connects to the TWS/GW, processes and logs market data,
    and broadcast it over TCP via ZeroMQ (which algo subscribe to)
    """
    # Ensure only one blotter instance with this configuration is running.
    self._check_unique_blotter()

    # connect to mysql
    self.mysql_connect()

    # PUB socket: algos subscribe to the tick stream on args['zmqport'].
    self.context = zmq.Context(zmq.REP)
    self.socket = self.context.socket(zmq.PUB)
    self.socket.bind("tcp://*:" + str(self.args['zmqport']))

    db_modified = 0
    contracts = []
    prev_contracts = []
    first_run = True

    self.log_blotter.info("Connecting to Interactive Brokers...")
    self.ibConn = ezIBpy()
    self.ibConn.ibCallback = self.ibCallback

    # Retry the TWS/GW connection once per second until it succeeds.
    while not self.ibConn.connected:
        self.ibConn.connect(clientId=int(self.args['ibclient']),
                            port=int(self.args['ibport']), host=str(self.args['ibserver']))
        time.sleep(1)
        if not self.ibConn.connected:
            # Progress indicator while waiting for the connection.
            print('*', end="", flush=True)

    self.log_blotter.info("Connection established...")

    try:
        # Main loop: watch the symbols CSV and (un)subscribe market data
        # whenever its contents change.
        while True:
            if not os.path.exists(self.args['symbols']):
                # Bootstrap an empty symbols file with the expected columns.
                pd.DataFrame(columns=['symbol', 'sec_type', 'exchange',
                                      'currency', 'expiry', 'strike', 'opt_type']
                             ).to_csv(self.args['symbols'], header=True, index=False)
                tools.chmod(self.args['symbols'])
            else:
                time.sleep(0.1)

                # read db properties
                db_data = os.stat(self.args['symbols'])
                db_size = db_data.st_size
                db_last_modified = db_data.st_mtime

                # empty file
                if db_size == 0:
                    if prev_contracts:
                        # File was emptied: cancel all existing subscriptions.
                        self.log_blotter.info('Cancel market data...')
                        self.ibConn.cancelMarketData()
                        time.sleep(0.1)
                        prev_contracts = []
                    continue

                # modified? (mtime unchanged => nothing to re-read)
                if not first_run and db_last_modified == db_modified:
                    continue

                # continue...
                db_modified = db_last_modified

                # read contructs db
                df = pd.read_csv(self.args['symbols'], header=0)
                if df.empty:
                    continue

                # removed expired
                # NOTE(review): expiry < 1000000 is treated as YYYYMM format,
                # >= 1000000 as YYYYMMDD — confirm against upstream data format.
                df = df[(
                    (df['expiry'] < 1000000) & (
                        df['expiry'] >= int(datetime.now().strftime('%Y%m')))) | (
                    (df['expiry'] >= 1000000) & (
                        df['expiry'] >= int(datetime.now().strftime('%Y%m%d')))) |
                    np_isnan(df['expiry'])
                ]

                # fix expiry formatting (no floats)
                df['expiry'] = df['expiry'].fillna(0).astype(int).astype(str)
                df.loc[df['expiry'] == "0", 'expiry'] = ""
                df = df[df['sec_type'] != 'BAG']

                df.fillna("", inplace=True)
                # Write the cleaned file back so it stays normalized on disk.
                df.to_csv(self.args['symbols'], header=True, index=False)
                tools.chmod(self.args['symbols'])

                # ignore commentee (rows whose symbol contains '#')
                df = df[~df['symbol'].str.contains("#")]
                contracts = [tuple(x) for x in df.values]

                if first_run:
                    first_run = False
                else:
                    if contracts != prev_contracts:
                        # cancel market data for removed contracts
                        for contract in prev_contracts:
                            if contract not in contracts:
                                self.ibConn.cancelMarketData(
                                    self.ibConn.createContract(contract))
                                if self.args['orderbook']:
                                    self.ibConn.cancelMarketDepth(
                                        self.ibConn.createContract(contract))
                                time.sleep(0.1)
                                contract_string = self.ibConn.contractString(
                                    contract).split('_')[0]
                                self.log_blotter.info(
                                    'Contract Removed [%s]', contract_string)

                # request market data for newly added contracts
                for contract in contracts:
                    if contract not in prev_contracts:
                        self.ibConn.requestMarketData(
                            self.ibConn.createContract(contract))
                        if self.args['orderbook']:
                            self.ibConn.requestMarketDepth(
                                self.ibConn.createContract(contract))
                        time.sleep(0.1)
                        contract_string = self.ibConn.contractString(
                            contract).split('_')[0]
                        self.log_blotter.info(
                            'Contract Added [%s]', contract_string)

                # update latest contracts
                prev_contracts = contracts

            time.sleep(2)

    except (KeyboardInterrupt, SystemExit):
        self.quitting = True  # don't display connection errors on ctrl+c
        print(
            "\n\n>>> Interrupted with Ctrl-c...\n(waiting for running tasks to be completed)\n")
        # asynctools.multitasking.killall() # stop now
        asynctools.multitasking.wait_for_tasks()  # wait for threads to complete
        sys.exit(1)
[ "def", "run", "(", "self", ")", ":", "self", ".", "_check_unique_blotter", "(", ")", "# connect to mysql", "self", ".", "mysql_connect", "(", ")", "self", ".", "context", "=", "zmq", ".", "Context", "(", "zmq", ".", "REP", ")", "self", ".", "socket", "...
42.15942
21.782609
def _save_group(self, group_id, result): """Store the result of an executed group.""" self.TaskSetModel._default_manager.store_result(group_id, result) return result
[ "def", "_save_group", "(", "self", ",", "group_id", ",", "result", ")", ":", "self", ".", "TaskSetModel", ".", "_default_manager", ".", "store_result", "(", "group_id", ",", "result", ")", "return", "result" ]
46.5
13
def where_cross(data, threshold):
    """Return the indices where *data* first crosses above *threshold*.

    Each returned index is the start of a run of consecutive above-threshold
    samples. A leading index 0 is padded in so a run starting at the very
    first sample is not counted as a crossing.
    """
    above = np.where(data > threshold)[0]
    padded = np.concatenate(([0], above))
    # Zero where two padded indices are consecutive (same run), nonzero at run breaks.
    run_breaks = padded[:-1] - padded[1:] + 1
    return padded[np.where(run_breaks)[0] + 1]
[ "def", "where_cross", "(", "data", ",", "threshold", ")", ":", "Is", "=", "np", ".", "where", "(", "data", ">", "threshold", ")", "[", "0", "]", "Is", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "Is", ")", ")", "Ds", "=", "Is...
37.833333
8
def process_raw_data(cls, raw_data):
    """Create a new model using raw API response.

    Replaces the raw ipConfigurations, subnets and aclRules entries in
    ``raw_data["properties"]`` with their resource-model equivalents, then
    delegates to the parent class.
    """
    properties = raw_data["properties"]

    properties["ipConfigurations"] = [
        Resource.from_raw_data(item)
        for item in properties.get("ipConfigurations", [])
    ]

    properties["subnets"] = [
        Resource.from_raw_data(item)
        for item in properties.get("subnets", [])
    ]

    processed_rules = []
    for raw_rule in properties.get("aclRules", []):
        # Each ACL rule needs a back-reference to its owning resource.
        raw_rule["parentResourceID"] = raw_data["resourceId"]
        processed_rules.append(ACLRules.from_raw_data(raw_rule))
    properties["aclRules"] = processed_rules

    return super(AccessControlLists, cls).process_raw_data(raw_data)
[ "def", "process_raw_data", "(", "cls", ",", "raw_data", ")", ":", "properties", "=", "raw_data", "[", "\"properties\"", "]", "ip_configurations", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"ipConfigurations\"", ",", "[", "]", ...
40.909091
19.090909
def quote_single_identifier(self, string):
    """
    Quotes a single identifier (no dot chain separation).

    :param string: The identifier name to be quoted.
    :type string: str

    :return: The quoted identifier string.
    :rtype: str
    """
    quote = self.get_identifier_quote_character()
    # Double any embedded quote characters to escape them.
    escaped = string.replace(quote, quote + quote)
    return quote + escaped + quote
[ "def", "quote_single_identifier", "(", "self", ",", "string", ")", ":", "c", "=", "self", ".", "get_identifier_quote_character", "(", ")", "return", "\"%s%s%s\"", "%", "(", "c", ",", "string", ".", "replace", "(", "c", ",", "c", "+", "c", ")", ",", "c"...
29.076923
17.538462
def parse_dom(self):
    """Parse the XML information in :attr:`dom` and return it.

    Returns:
        dict: Parsed xml data. Leaf elements map tag -> text; elements with
        children map tag -> list of each child's attribute item list.
        An empty dictionary when content is blank.
    """
    if self.dom is None:
        return {}

    parsed = {}
    for element in self.dom:
        if len(element) == 0:
            # Leaf node: keep its text content.
            parsed[element.tag] = element.text
        else:
            # Branch node: collect each child's attributes.
            parsed[element.tag] = [child.items() for child in element]
    return parsed
[ "def", "parse_dom", "(", "self", ")", ":", "if", "self", ".", "dom", "is", "None", ":", "return", "{", "}", "else", ":", "domain", "=", "{", "}", "for", "child", "in", "self", ".", "dom", ":", "if", "len", "(", "child", ")", "==", "0", ":", "...
29.411765
18.647059
def DeleteOldCronJobRuns(self, cutoff_timestamp, cursor=None):
    """Deletes cron job runs that are older then the given timestamp."""
    timestamp = mysql_utils.RDFDatetimeToTimestamp(cutoff_timestamp)
    cursor.execute(
        "DELETE FROM cron_job_runs WHERE write_time < FROM_UNIXTIME(%s)",
        [timestamp])
[ "def", "DeleteOldCronJobRuns", "(", "self", ",", "cutoff_timestamp", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"DELETE FROM cron_job_runs WHERE write_time < FROM_UNIXTIME(%s)\"", "cursor", ".", "execute", "(", "query", ",", "[", "mysql_utils", ".", "RDFDat...
61.8
21.4
def get_shared_files_from_shake(self, shake_id=None, before=None, after=None):
    """Return a list of SharedFile objects from a particular shake.

    Args:
        shake_id (int): Shake from which to get a list of SharedFiles
        before (str): get 10 SharedFile objects before (but not including)
            the SharedFile given by `before` for the given Shake.
        after (str): get 10 SharedFile objects after (but not including)
            the SharedFile given by `after` for the given Shake.

    Returns:
        List (list) of SharedFiles.

    Raises:
        Exception: if both `before` and `after` are supplied.
    """
    if before and after:
        raise Exception("You cannot specify both before and after keys")

    # Assemble the endpoint path from its optional segments.
    segments = ['/api/shakes']
    if shake_id:
        segments.append('/{0}'.format(shake_id))
    if before:
        segments.append('/before/{0}'.format(before))
    elif after:
        segments.append('/after/{0}'.format(after))

    data = self._make_request(verb="GET", endpoint=''.join(segments))
    return [SharedFile.NewFromJSON(item) for item in data['sharedfiles']]
[ "def", "get_shared_files_from_shake", "(", "self", ",", "shake_id", "=", "None", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "if", "before", "and", "after", ":", "raise", "Exception", "(", "\"You cannot specify both before and after keys\"", ...
36.9375
22.375
def save(self, block_id, consensus_hash, ops_hash, accepted_ops, virtualchain_ops_hints, backup=False):
    """
    Write out all state to the working directory.

    Calls the implementation's 'db_save' method to store any state for this block.
    Calls the implementation's 'db_continue' method at the very end, to signal
    to the implementation that all virtualchain state has been saved.  This method
    can return False, in which case, indexing stops

    Return True on success
    Return False if the implementation wants to exit.
    Aborts on fatal error
    """
    assert self.setup, "Not set up yet. Call .db_setup() first!"
    # Every accepted operation must carry a matching hints record.
    assert len(accepted_ops) == len(virtualchain_ops_hints)

    if self.read_only:
        log.error("FATAL: StateEngine is read only")
        traceback.print_stack()
        os.abort()

    # Never rewind: saving a block we already passed is a fatal logic error.
    if block_id < self.lastblock:
        log.error("FATAL: Already processed up to block {} (got {})".format(self.lastblock, block_id))
        traceback.print_stack()
        os.abort()

    # ask the implementation to save
    if hasattr(self.impl, 'db_save'):
        rc = False
        try:
            rc = self.impl.db_save(block_id, consensus_hash, ops_hash, accepted_ops, virtualchain_ops_hints, db_state=self.state)
        except Exception as e:
            log.exception(e)
            rc = False

        if not rc:
            # A failed implementation save leaves state inconsistent: abort hard.
            log.error("FATAL: Implementation failed to save state at block {}".format(block_id))
            traceback.print_stack()
            os.abort()

    # save new chainstate
    self.lastblock = block_id

    # start a transaction to store the new data
    db_con = self.db_open(self.impl, self.working_dir)
    cur = db_con.cursor()
    self.db_query_execute(cur, "BEGIN", (), verbose=False)

    # add chainstate: one row per accepted operation, in order.
    for i, (accepted_op, virtualchain_op_hints) in enumerate(zip(accepted_ops, virtualchain_ops_hints)):
        # unpack virtualchain hints
        senders = virtualchain_op_hints['virtualchain_senders']
        data_hex = virtualchain_op_hints['virtualchain_data_hex']
        tx_hex = virtualchain_op_hints['virtualchain_txhex']
        txid = virtualchain_op_hints['virtualchain_txid']
        fee = virtualchain_op_hints['virtualchain_fee']
        opcode = virtualchain_op_hints['virtualchain_opcode']
        txindex = virtualchain_op_hints['virtualchain_txindex']
        # vtxindex is the operation's position within this block.
        vtxindex = i
        merkle_path = virtualchain_op_hints['virtualchain_tx_merkle_path']

        vtx_data = {
            'txid': txid,
            'senders': simplejson.dumps(senders),
            'data_hex': data_hex,
            'tx_hex': tx_hex,
            'tx_merkle_path': merkle_path,
            'fee': fee,
            'opcode': opcode,
            'txindex': txindex,
            'vtxindex': vtxindex,
            'block_id': block_id
        }

        self.db_chainstate_append(cur, **vtx_data)

    # update snapshot info
    self.db_snapshot_append(cur, block_id, consensus_hash, ops_hash, int(time.time()))
    self.db_query_execute(cur, "END", (), verbose=False)
    db_con.close()

    # make new backups and clear old ones
    self.make_backups(block_id)
    self.clear_old_backups(block_id)

    # ask the implementation if we should continue
    continue_indexing = True
    if hasattr(self.impl, "db_continue"):
        try:
            continue_indexing = self.impl.db_continue( block_id, consensus_hash )
        # NOTE: Python 2 except syntax — this module predates Python 3.
        except Exception, e:
            log.exception(e)
            traceback.print_stack()
            log.error("FATAL: implementation failed db_continue")
            os.abort()

    return continue_indexing
[ "def", "save", "(", "self", ",", "block_id", ",", "consensus_hash", ",", "ops_hash", ",", "accepted_ops", ",", "virtualchain_ops_hints", ",", "backup", "=", "False", ")", ":", "assert", "self", ".", "setup", ",", "\"Not set up yet. Call .db_setup() first!\"", "as...
39.704082
21.357143
def get_history(self):
    """Return history parameters carried by the stanza.

    Scans the stanza's children for a MUC-namespaced <history/> element and
    extracts its maxchars/maxstanzas/maxseconds attributes (converted to int
    when present).

    :returntype: `HistoryParameters`"""
    for child in xml_element_iter(self.xmlnode.children):
        if get_node_ns_uri(child) == MUC_NS and child.name == "history":
            maxchars = from_utf8(child.prop("maxchars"))
            if maxchars is not None:
                maxchars = int(maxchars)
            maxstanzas = from_utf8(child.prop("maxstanzas"))
            if maxstanzas is not None:
                maxstanzas = int(maxstanzas)
            maxseconds = from_utf8(child.prop("maxseconds"))
            if maxseconds is not None:
                maxseconds = int(maxseconds)
            # TODO: since -- requires parsing of Jabber dateTime profile
            since = None
            # NOTE(review): returns None implicitly when no <history/> child
            # is found — confirm callers handle that.
            return HistoryParameters(maxchars, maxstanzas, maxseconds, since)
[ "def", "get_history", "(", "self", ")", ":", "for", "child", "in", "xml_element_iter", "(", "self", ".", "xmlnode", ".", "children", ")", ":", "if", "get_node_ns_uri", "(", "child", ")", "==", "MUC_NS", "and", "child", ".", "name", "==", "\"history\"", "...
49.888889
16.444444
def _insert_breaklines(self):
    """Inserts a breakline instead of a trailing space if the chunk is in
    CJK.
    """
    rebuilt = ChunkList()
    for chunk in self:
        if chunk.word[-1] == ' ' and chunk.has_cjk():
            # Drop the trailing space and follow the chunk with a breakline.
            chunk.word = chunk.word[:-1]
            rebuilt.append(chunk)
            rebuilt.append(chunk.breakline())
        else:
            rebuilt.append(chunk)
    self.list = rebuilt
[ "def", "_insert_breaklines", "(", "self", ")", ":", "target_chunks", "=", "ChunkList", "(", ")", "for", "chunk", "in", "self", ":", "if", "chunk", ".", "word", "[", "-", "1", "]", "==", "' '", "and", "chunk", ".", "has_cjk", "(", ")", ":", "chunk", ...
34.25
9.166667
def _read_text(self, filename): """ Helper that reads the UTF-8 content of the specified file, or None if the file doesn't exist. This returns a unicode string. """ with io.open(filename, 'rt', encoding='utf-8') as f: return f.read()
[ "def", "_read_text", "(", "self", ",", "filename", ")", ":", "with", "io", ".", "open", "(", "filename", ",", "'rt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
39.857143
14.428571
def restrict_bond_dict(self, bond_dict):
    """Restrict a bond dictionary to self.

    Args:
        bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
            to see examples for a bond_dict.

    Returns:
        bond dictionary
    """
    own_atoms = set(self.index)
    # Keep only bonds between atoms that both belong to this molecule.
    return {atom: bond_dict[atom] & own_atoms for atom in self.index}
[ "def", "restrict_bond_dict", "(", "self", ",", "bond_dict", ")", ":", "return", "{", "j", ":", "bond_dict", "[", "j", "]", "&", "set", "(", "self", ".", "index", ")", "for", "j", "in", "self", ".", "index", "}" ]
31.818182
20.090909
def config():
    '''
    Display fault manager configuration

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.config

    :return: dict of fmadm config modules, or {'Error': ...} on failure
    '''
    # (fixed: removed unused `ret` local and redundant temporaries)
    fmadm = _check_fmadm()
    cmd = '{cmd} config'.format(cmd=fmadm)
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        return {'Error': 'error executing fmadm config'}
    return _parse_fmadm_config(res['stdout'])
[ "def", "config", "(", ")", ":", "ret", "=", "{", "}", "fmadm", "=", "_check_fmadm", "(", ")", "cmd", "=", "'{cmd} config'", ".", "format", "(", "cmd", "=", "fmadm", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "retcode", ...
19.208333
23.041667
def create_group(self, title, parent=None, icon=1, expires=None):
    """
    Create a new group. A group title is needed or no group will
    be created.

    If a parent is given, the group will be created as a sub-group.

    title must be a string, image an unsigned int >0 and parent a Group.

    :return: The newly created group.
    :rtype: :class:`keepassdb.model.Group`
    """
    if parent and not isinstance(parent, Group):
        raise TypeError("Parent must be of type Group")

    if expires is None:
        expires = const.NEVER

    # Next free id: one past the current maximum (or 1 for the first group).
    next_id = max(g.id for g in self.groups) + 1 if self.groups else 1

    group = Group(id=next_id, title=title, icon=icon, db=self,
                  created=util.now(), modified=util.now(), accessed=util.now(),
                  expires=expires)

    if parent is None:
        # No parent given: append the new group at the end, under the root.
        group.parent = self.root
        self.root.children.append(group)
        group.level = 0
        self.groups.append(group)
    else:
        # Insert the group right behind its parent in the flat list.
        if parent not in self.groups:
            raise ValueError("Group doesn't exist / is not bound to this database.")
        parent.children.append(group)
        group.parent = parent
        group.level = parent.level + 1
        self.groups.insert(self.groups.index(parent) + 1, group)

    return group
[ "def", "create_group", "(", "self", ",", "title", ",", "parent", "=", "None", ",", "icon", "=", "1", ",", "expires", "=", "None", ")", ":", "if", "parent", "and", "not", "isinstance", "(", "parent", ",", "Group", ")", ":", "raise", "TypeError", "(", ...
35.6
19.2
def print_maps(map_type=None, number=None):
    """
    Print maps by type and/or number of defined colors.

    Parameters
    ----------
    map_type : {'Sequential', 'Diverging', 'Qualitative'}, optional
        Filter output by map type. By default all maps are printed.
    number : int, optional
        Filter output by number of defined colors. By default there is
        no numeric filtering.

    Raises
    ------
    ValueError
        If `number` is given without `map_type`.
    """
    if map_type:
        print_maps_by_type(map_type, number)
    elif number:
        # A numeric filter alone is ambiguous: reject it.
        raise ValueError('Invalid parameter combination. '
                         'number without map_type is not supported.')
    else:
        print_all_maps()
[ "def", "print_maps", "(", "map_type", "=", "None", ",", "number", "=", "None", ")", ":", "if", "not", "map_type", "and", "not", "number", ":", "print_all_maps", "(", ")", "elif", "map_type", ":", "print_maps_by_type", "(", "map_type", ",", "number", ")", ...
28.652174
19.695652
def token(self):
    '''Attempt to return the auth header token.

    :return: token related to request
    '''
    header_value = self.headers.get('Authorization', '')
    if 'Token ' not in header_value:
        # No token prefix: return the raw header (or empty string).
        return header_value
    # Everything after the first 'Token ' marker.
    return header_value.partition('Token ')[-1]
[ "def", "token", "(", "self", ")", ":", "auth_header", "=", "self", ".", "headers", ".", "get", "(", "'Authorization'", ",", "''", ")", "if", "'Token '", "in", "auth_header", ":", "return", "auth_header", ".", "partition", "(", "'Token '", ")", "[", "-", ...
31
18
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize,
                               tagger, lemmatizer, lemmatize, stopset,
                               first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """
    Extracts the sets of keywords for each Twitter list.

    Inputs:  - list_of_twitter_lists: A python list of Twitter lists in json format.
             - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".

    Output:  - list_of_keyword_sets: A list of sets of keywords (i.e. not a bag-of-words) in python set format.
             - list_of_lemma_to_keywordbags: List of python dicts that map stems/lemmas to original topic keywords.
    """
    keyword_sets = []
    lemma_to_keywordbags = []

    if list_of_twitter_lists is not None:
        for twitter_list in list_of_twitter_lists:
            if twitter_list is None:
                continue
            keyword_set, lemma_to_keywordbag = clean_twitter_list(
                twitter_list, sent_tokenize, _treebank_word_tokenize, tagger,
                lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re,
                digits_punctuation_whitespace_re, pos_set)
            keyword_sets.append(keyword_set)
            lemma_to_keywordbags.append(lemma_to_keywordbag)

    return keyword_sets, lemma_to_keywordbags
[ "def", "clean_list_of_twitter_list", "(", "list_of_twitter_lists", ",", "sent_tokenize", ",", "_treebank_word_tokenize", ",", "tagger", ",", "lemmatizer", ",", "lemmatize", ",", "stopset", ",", "first_cap_re", ",", "all_cap_re", ",", "digits_punctuation_whitespace_re", ",...
56.46875
31.46875
def read(self):
    """
    Read a given file path and return its content.

    :return: The content of the given file path.
    :rtype: str
    """
    try:
        # Normal case: the file is valid UTF-8.
        with open(self.file, "r", encoding="utf-8") as stream:
            content = stream.read()
    except UnicodeDecodeError:  # pragma: no cover
        # Fall back to the platform default encoding.
        with open(self.file, "r") as stream:
            content = stream.read()

    return content
[ "def", "read", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "file", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file", ":", "# We open and read a file.", "# We get the file content.", "funilrys", "=", "file", ".", "...
27.913043
16.347826
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """
    Return the absorbing atom symbol and site index in the given structure.

    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)

    Returns:
        str, int: symbol and site index

    Raises:
        ValueError: if absorbing_atom is neither a str nor an int.
    """
    if isinstance(absorbing_atom, str):
        # Symbol given: resolve to the first matching site index.
        site_index = structure.indices_from_symbol(absorbing_atom)[0]
        return absorbing_atom, site_index
    if isinstance(absorbing_atom, int):
        # Index given: look up the species at that site.
        return str(structure[absorbing_atom].specie), absorbing_atom
    raise ValueError("absorbing_atom must be either specie symbol or site index")
[ "def", "get_absorbing_atom_symbol_index", "(", "absorbing_atom", ",", "structure", ")", ":", "if", "isinstance", "(", "absorbing_atom", ",", "str", ")", ":", "return", "absorbing_atom", ",", "structure", ".", "indices_from_symbol", "(", "absorbing_atom", ")", "[", ...
36.294118
21.705882
def dump(self):
    """Dump the cache (for debugging)"""
    # Print each key together with its currently cached value.
    for cache_key in self._store:
        print(cache_key, ':', self.get(cache_key))
[ "def", "dump", "(", "self", ")", ":", "for", "key", "in", "self", ".", "_store", ".", "keys", "(", ")", ":", "print", "(", "key", ",", "':'", ",", "self", ".", "get", "(", "key", ")", ")" ]
34.75
7.25
def get_app_n_model(settings_entry_name):
    """Returns tuple with application and tree[item] model class names.

    :param str|unicode settings_entry_name:
    :rtype: tuple
    :raises ImproperlyConfigured: when the setting is not `app_name.model_name`.
    """
    entry_value = getattr(settings, settings_entry_name)
    try:
        app_name, model_name = entry_value.split('.')
    except ValueError:
        # Wrong number of dot-separated parts.
        raise ImproperlyConfigured(
            '`SITETREE_%s` must have the following format: `app_name.model_name`.' % settings_entry_name)
    return app_name, model_name
[ "def", "get_app_n_model", "(", "settings_entry_name", ")", ":", "try", ":", "app_name", ",", "model_name", "=", "getattr", "(", "settings", ",", "settings_entry_name", ")", ".", "split", "(", "'.'", ")", "except", "ValueError", ":", "raise", "ImproperlyConfigure...
38.333333
19.666667
def private_encrypt(key, message):
    '''
    Generate an M2Crypto-compatible signature

    :param Crypto.PublicKey.RSA._RSAobj key: The RSA key object
    :param str message: The message to sign
    :rtype: str
    :return: The signature, or an empty string if the signature operation failed
    '''
    if HAS_M2:
        # M2Crypto path: sign directly with X9.31 padding.
        return key.private_encrypt(message, salt.utils.rsax931.RSA_X931_PADDING)
    # Pure-python fallback: build an X9.31 signer from the exported PEM key.
    signer = salt.utils.rsax931.RSAX931Signer(key.exportKey('PEM'))
    return signer.sign(message)
[ "def", "private_encrypt", "(", "key", ",", "message", ")", ":", "if", "HAS_M2", ":", "return", "key", ".", "private_encrypt", "(", "message", ",", "salt", ".", "utils", ".", "rsax931", ".", "RSA_X931_PADDING", ")", "else", ":", "signer", "=", "salt", "."...
35.928571
24.357143
def p_file_chk_sum(self, f_term, predicate):
    """Sets file checksum. Assumes SHA1 algorithm without checking."""
    try:
        for _subj, _pred, checksum_node in self.graph.triples((f_term, predicate, None)):
            value_triples = self.graph.triples(
                (checksum_node, self.spdx_namespace['checksumValue'], None))
            for _, _, checksum_value in value_triples:
                self.builder.set_file_chksum(self.doc, six.text_type(checksum_value))
    except CardinalityError:
        # Builder rejects a second checksum for the same file.
        self.more_than_one_error('File checksum')
[ "def", "p_file_chk_sum", "(", "self", ",", "f_term", ",", "predicate", ")", ":", "try", ":", "for", "_s", ",", "_p", ",", "checksum", "in", "self", ".", "graph", ".", "triples", "(", "(", "f_term", ",", "predicate", ",", "None", ")", ")", ":", "for...
60.875
25.625
def update(self, id):
    """PUT /layertemplates/id: Update an existing item."""
    # url('LayerTemplates', id=ID)
    lt = meta.Session.query(LayerTemplate).get(id)
    # use following query for getting a layertemplate owned by current user
    #lt = self._get_lt_from_user_by_id(c.user, id)
    if lt is None:
        abort(404)

    # Read exactly CONTENT_LENGTH bytes of the request body and decode it.
    body_length = int(request.environ['CONTENT_LENGTH'])
    raw_body = request.environ['wsgi.input'].read(body_length)
    payload = simplejson.loads(raw_body.decode('utf8'))

    lt.name = payload['name']
    lt.comment = payload['comment']
    lt.json = payload['json']
    meta.Session.commit()
    response.status = 201
[ "def", "update", "(", "self", ",", "id", ")", ":", "# url('LayerTemplates', id=ID)", "lt", "=", "meta", ".", "Session", ".", "query", "(", "LayerTemplate", ")", ".", "get", "(", "id", ")", "# use following query for getting a layertemplate owned by current user", "#...
43.625
14
def factor_cmap(field_name, palette, factors, start=0, end=None, nan_color="gray"):
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalColorMapper`` transformation to a ``ColumnDataSource``
    column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        palette (seq[color]) : a list of colors to use for colormapping

        factors (seq) : a sequences of categorical factors corresponding to
            the palette

        start (int, optional) : a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)

        end (int, optional) : an end slice index to apply when the column
            data has factors with multiple levels. (default: None)

        nan_color (color, optional) : a default color to use when mapping data
            from a column does not succeed (default: "gray")

    Returns:
        dict
    '''
    mapper = CategoricalColorMapper(palette=palette,
                                    factors=factors,
                                    start=start,
                                    end=end,
                                    nan_color=nan_color)
    return field(field_name, mapper)
[ "def", "factor_cmap", "(", "field_name", ",", "palette", ",", "factors", ",", "start", "=", "0", ",", "end", "=", "None", ",", "nan_color", "=", "\"gray\"", ")", ":", "return", "field", "(", "field_name", ",", "CategoricalColorMapper", "(", "palette", "=",...
40.451613
31.806452
def train_batch(self, batch_info: BatchInfo):
    """ Single, most atomic 'step' of learning this reinforcer can perform """
    batch_info['sub_batch_data'] = []

    # One on-policy pass over the fresh rollout first.
    self.on_policy_train_batch(batch_info)

    cfg = self.settings
    if cfg.experience_replay > 0 and self.env_roller.is_ready_for_sampling():
        if cfg.stochastic_experience_replay:
            # Draw the number of replay passes from a Poisson around the mean.
            replay_passes = np.random.poisson(cfg.experience_replay)
        else:
            replay_passes = cfg.experience_replay

        for _ in range(replay_passes):
            self.off_policy_train_batch(batch_info)

    # Even with all the experience replay, we count the single rollout as a single batch
    batch_info.aggregate_key('sub_batch_data')
[ "def", "train_batch", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "batch_info", "[", "'sub_batch_data'", "]", "=", "[", "]", "self", ".", "on_policy_train_batch", "(", "batch_info", ")", "if", "self", ".", "settings", ".", "experience_replay", ...
46.705882
25.764706
def make_legacy_input_and_empty_witness(outpoint, stack_script, redeem_script, sequence):
    '''
    Outpoint, byte-like, byte-like, int -> (TxIn, InputWitness)

    Build a legacy transaction input together with its (empty) witness.
    '''
    tx_input = make_legacy_input(
        outpoint=outpoint,
        stack_script=stack_script,
        redeem_script=redeem_script,
        sequence=sequence)
    return tx_input, make_empty_witness()
[ "def", "make_legacy_input_and_empty_witness", "(", "outpoint", ",", "stack_script", ",", "redeem_script", ",", "sequence", ")", ":", "return", "(", "make_legacy_input", "(", "outpoint", "=", "outpoint", ",", "stack_script", "=", "stack_script", ",", "redeem_script", ...
44.9
19.5
def estimate_hz(self, R, z, dz=10.**-8., **kwargs):
    """
    NAME:

       estimate_hz

    PURPOSE:

       estimate the exponential scale height at R via a finite difference
       of the log-density over a small vertical step

    INPUT:

       R - Galactocentric radius (can be Quantity)

       dz - z range to use (can be Quantity)

       density kwargs

    OUTPUT:

       estimated hz

    HISTORY:

       2012-08-30 - Written - Bovy (IAS)

       2013-01-28 - Re-written - Bovy
    """
    # Sample symmetrically around z, except at the midplane where we step up.
    if z == 0.:
        sample_heights = [z, z + dz]
    else:
        sample_heights = [z - dz / 2., z + dz / 2.]
    densities = numpy.array([self.density(R, height, use_physical=False, **kwargs)
                             for height in sample_heights])
    log_densities = numpy.log(densities)
    # hz = -dz / d(ln rho): positive for an exponentially declining profile.
    return -dz / (log_densities[1] - log_densities[0])
[ "def", "estimate_hz", "(", "self", ",", "R", ",", "z", ",", "dz", "=", "10.", "**", "-", "8.", ",", "*", "*", "kwargs", ")", ":", "if", "z", "==", "0.", ":", "zs", "=", "[", "z", ",", "z", "+", "dz", "]", "else", ":", "zs", "=", "[", "z...
19.621622
23.72973
def save_driver_script(driver, script_save=None): # noqa: E501 """Save a script Save a script # noqa: E501 :param driver: The driver to use for the request. ie. github :type driver: str :param script_save: The data needed to save this script :type script_save: dict | bytes :rtype: Response """ if connexion.request.is_json: script_save = ScriptSave.from_dict(connexion.request.get_json()) # noqa: E501 response = errorIfUnauthorized(role='developer') if response: return response else: response = ApitaxResponse() driver: Driver = LoadedDrivers.getDriver(driver) driver.saveDriverScript(script_save.script.name, script_save.script.content) return Response(status=200, body=response.getResponseBody())
[ "def", "save_driver_script", "(", "driver", ",", "script_save", "=", "None", ")", ":", "# noqa: E501", "if", "connexion", ".", "request", ".", "is_json", ":", "script_save", "=", "ScriptSave", ".", "from_dict", "(", "connexion", ".", "request", ".", "get_json"...
30.76
23.12
def replicate_methods(srcObj, dstObj): """Replicate callable methods from a `srcObj` to `dstObj` (generally a wrapper object). @param srcObj: source object @param dstObj: destination object of the same type. @return : none Implementer notes: 1. Once the methods are mapped from the `srcObj` to the `dstObj`, the method calls will not get "routed" through `__getattr__` method (if implemented) in `type(dstObj)` class. 2. An example of what a 'key' and 'value' look like: key: MakeSequential value: <bound method IOpticalSystem.MakeSequential of <win32com.gen_py.ZOSAPI_Interfaces.IOpticalSystem instance at 0x77183968>> """ # prevent methods that we intend to specialize from being mapped. The specialized # (overridden) methods are methods with the same name as the corresponding method in # the source ZOS API COM object written for each ZOS API COM object in an associated # python script such as i_analyses_methods.py for I_Analyses overridden_methods = get_callable_method_dict(type(dstObj)).keys() #overridden_attrs = [each for each in type(dstObj).__dict__.keys() if not each.startswith('_')] # def zos_wrapper_deco(func): def wrapper(*args, **kwargs): return wrapped_zos_object(func(*args, **kwargs)) varnames = func.im_func.func_code.co_varnames # alternative is to use inspect.getargspec params = [par for par in varnames if par not in ('self', 'ret')] # removes 'self' and 'ret' wrapper.__doc__ = func.im_func.func_name + '(' + ', '.join(params) + ')' return wrapper # for key, value in get_callable_method_dict(srcObj).items(): if key not in overridden_methods: setattr(dstObj, key, zos_wrapper_deco(value))
[ "def", "replicate_methods", "(", "srcObj", ",", "dstObj", ")", ":", "# prevent methods that we intend to specialize from being mapped. The specialized ", "# (overridden) methods are methods with the same name as the corresponding method in ", "# the source ZOS API COM object written for each ZOS...
52.529412
28.029412
async def handle_websocket_exception(self, error: Exception) -> Optional[Response]: """Handle an uncaught exception. By default this logs the exception and then re-raises it. """ await got_websocket_exception.send(self, exception=error) self.log_exception(sys.exc_info()) internal_server_error = all_http_exceptions[500]() handler = self._find_exception_handler(internal_server_error) if handler is None: return internal_server_error.get_response() else: return await self.finalize_websocket(await handler(error), from_error_handler=True)
[ "async", "def", "handle_websocket_exception", "(", "self", ",", "error", ":", "Exception", ")", "->", "Optional", "[", "Response", "]", ":", "await", "got_websocket_exception", ".", "send", "(", "self", ",", "exception", "=", "error", ")", "self", ".", "log_...
38.9375
25.75