language: stringclasses, 2 values
func_code_string: stringlengths, 63 to 466k
java
public void setOpen(boolean open, boolean fireEvents) { if (m_open == open) { return; } m_open = open; executeOpen(fireEvents); CmsDomUtil.resizeAncestor(getParent()); }
python
def peekuntil(self, token, size=0): """ Peeks for a token in the FIFO. Performs the same function as readuntil() without removing data from the FIFO. See readuntil() for further information. """ self.__append() i = self.buf.find(token, self.pos) if i < 0: index = max(len(token) - 1, size) newpos = max(len(self.buf) - index, self.pos) return False, self.buf[self.pos:newpos] newpos = i + len(token) return True, self.buf[self.pos:newpos]
python
def split_page_artid(page_artid): """Split page_artid into page_start/end and artid.""" page_start = None page_end = None artid = None if not page_artid: return None, None, None # normalize unicode dashes page_artid = unidecode(six.text_type(page_artid)) if '-' in page_artid: # if it has a dash it's a page range page_range = page_artid.replace('--', '-').split('-') if len(page_range) == 2: page_start, page_end = page_range else: artid = page_artid elif _RE_2_CHARS.search(page_artid): # if it has 2 or more letters it's an article ID artid = page_artid elif len(_RE_CHAR.sub('', page_artid)) >= 5: # if there are more than 5 digits it's an article ID artid = page_artid else: if artid is None: artid = page_artid if page_start is None: page_start = page_artid return page_start, page_end, artid
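A quick behavioral sketch of the record above (hypothetical calls; assumes split_page_artid plus its module-level _RE_2_CHARS/_RE_CHAR patterns and the unidecode/six imports are in scope):

split_page_artid('12--24')  # -> ('12', '24', None): a dash marks a page range
split_page_artid('042108')  # -> (None, None, '042108'): 5+ digits means an article ID
split_page_artid('17')      # -> ('17', None, '17'): a bare page doubles as the artid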
python
def monitor_resource_sync_state(resource, callback, exit_event=None): """Coroutine that monitors a KATCPResource's sync state. Calls callback(True/False) whenever the resource becomes synced or unsynced. Will always do an initial callback(False) call. Exits without calling callback() if exit_event is set """ exit_event = exit_event or AsyncEvent() callback(False) # Initial condition, assume resource is not connected while not exit_event.is_set(): # Wait for resource to be synced yield until_any(resource.until_synced(), exit_event.until_set()) if exit_event.is_set(): break # If exit event is set we stop without calling callback else: callback(True) # Wait for resource to be un-synced yield until_any(resource.until_not_synced(), exit_event.until_set()) if exit_event.is_set(): break # If exit event is set we stop without calling callback else: callback(False)
python
def config(param_map, mastercode=DEFAULT_MASTERCODE): """Takes a dictionary of {Config.key: value} and returns a dictionary of processed keys and values to be used in the construction of a POST request to FlashAir's config.cgi""" pmap = {Config.mastercode: mastercode} pmap.update(param_map) processed_params = dict(_process_params(pmap)) return processed_params
python
def sigterm_handler(signum, stack_frame): """ Just tell the server to exit. WARNING: There are race conditions, for example with TimeoutSocket.accept. We don't care: the user can just rekill the process after like 1 sec. if the first kill did not work. """ # pylint: disable-msg=W0613 global _KILLED for name, cmd in _COMMANDS.items(): if cmd.at_stop: LOG.info("at_stop: %r", name) cmd.at_stop() _KILLED = True if _HTTP_SERVER: _HTTP_SERVER.kill() _HTTP_SERVER.server_close()
java
public Content getFieldDetails(Content fieldDetailsTree) { if (configuration.allowTag(HtmlTag.SECTION)) { HtmlTree htmlTree = HtmlTree.SECTION(getMemberTree(fieldDetailsTree)); return htmlTree; } return getMemberTree(fieldDetailsTree); }
java
public void marshall(GetUsagePlanKeyRequest getUsagePlanKeyRequest, ProtocolMarshaller protocolMarshaller) { if (getUsagePlanKeyRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(getUsagePlanKeyRequest.getUsagePlanId(), USAGEPLANID_BINDING); protocolMarshaller.marshall(getUsagePlanKeyRequest.getKeyId(), KEYID_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
public RabbitmqClient createRabbitmqClient() throws RabbitmqCommunicateException { List<RabbitmqClusterContext> contextList = this.reader.readConfiguration(); return createRabbitmqClient(contextList); }
java
public static void writeVector( String path, SimpleFeatureCollection featureCollection ) throws IOException { OmsVectorWriter writer = new OmsVectorWriter(); writer.file = path; writer.inVector = featureCollection; writer.process(); }
python
def print_dependencies(package_name): """Print the formatted information to standard out.""" info = get_sys_info() print("\nSystem Information") print("==================") print_info(info) info = get_pkg_info(package_name) print("\nPackage Versions") print("================") print_info(info)
python
def flatten_dict(root, parents=None, sep='.'): ''' Args: root (dict) : Nested dictionary (e.g., JSON object). parents (list) : List of ancestor keys. Returns ------- list List of ``(key, value)`` tuples, where ``key`` corresponds to the ancestor keys of the respective value joined by ``'.'``. For example, for the item in the dictionary ``{'a': {'b': {'c': 'foo'}}}``, the joined key would be ``'a.b.c'``. See also :func:`expand_items`. ''' if parents is None: parents = [] result = [] for k, v in root.items(): parents_i = parents + [k] key_i = sep.join(parents_i) if isinstance(v, dict): value_i = flatten_dict(v, parents=parents_i, sep=sep) result.extend(value_i) else: value_i = v result.append((key_i, value_i)) return result
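A minimal usage sketch for flatten_dict (assuming the Python 3 form above; tuple order follows dict insertion order):

flatten_dict({'a': {'b': {'c': 'foo'}, 'd': 1}})
# -> [('a.b.c', 'foo'), ('a.d', 1)]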
java
public static void migrate(Connection connection, String... tables) throws SQLException { new PKMigrate(connection, tables).migrate(); }
python
def setScales(self,scales=None,term_num=None): """ get random initialization of variances based on the empirical trait variance Args: scales: if None, the scales are initialized randomly; otherwise they are assigned to term_num (or to all terms if term_num is None) term_num: index of the term whose scales are set """ if scales is None: for term_i in range(self.n_terms): n_scales = self.vd.getTerm(term_i).getNumberScales() self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales))) elif term_num is None: assert scales.shape[0]==self.vd.getNumberScales(), 'incompatible shape' index = 0 for term_i in range(self.n_terms): index1 = index+self.vd.getTerm(term_i).getNumberScales() self.vd.getTerm(term_i).setScales(scales[index:index1]) index = index1 else: assert scales.shape[0]==self.vd.getTerm(term_num).getNumberScales(), 'incompatible shape' self.vd.getTerm(term_num).setScales(scales)
java
public static final Object deserialize(byte[] bytes) throws Exception { if (bytes == null || bytes.length < 4) { return new LinkedHashMap<>(); } // Try to deserialize content as Object (type-safe serialization) if (bytes[0] == 1) { try { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); bais.skip(1); return new ObjectInputStream(bais).readObject(); } catch (Throwable ignored) { } } // Read content as JSON String json = new String(Arrays.copyOfRange(bytes, 1, bytes.length), StandardCharsets.UTF_8); return TreeReaderRegistry.getReader(null).parse(json); }
java
public void setRawParameters(Hashtable params) { if (WCCustomProperties.CHECK_REQUEST_OBJECT_IN_USE){ checkRequestObjectInUse(); } //321485 if (TraceComponent.isAnyTracingEnabled()&&logger.isLoggable (Level.FINE)) { //306998.15 logger.logp(Level.FINE, CLASS_NAME,"setRawParameters", ""); } SRTServletRequestThreadData.getInstance().setParameters(params); }
java
public List<Subscription> matches(String topic) { List<Token> tokens; try { tokens = splitTopic(topic); } catch (ParseException ex) { //TODO handle the parse exception Log.error(null, ex); return Collections.emptyList(); } Queue<Token> tokenQueue = new LinkedBlockingDeque<Token>(tokens); List<Subscription> matchingSubs = new ArrayList<Subscription>(); subscriptions.matches(tokenQueue, matchingSubs); return matchingSubs; }
python
def do_status(self, service): """ List all services on the cluster Usage: > status """ if service: self.do_show("services", single=service) else: self.do_show("services")
java
public <G, ERR> OrFuture<G, ERR> firstCompletedOf(Iterable<? extends OrFuture<? extends G, ? extends ERR>> input) { OrPromise<G, ERR> promise = promise(); input.forEach(future -> future.onComplete(promise::tryComplete)); return promise.future(); }
java
private void findParamDescMethods() { for (Method method : m_commandClass.getMethods()) { if (method.isAnnotationPresent(ParamDescription.class)) { try { RESTParameter cmdParam = (RESTParameter) method.invoke(null, (Object[])null); addParameter(cmdParam); } catch (Exception e) { LOGGER.warn("Method '{}' for class '{}' could not be invoked: {}", new Object[]{method.getName(), m_commandClass.getName(), e.toString()}); } } } }
java
@Override public void usageDetail( final char commandPrefix, final ICmdLineArg<?> arg, final int _indentLevel) { nameIt(commandPrefix, arg); final String help = ((AbstractCLA<?>) arg).getHelp(); if (help != null && help.trim().length() > 0) { allign(29); append(help); unallign(); newLine(); } }
java
private void createProxyListener() throws SIResourceException { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "createProxyListener"); // Create the proxy listener instance _proxyListener = new NeighbourProxyListener(_neighbours, this); /* * Now we can create our asynchronous consumer to listen on * SYSTEM.MENAME.PROXY.QUEUE Queue for receiving subscription * updates */ // 169897.1 modified parameters try { _proxyAsyncConsumer = _messageProcessor .getSystemConnection() .createSystemConsumerSession( _messageProcessor.getProxyHandlerDestAddr(), // destination name null, //Destination filter null, // SelectionCriteria - discriminator and selector Reliability.ASSURED_PERSISTENT, // reliability false, // enable read ahead false, null, false); // 169897.1 modified parameters _proxyAsyncConsumer.registerAsynchConsumerCallback( _proxyListener, 0, 0, 1, null); _proxyAsyncConsumer.start(false); } catch (SIException e) { FFDCFilter.processException( e, "com.ibm.ws.sib.processor.proxyhandler.MultiMEProxyHandler.createProxyListener", "1:1271:1.96", this); SibTr.exception(tc, e); if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "createProxyListener", "SIResourceException"); // The Exceptions should already be NLS'd throw new SIResourceException(e); } if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "createProxyListener"); }
python
def get_d1str(self, goobj, reverse=False): """Get D1-string representing all parent terms which are depth-01 GO terms.""" return "".join(sorted(self.get_parents_letters(goobj), reverse=reverse))
python
def unzip(zip_file, dest, excludes=None, options=None, template=None, runas=None, trim_output=False, password=None, extract_perms=True): ''' Uses the ``zipfile`` Python module to unpack zip files .. versionchanged:: 2015.5.0 This function was rewritten to use Python's native zip file support. The old functionality has been preserved in the new function :mod:`archive.cmd_unzip <salt.modules.archive.cmd_unzip>`. For versions 2014.7.x and earlier, see the :mod:`archive.cmd_zip <salt.modules.archive.cmd_zip>` documentation. zip_file Path of zip file to be unpacked dest The destination directory into which the file should be unpacked excludes : None Comma-separated list of files not to unpack. Can also be passed in a Python list. options These options are only used by the ``unzip`` binary; this function ignores them. .. versionadded:: 2016.3.1 template : None Can be set to 'jinja' or another supported template engine to render the command arguments before execution: .. code-block:: bash salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2 runas : None Unpack the zip file as the specified user. Defaults to the user under which the minion is running. trim_output : False The number of files we should output on success before the rest are trimmed. If this is set to True, it will default to 100. CLI Example: .. code-block:: bash salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2 password Password to use with password protected zip files .. note:: The password will be present in the events logged to the minion log file at the ``debug`` log level. If the minion is logging at ``debug`` (or more verbose), then be advised that the password will appear in the log. .. versionadded:: 2016.3.0 extract_perms : True The Python zipfile_ module does not extract file/directory attributes by default. When this argument is set to ``True``, Salt will attempt to apply the file permission attributes to the extracted files/folders. On Windows, only the read-only flag will be extracted as set within the zip file, other attributes (i.e. user/group permissions) are ignored. Set this argument to ``False`` to disable this behavior. .. versionadded:: 2016.11.0 .. _zipfile: https://docs.python.org/2/library/zipfile.html CLI Example: .. code-block:: bash salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='BadPassword' ''' if not excludes: excludes = [] if runas: euid = os.geteuid() egid = os.getegid() uinfo = __salt__['user.info'](runas) if not uinfo: raise SaltInvocationError( "User '{0}' does not exist".format(runas) ) zip_file, dest = _render_filenames(zip_file, dest, None, template) if runas and (euid != uinfo['uid'] or egid != uinfo['gid']): # Change the egid first, as changing it after the euid will fail # if the runas user is non-privileged. os.setegid(uinfo['gid']) os.seteuid(uinfo['uid']) try: # Define cleaned_files here so that an exception will not prevent this # variable from being defined and cause a NameError in the return # statement at the end of the function. cleaned_files = [] with contextlib.closing(zipfile.ZipFile(zip_file, "r")) as zfile: files = zfile.namelist() if isinstance(excludes, six.string_types): excludes = [x.strip() for x in excludes.split(',')] elif isinstance(excludes, (float, six.integer_types)): excludes = [six.text_type(excludes)] cleaned_files.extend([x for x in files if x not in excludes]) for target in cleaned_files: if target not in excludes: if salt.utils.platform.is_windows() is False: info = zfile.getinfo(target) # Check if zipped file is a symbolic link if stat.S_ISLNK(info.external_attr >> 16): source = zfile.read(target) os.symlink(source, os.path.join(dest, target)) continue zfile.extract(target, dest, password) if extract_perms: if not salt.utils.platform.is_windows(): perm = zfile.getinfo(target).external_attr >> 16 if perm == 0: umask_ = salt.utils.files.get_umask() if target.endswith('/'): perm = 0o777 & ~umask_ else: perm = 0o666 & ~umask_ os.chmod(os.path.join(dest, target), perm) else: win32_attr = zfile.getinfo(target).external_attr & 0xFF win32file.SetFileAttributes(os.path.join(dest, target), win32_attr) except Exception as exc: if runas: os.seteuid(euid) os.setegid(egid) # Wait to raise the exception until euid/egid are restored to avoid # permission errors in writing to minion log. raise CommandExecutionError( 'Exception encountered unpacking zipfile: {0}'.format(exc) ) finally: # Restore the euid/egid if runas: os.seteuid(euid) os.setegid(egid) return _trim_files(cleaned_files, trim_output)
python
def download(self, media_id, as_stream=False): """ Downloads the specified file. :param media_id: string :param as_stream: bool :rtype: requests.Response """ response = self.__app.native_api_call('media', 'd/' + media_id, {}, self.__options, False, None, as_stream, http_path="/api/meta/v1/", http_method='GET') return response
python
def classify_import(module_name, application_directories=('.',)): """Classifies an import by its package. Returns a value in ImportType.__all__ :param text module_name: The dotted notation of a module :param tuple application_directories: tuple of paths which are considered application roots. """ # Only really care about the first part of the path base, _, _ = module_name.partition('.') found, module_path, is_builtin = _get_module_info( base, application_directories, ) if base == '__future__': return ImportType.FUTURE # Relative imports: `from .foo import bar` elif base == '': return ImportType.APPLICATION # If imp tells us it is builtin, it is builtin elif is_builtin: return ImportType.BUILTIN # If the module path exists in the project directories elif _module_path_is_local_and_is_not_symlinked( module_path, application_directories, ): return ImportType.APPLICATION # Otherwise we assume it is a system module or a third party module elif ( found and PACKAGES_PATH not in module_path and not _due_to_pythonpath(module_path) ): return ImportType.BUILTIN else: return ImportType.THIRD_PARTY
java
public static int sort( LongArray array, long numRecords, int startByteIndex, int endByteIndex, boolean desc, boolean signed) { assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0"; assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7"; assert endByteIndex > startByteIndex; assert numRecords * 2 <= array.size(); long inIndex = 0; long outIndex = numRecords; if (numRecords > 0) { long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex); for (int i = startByteIndex; i <= endByteIndex; i++) { if (counts[i] != null) { sortAtByte( array, numRecords, counts[i], i, inIndex, outIndex, desc, signed && i == endByteIndex); long tmp = inIndex; inIndex = outIndex; outIndex = tmp; } } } return Ints.checkedCast(inIndex); }
python
def makeSingleBandWKBRaster(cls, session, width, height, upperLeftX, upperLeftY, cellSizeX, cellSizeY, skewX, skewY, srid, dataArray, initialValue=None, noDataValue=None): """ Generate Well Known Binary via SQL. Must be used on a PostGIS database as it relies on several PostGIS database functions. :param session: SQLAlchemy session object bound to a PostGIS enabled database :param height: Height of the raster (or number of rows) :param width: Width of the raster (or number of columns) :param upperLeftX: Raster upper left corner X coordinate :param upperLeftY: Raster upper left corner Y coordinate :param cellSizeX: Raster cell size in X direction :param cellSizeY: Raster cell size in Y direction :param skewX: Skew in X direction :param skewY: Skew in Y direction :param srid: SRID of the raster :param initialValue: Initial / default value of the raster cells :param noDataValue: Value of cells to be considered as cells containing no data :param dataArray: 2-dimensional list of values or a string representation of a 2-dimensional list that will be used to populate the raster values """ # Stringify the data array if isinstance(dataArray, str): dataArrayString = dataArray else: dataArrayString = json.dumps(dataArray) # Validate if initialValue is None: initialValue = 'NULL' if noDataValue is None: noDataValue = 'NULL' # Cell size in the Y direction must be negative if cellSizeY > 0: print('RASTER LOADER WARNING: cellSizeY should be defined as negative.') cellSizeY = -1 * cellSizeY # Create the SQL statement statement = ''' SELECT ST_SetValues( ST_AddBand( ST_MakeEmptyRaster({0}::integer, {1}::integer, {2}, {3}, {4}, {5}, {6}, {7}, {8}::integer), 1::integer, '32BF'::text, {9}::double precision, {10}::double precision ), 1, 1, 1, ARRAY{11}::double precision[][] ); '''.format(width, height, upperLeftX, upperLeftY, cellSizeX, cellSizeY, skewX, skewY, srid, initialValue, noDataValue, dataArrayString) result = session.execute(statement) # Extract result wellKnownBinary = '' for row in result: wellKnownBinary = row[0] return wellKnownBinary
java
public HttpRequest withBody(byte[] content) { headers.set("Content-Length", String.valueOf(content.length)); // Unpooled.wrappedBuffer(content) allocates a ByteBuf from the unpooled heap return withBody(Flux.defer(() -> Flux.just(Unpooled.wrappedBuffer(content)))); }
python
def normalize(self, text): """Run the Normalizer on a string. :param text: The string to normalize. """ # Normalize to canonical unicode (using NFKC by default) if self.form is not None: text = unicodedata.normalize(self.form, text) # Strip out any control characters (they occasionally creep in somehow) for control in CONTROLS: text = text.replace(control, '') # Normalize unusual whitespace not caught by unicodedata text = text.replace('\u000b', ' ').replace('\u000c', ' ').replace(u'\u0085', ' ') text = text.replace('\u2028', '\n').replace('\u2029', '\n').replace('\r\n', '\n').replace('\r', '\n') # Normalize all hyphens, minuses and dashes to ascii hyphen-minus and remove soft hyphen entirely if self.hyphens: # TODO: Better normalization of em/en dashes to '--' if surrounded by spaces or start/end? for hyphen in HYPHENS | MINUSES: text = text.replace(hyphen, '-') text = text.replace('\u00ad', '') # Normalize all quotes and primes to ascii apostrophe and quotation mark if self.quotes: for double_quote in DOUBLE_QUOTES: text = text.replace(double_quote, '"') # \u0022 for single_quote in (SINGLE_QUOTES | APOSTROPHES | ACCENTS): text = text.replace(single_quote, "'") # \u0027 text = text.replace('′', "'") # \u2032 prime text = text.replace('‵', "'") # \u2035 reversed prime text = text.replace('″', "''") # \u2033 double prime text = text.replace('‶', "''") # \u2036 reversed double prime text = text.replace('‴', "'''") # \u2034 triple prime text = text.replace('‷', "'''") # \u2037 reversed triple prime text = text.replace('⁗', "''''") # \u2057 quadruple prime if self.ellipsis: text = text.replace('…', '...').replace(' . . . ', ' ... ') # \u2026 if self.slashes: for slash in SLASHES: text = text.replace(slash, '/') if self.tildes: for tilde in TILDES: text = text.replace(tilde, '~') if self.strip: text = text.strip() # Collapse all whitespace down to a single space if self.collapse: text = ' '.join(text.split()) return text
java
public static double pdf(double val, int v) { // TODO: improve precision by computing "exp" last? return FastMath.exp(GammaDistribution.logGamma((v + 1) * .5) - GammaDistribution.logGamma(v * .5)) // * (1 / FastMath.sqrt(v * Math.PI)) * FastMath.pow(1 + (val * val) / v, -((v + 1) * .5)); }
python
def buffer_close(self, buf, redraw=True): """ closes given :class:`~alot.buffers.Buffer`. This it removes it from the bufferlist and calls its cleanup() method. """ # call pre_buffer_close hook prehook = settings.get_hook('pre_buffer_close') if prehook is not None: prehook(ui=self, dbm=self.dbman, buf=buf) buffers = self.buffers success = False if buf not in buffers: logging.error('tried to close unknown buffer: %s. \n\ni have:%s', buf, self.buffers) elif self.current_buffer == buf: logging.info('closing current buffer %s', buf) index = buffers.index(buf) buffers.remove(buf) offset = settings.get('bufferclose_focus_offset') nextbuffer = buffers[(index + offset) % len(buffers)] self.buffer_focus(nextbuffer, redraw) buf.cleanup() success = True else: buffers.remove(buf) buf.cleanup() success = True # call post_buffer_closed hook posthook = settings.get_hook('post_buffer_closed') if posthook is not None: posthook(ui=self, dbm=self.dbman, buf=buf, success=success)
java
@Deprecated public void request(Bundle parameters, RequestListener listener, final Object state) { request(null, parameters, "GET", listener, state); }
python
def parse(self, xml_data): """ Parse XML data """ # parse tree if isinstance(xml_data, string_types): # Presumably, this is textual xml data. try: root = ET.fromstring(xml_data) except StdlibParseError as e: raise ParseError(str(e)) else: # Otherwise, assume it has already been parsed into a tree root = xml_data # get type if 'type' in root.attrib: self.kind = root.attrib['type'] # parse component for c1 in root: # <id> if c1.tag == 'id': self.id = c1.text # <updatecontact> elif c1.tag == 'updatecontact' or c1.tag == 'update_contact': self.update_contact = c1.text # <metadata_license> elif c1.tag == 'metadata_license': self.metadata_license = c1.text # <releases> elif c1.tag == 'releases': for c2 in c1: if c2.tag == 'release': rel = Release() rel._parse_tree(c2) self.add_release(rel) # <reviews> elif c1.tag == 'reviews': for c2 in c1: if c2.tag == 'review': rev = Review() rev._parse_tree(c2) self.add_review(rev) # <screenshots> elif c1.tag == 'screenshots': for c2 in c1: if c2.tag == 'screenshot': ss = Screenshot() ss._parse_tree(c2) self.add_screenshot(ss) # <provides> elif c1.tag == 'provides': for c2 in c1: prov = Provide() prov._parse_tree(c2) self.add_provide(prov) # <requires> elif c1.tag == 'requires': for c2 in c1: req = Require() req._parse_tree(c2) self.add_require(req) # <kudos> elif c1.tag == 'kudos': for c2 in c1: if not c2.tag == 'kudo': continue self.kudos.append(c2.text) # <keywords> elif c1.tag == 'keywords': for c2 in c1: if not c2.tag == 'keyword': continue self.keywords.append(c2.text) # <categories> elif c1.tag == 'categories': for c2 in c1: if not c2.tag == 'category': continue self.categories.append(c2.text) # <custom> elif c1.tag == 'custom': for c2 in c1: if not c2.tag == 'value': continue if 'key' not in c2.attrib: continue self.custom[c2.attrib['key']] = c2.text # <project_license> elif c1.tag == 'project_license' or c1.tag == 'licence': self.project_license = c1.text # <developer_name> elif c1.tag == 'developer_name': self.developer_name = _join_lines(c1.text) # <name> elif c1.tag == 'name' and not self.name: self.name = _join_lines(c1.text) # <pkgname> elif c1.tag == 'pkgname' and not self.pkgname: self.pkgname = _join_lines(c1.text) # <summary> elif c1.tag == 'summary' and not self.summary: self.summary = _join_lines(c1.text) # <description> elif c1.tag == 'description' and not self.description: self.description = _parse_desc(c1) # <url> elif c1.tag == 'url': key = 'homepage' if 'type' in c1.attrib: key = c1.attrib['type'] self.urls[key] = c1.text elif c1.tag == 'icon': key = c1.attrib.pop('type', 'unknown') c1.attrib['value'] = c1.text self.icons[key] = self.icons.get(key, []) + [c1.attrib]
java
public String ingest(Context context, InputStream serialization, String logMessage, String format, String encoding, String pid) throws ServerException { return worker.ingest(context, serialization, logMessage, format, encoding, pid); }
python
def _what_default(self, pronunciation): """Provide the default prediction of the what task. This function is used to predict the probability of a given pronunciation being reported for a given token. :param pronunciation: The list or array of confusion probabilities at each index """ token_default = self['metadata']['token_default']['what'] index_count = 2*len(pronunciation) + 1 predictions = {} for i in range(index_count): index_predictions = {} if i % 2 == 0: index_predictions.update(token_default['0']) else: presented_phoneme = pronunciation[int((i-1)/2)] index_predictions[presented_phoneme] = token_default['1']['='] index_predictions['*'] = token_default['1']['*'] index_predictions[''] = token_default['1'][''] predictions['{}'.format(i)] = index_predictions return predictions
java
public final void setShadowElevation(final int elevation) { Condition.INSTANCE.ensureAtLeast(elevation, 0, "The elevation must be at least 0"); Condition.INSTANCE.ensureAtMaximum(elevation, ElevationUtil.MAX_ELEVATION, "The elevation must be at maximum " + ElevationUtil.MAX_ELEVATION); this.elevation = elevation; adaptElevationShadow(); }
java
protected byte[] spoolInternalValue() { try { return value.getString().getBytes(Constants.DEFAULT_ENCODING); } catch (UnsupportedEncodingException e) { throw new RuntimeException("FATAL ERROR Charset " + Constants.DEFAULT_ENCODING + " is not supported!"); } }
java
public static Menu get(final String _name) throws CacheReloadException { return AbstractUserInterfaceObject.<Menu>get(_name, Menu.class, CIAdminUserInterface.Menu.getType()); }
java
public void setLastSegment(boolean v) { if (SourceDocumentInformation_Type.featOkTst && ((SourceDocumentInformation_Type)jcasType).casFeat_lastSegment == null) jcasType.jcas.throwFeatMissing("lastSegment", "org.apache.uima.examples.SourceDocumentInformation"); jcasType.ll_cas.ll_setBooleanValue(addr, ((SourceDocumentInformation_Type)jcasType).casFeatCode_lastSegment, v);}
python
def clean_url(url): """ Normalize the url and clean it >>> clean_url("http://www.assemblee-nationale.fr/15/dossiers/le_nouveau_dossier.asp#deuxieme_partie") 'http://www.assemblee-nationale.fr/dyn/15/dossiers/deuxieme_partie' >>> clean_url("http://www.conseil-constitutionnel.fr/conseil-constitutionnel/francais/les-decisions/acces-par-date/decisions-depuis-1959/2013/2013-681-dc/decision-n-2013-681-dc-du-5-decembre-2013.138900.html") 'https://www.conseil-constitutionnel.fr/decision/2013/2013681DC.htm' """ url = url.strip() # fix urls like 'pjl09-518.htmlhttp://www.assemblee-nationale.fr/13/ta/ta0518.asp' if url.find('https://') > 0: url = 'https://' + url.split('https://')[1] if url.find('http://') > 0: url = 'http://' + url.split('http://')[1] scheme, netloc, path, params, query, fragment = urlparse(url) path = path.replace('//', '/') if 'xtor' in fragment: fragment = '' # fix url like http://www.senat.fr/dossier-legislatif/www.conseil-constitutionnel.fr/decision/2012/2012646dc.htm if 'www.conseil-' in url: url = urlunparse((scheme, netloc, path, params, query, fragment)) url = 'http://www.conseil-' + url.split('www.conseil-')[1] return find_stable_link_for_CC_decision(url) if 'legifrance.gouv.fr' in url: params = '' url_jo_params = parse_qs(query) if 'WAspad' in path: newurl = get_redirected_url(url) if url != newurl: return clean_url(newurl) if 'cidTexte' in url_jo_params: query = 'cidTexte=' + url_jo_params['cidTexte'][0] elif path.endswith('/jo/texte'): newurl = find_jo_link(url) if url != newurl: return clean_url(newurl) if netloc == 'legifrance.gouv.fr': netloc = 'www.legifrance.gouv.fr' if 'jo_pdf.do' in path and 'id' in url_jo_params: path = 'affichTexte.do' query = 'cidTexte=' + url_jo_params['id'][0] # ensure we link the initial version of the text and not subsequently modified ones if query.startswith('cidTexte'): query += '&categorieLien=id' path = path.replace('./affichTexte.do', 'affichTexte.do') if 'senat.fr' in netloc: path = path.replace('leg/../', '/') path = path.replace('dossierleg/', 'dossier-legislatif/') # normalize dosleg url by removing extra url parameters if 'dossier-legislatif/' in path: query = '' fragment = '' if netloc == 'webdim': netloc = 'www.assemblee-nationale.fr' # force https if 'assemblee-nationale.fr' not in netloc and 'conseil-constitutionnel.fr' not in netloc: scheme = 'https' # url like http://www.assemblee-nationale.fr/13/projets/pl2727.asp2727 if 'assemblee-nationale.fr' in url: path = re_clean_ending_digits.sub(r"\1", path) if '/dossiers/' in path: url = urlunparse((scheme, netloc, path, params, query, fragment)) legislature, slug = parse_national_assembly_url(url) if legislature and slug: template = AN_OLD_URL_TEMPLATE if legislature > 14: template = AN_NEW_URL_TEMPLATE return template.format(legislature=legislature, slug=slug) return urlunparse((scheme, netloc, path, params, query, fragment))
python
def to_pickle(self, path, compression='infer', protocol=pickle.HIGHEST_PROTOCOL): """ Pickle (serialize) object to file. Parameters ---------- path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \ default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ from pandas.io.pickle import to_pickle return to_pickle(self, path, compression=compression, protocol=protocol)
java
public boolean isTimeIncluded (final long timeStamp) { if (timeStamp <= 0) { throw new IllegalArgumentException ("timeStamp must be greater than 0"); } if (m_aBaseCalendar != null) { if (m_aBaseCalendar.isTimeIncluded (timeStamp) == false) { return false; } } return true; }
java
final void writeLong(long value) { byte writeBuffer[] = new byte[8]; writeBuffer[0] = (byte) (value >>> 56); writeBuffer[1] = (byte) (value >>> 48); writeBuffer[2] = (byte) (value >>> 40); writeBuffer[3] = (byte) (value >>> 32); writeBuffer[4] = (byte) (value >>> 24); writeBuffer[5] = (byte) (value >>> 16); writeBuffer[6] = (byte) (value >>> 8); writeBuffer[7] = (byte) (value >>> 0); write(writeBuffer, 0, 8); }
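For comparison, a hedged Python sketch of the same encoding the Java method performs: eight bytes of a 64-bit value, most significant byte first ('>q' is struct's big-endian signed 64-bit format):

import struct

def write_long(value):
    # Same byte order as the Java writeLong above
    return struct.pack('>q', value)

assert write_long(1) == b'\x00\x00\x00\x00\x00\x00\x00\x01'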
python
def bin_executables(self): """A normalized map of bin executable names and local path to an executable :rtype: dict """ if isinstance(self.payload.bin_executables, string_types): # In this case, the package_name is the bin name return { self.package_name: self.payload.bin_executables } return self.payload.bin_executables
python
def normalize_datum(self, datum): """ Convert `datum` into something that umsgpack likes. :param datum: something that we want to process with umsgpack :return: a packable version of `datum` :raises TypeError: if `datum` cannot be packed This method is called by :meth:`.packb` to recursively normalize an input value before passing it to :func:`umsgpack.packb`. Values are normalized according to the following table. +-------------------------------+-------------------------------+ | **Value** | **MsgPack Family** | +-------------------------------+-------------------------------+ | :data:`None` | `nil byte`_ (0xC0) | +-------------------------------+-------------------------------+ | :data:`True` | `true byte`_ (0xC3) | +-------------------------------+-------------------------------+ | :data:`False` | `false byte`_ (0xC2) | +-------------------------------+-------------------------------+ | :class:`int` | `integer family`_ | +-------------------------------+-------------------------------+ | :class:`float` | `float family`_ | +-------------------------------+-------------------------------+ | String | `str family`_ | +-------------------------------+-------------------------------+ | :class:`bytes` | `bin family`_ | +-------------------------------+-------------------------------+ | :class:`bytearray` | `bin family`_ | +-------------------------------+-------------------------------+ | :class:`memoryview` | `bin family`_ | +-------------------------------+-------------------------------+ | :class:`collections.Sequence` | `array family`_ | +-------------------------------+-------------------------------+ | :class:`collections.Set` | `array family`_ | +-------------------------------+-------------------------------+ | :class:`collections.Mapping` | `map family`_ | +-------------------------------+-------------------------------+ | :class:`uuid.UUID` | Converted to String | +-------------------------------+-------------------------------+ .. _nil byte: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil .. _true byte: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family .. _false byte: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family .. _integer family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family .. _float family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family .. _str family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family .. _array family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family .. _map family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md #mapping-format-family .. _bin family: https://github.com/msgpack/msgpack/blob/ 0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family """ if datum is None: return datum if isinstance(datum, self.PACKABLE_TYPES): return datum if isinstance(datum, uuid.UUID): datum = str(datum) if isinstance(datum, bytearray): datum = bytes(datum) if isinstance(datum, memoryview): datum = datum.tobytes() if hasattr(datum, 'isoformat'): datum = datum.isoformat() if isinstance(datum, (bytes, str)): return datum if isinstance(datum, (collections.Sequence, collections.Set)): return [self.normalize_datum(item) for item in datum] if isinstance(datum, collections.Mapping): out = {} for k, v in datum.items(): out[k] = self.normalize_datum(v) return out raise TypeError( '{} is not msgpackable'.format(datum.__class__.__name__))
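An illustrative call for the normalizer above (obj stands for a hypothetical instance of the class this method belongs to, and PACKABLE_TYPES is assumed to cover int; note the record's collections.Sequence/collections.Set lookups would need collections.abc on Python 3.10+):

obj.normalize_datum({'id': uuid.uuid4(), 'tags': {'a', 'b'}, 'n': 1})
# -> {'id': '<uuid as str>', 'tags': ['a', 'b'], 'n': 1}  (set becomes a list; order arbitrary)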
python
def get_search_fields(self): """Return list of lookup names.""" if self.search_fields: return self.search_fields raise NotImplementedError('%s, must implement "search_fields".' % self.__class__.__name__)
python
def filter_belief(): """Filter to beliefs above a given threshold.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') belief_cutoff = body.get('belief_cutoff') if belief_cutoff is not None: belief_cutoff = float(belief_cutoff) stmts = stmts_from_json(stmts_json) stmts_out = ac.filter_belief(stmts, belief_cutoff) return _return_stmts(stmts_out)
python
def first_or_new(self, _attributes=None, **attributes): """ Get the first related model record matching the attributes or instantiate it. :param attributes: The attributes :type attributes: dict :rtype: Model """ if _attributes is not None: attributes.update(_attributes) instance = self.where(attributes).first() if instance is None: instance = self._related.new_instance() instance.set_attribute(self.get_plain_foreign_key(), self.get_parent_key()) return instance
java
public void setProductConstraints(com.google.api.ads.admanager.axis.v201805.ProposalLineItemConstraints productConstraints) { this.productConstraints = productConstraints; }
python
def remove_shard(self, shard, drop_buffered_records=False): """Remove a Shard from the Coordinator. Drops all buffered records from the Shard. If the Shard is active or a root, it is removed and any children promoted to those roles. :param shard: The shard to remove :type shard: :class:`~bloop.stream.shard.Shard` :param bool drop_buffered_records: Whether records from this shard should be removed. Default is False. """ try: self.roots.remove(shard) except ValueError: # Wasn't a root Shard pass else: self.roots.extend(shard.children) try: self.active.remove(shard) except ValueError: # Wasn't an active Shard pass else: self.active.extend(shard.children) if drop_buffered_records: # TODO can this be improved? Gets expensive for high-volume streams with large buffers heap = self.buffer.heap # Clear buffered records from the shard. Each record is (ordering, record, shard) to_remove = [x for x in heap if x[2] is shard] for x in to_remove: heap.remove(x)
java
@Override public String getOperationReplyValueTypeDescription(String operationName, Locale locale, ResourceBundle bundle, String... suffixes) { try { return bundle.getString(getVariableBundleKey(new String[]{operationName, REPLY}, suffixes)); } catch (MissingResourceException e) { try { return getOperationParameterValueTypeDescription(operationName, suffixes[0], locale, bundle); } catch (MissingResourceException ex) { throw e; } } }
python
def stack(frame, level=-1, dropna=True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = _factorize_from_iterable(index) return categories, codes N, K = frame.shape # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] clev, clab = factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) codes = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex(levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False) if frame._is_homogeneous_type: # For homogeneous EAs, frame.values will coerce to object. So # we concatenate instead. dtypes = list(frame.dtypes.values) dtype = dtypes[0] if is_extension_array_dtype(dtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type([ col._values for _, col in frame.iteritems() ]) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: # homogeneous, non-EA new_values = frame.values.ravel() else: # non-homogeneous new_values = frame.values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index)
python
def visible(self): """ Return the comments that are visible based on the ``COMMENTS_XXX_VISIBLE`` settings. When these settings are set to ``True``, the relevant comments are returned that shouldn't be shown, and are given placeholders in the template ``generic/includes/comment.html``. """ visible = self.all() if not settings.COMMENTS_UNAPPROVED_VISIBLE: visible = visible.filter(is_public=True) if not settings.COMMENTS_REMOVED_VISIBLE: visible = visible.filter(is_removed=False) return visible
java
protected final CnvIbnDateToCv createPutCnvIbnDateToCv() throws Exception { CnvIbnDateToCv convrt = new CnvIbnDateToCv(); this.convertersMap .put(CnvIbnDateToCv.class.getSimpleName(), convrt); return convrt; }
python
def find_matlab_version(process_path): """ Tries to guess matlab's version according to its process path. If we couldn't guess the version, None is returned. """ bin_path = os.path.dirname(process_path) matlab_path = os.path.dirname(bin_path) matlab_dir_name = os.path.basename(matlab_path) version = matlab_dir_name if not is_linux(): version = matlab_dir_name.replace('MATLAB_', '').replace('.app', '') if not is_valid_release_version(version): return None return version
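Hypothetical inputs and outputs for find_matlab_version (assuming is_linux() and is_valid_release_version() behave as their names suggest):

find_matlab_version('/usr/local/MATLAB/R2017b/bin/matlab')         # Linux -> 'R2017b'
find_matlab_version('/Applications/MATLAB_R2017b.app/bin/matlab')  # macOS -> 'R2017b'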
java
public boolean areRepairLogsComplete() { for (Entry<Long, ReplicaRepairStruct> entry : m_replicaRepairStructs.entrySet()) { if (!entry.getValue().logsComplete()) { return false; } } return true; }
java
private boolean isAcceptableCandidate(int targetLength, ViterbiNode glueBase, ViterbiNode candidate) { return (glueBase == null || candidate.getSurface().length() < glueBase.getSurface().length()) && candidate.getSurface().length() >= targetLength; }
python
def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr'])
java
public void handleDecode(Result rawResult, Bitmap barcode, float scaleFactor) { inactivityTimer.onActivity(); lastResult = rawResult; ResultHandler resultHandler = ResultHandlerFactory.makeResultHandler(this, rawResult); boolean fromLiveScan = barcode != null; if (fromLiveScan) { historyManager.addHistoryItem(rawResult, resultHandler); // Then not from history, so beep/vibrate and we have an image to draw on beepManager.playBeepSoundAndVibrate(); drawResultPoints(barcode, scaleFactor, rawResult); } switch (source) { case NATIVE_APP_INTENT: case PRODUCT_SEARCH_LINK: handleDecodeExternally(rawResult, resultHandler, barcode); break; case ZXING_LINK: if (scanFromWebPageManager == null || !scanFromWebPageManager.isScanFromWebPage()) { handleDecodeInternally(rawResult, resultHandler, barcode); } else { handleDecodeExternally(rawResult, resultHandler, barcode); } break; case NONE: SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this); if (fromLiveScan && prefs.getBoolean(PreferencesActivity.KEY_BULK_MODE, false)) { Toast.makeText(getApplicationContext(), getResources().getString(R.string.msg_bulk_mode_scanned) + " (" + rawResult.getText() + ')', Toast.LENGTH_SHORT).show(); maybeSetClipboard(resultHandler); // Wait a moment or else it will scan the same barcode continuously about 3 times restartPreviewAfterDelay(BULK_MODE_SCAN_DELAY_MS); } else { handleDecodeInternally(rawResult, resultHandler, barcode); } break; } }
java
protected void setResultType(MappedStatement ms, Class<?> entityClass) { EntityTable entityTable = EntityHelper.getEntityTable(entityClass); List<ResultMap> resultMaps = new ArrayList<ResultMap>(); resultMaps.add(entityTable.getResultMap(ms.getConfiguration())); MetaObject metaObject = MetaObjectUtil.forObject(ms); metaObject.setValue("resultMaps", Collections.unmodifiableList(resultMaps)); }
java
private State handleResetChunk(CellChunk cellChunk) { validate(cellChunk.getRowKey().isEmpty(), "Reset chunks can't have row keys"); validate(!cellChunk.hasFamilyName(), "Reset chunks can't have families"); validate(!cellChunk.hasQualifier(), "Reset chunks can't have qualifiers"); validate(cellChunk.getTimestampMicros() == 0, "Reset chunks can't have timestamps"); validate(cellChunk.getValueSize() == 0, "Reset chunks can't have value sizes"); validate(cellChunk.getValue().isEmpty(), "Reset chunks can't have values"); reset(); return currentState; // AWAITING_NEW_ROW }
python
def merge_partition(self, partition, path, value): """ Merge a value into a partition for a key path. """ dct = self.partitions[partition] *heads, tail = path for part in heads: dct = dct.setdefault(part, dict()) dct[tail] = value
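A standalone sketch of the path-merge step above (Python 3 starred unpacking; the names are illustrative only):

partitions = {'p': {}}
dct = partitions['p']
*heads, tail = ['a', 'b', 'c']  # the key path
for part in heads:
    dct = dct.setdefault(part, dict())
dct[tail] = 42
assert partitions == {'p': {'a': {'b': {'c': 42}}}}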
python
def add_comment(self, comment, metadata=""): """ Add a canned comment :type comment: str :param comment: New canned comment :type metadata: str :param metadata: Optional metadata :rtype: dict :return: A dictionary containing canned comment description """ data = { 'comment': comment, 'metadata': metadata } return self.post('createComment', data)
python
def parse_value(self, stream): """ Value ::= (SimpleValue | Set | Sequence) WSC UnitsExpression? """ if self.has_sequence(stream): value = self.parse_sequence(stream) elif self.has_set(stream): value = self.parse_set(stream) else: value = self.parse_simple_value(stream) self.skip_whitespace_or_comment(stream) if self.has_units(stream): return Units(value, self.parse_units(stream)) return value
python
def debug(func): """ Decorator that prints a message whenever a function is entered or left. """ @wraps(func) def wrapped(*args, **kwargs): arg = repr(args) + ' ' + repr(kwargs) sys.stdout.write('Entering ' + func.__name__ + arg + '\n') try: result = func(*args, **kwargs) except: sys.stdout.write('Traceback caught:\n') # format_exception returns a list of strings, so join before writing sys.stdout.write(''.join(format_exception(*sys.exc_info()))) raise arg = repr(result) sys.stdout.write('Leaving ' + func.__name__ + '(): ' + arg + '\n') return result return wrapped
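Usage sketch for the decorator (assumes the wraps, sys, and format_exception imports the record relies on are in scope):

@debug
def add(a, b):
    return a + b

add(2, 3)
# stdout: Entering add(2, 3) {}
#         Leaving add(): 5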
python
def ld_prune(df, ld_beds, snvs=None): """ Prune set of GWAS based on LD and significance. A graph of all SNVs is constructed with edges for LD >= 0.8 and the most significant SNV per connected component is kept. Parameters ---------- df : pandas.DataFrame Pandas dataframe with unique SNVs. The index is of the form chrom:pos where pos is the one-based position of the SNV. The columns must include chrom, start, end, and pvalue. chrom, start, end make a zero-based bed file with the SNV coordinates. ld_beds : dict Dict whose keys are chromosomes and whose values are filenames of tabixed LD bed files. An LD bed file looks like "chr1 11007 11008 11008:11012:1" where the first three columns are the zero-based half-open coordinate of the SNV and the fourth column has the one-based coordinate of the SNV followed by the one-based coordinate of a different SNV and the LD between them. In this example, the variants are in perfect LD. The bed file should also contain the reciprocal line for this LD relationship: "chr1 11011 11012 11012:11008:1". snvs : list List of SNVs to filter against. If a SNV is not in this list, it will not be included. If you are working with GWAS SNPs, this is useful for filtering out SNVs that aren't in the SNPsnap database for instance. Returns ------- out : pandas.DataFrame Pandas dataframe in the same format as the input dataframe but with only independent SNVs. """ import networkx as nx import pandas as pd import tabix if snvs: df = df.loc[list(set(df.index) & set(snvs))] keep = set() for chrom in ld_beds.keys(): tdf = df[df['chrom'].astype(str) == chrom] if tdf.shape[0] > 0: f = tabix.open(ld_beds[chrom]) # Make a dict where each key is a SNP and the values are all of the # other SNPs in LD with the key. ld_d = {} for j in tdf.index: p = tdf.loc[j, 'end'] ld_d[p] = [] try: r = f.query(chrom, p - 1, p) while True: try: n = next(r) p1, p2, r2 = n[-1].split(':') if float(r2) >= 0.8: ld_d[p].append(int(p2)) except StopIteration: break except tabix.TabixError: continue # Make adjacency matrix for LD. cols = sorted(list(set( [item for sublist in ld_d.values() for item in sublist]))) t = pd.DataFrame(0, index=list(ld_d.keys()), columns=cols) for k in ld_d.keys(): t.loc[k, ld_d[k]] = 1 t.index = ['{}:{}'.format(chrom, x) for x in t.index] t.columns = ['{}:{}'.format(chrom, x) for x in t.columns] # Keep all SNPs not in LD with any others. These will be in the index # but not in the columns. keep |= set(t.index) - set(t.columns) # Filter so we only have SNPs that are in LD with at least one other # SNP. ind = list(set(t.columns) & set(t.index)) # Keep one most sig. SNP per connected subgraph. t = t.loc[ind, ind] g = nx.Graph(t.values) c = nx.connected_components(g) while True: try: sg = next(c) s = tdf.loc[t.index[list(sg)]] keep.add(s[s.pvalue == s.pvalue.min()].index[0]) except StopIteration: break out = df.loc[list(keep)] return out
java
public final BuilderType setHeaderIconTintMode(@NonNull final PorterDuff.Mode mode) { getProduct().setHeaderIconTintMode(mode); return self(); }
python
def _GetExtractionErrorsAsWarnings(self): """Retrieves errors from the store, and converts them to warnings. This method is for backwards compatibility with pre-20190309 storage format stores which used ExtractionError attribute containers. Yields: ExtractionWarning: extraction warnings. """ for extraction_error in self._GetAttributeContainers( self._CONTAINER_TYPE_EXTRACTION_ERROR): error_attributes = extraction_error.CopyToDict() warning = warnings.ExtractionWarning() warning.CopyFromDict(error_attributes) yield warning
python
def run(self): """ Mounts the various shares, prompting the user for the domain username and password. """ logging.info('start run with "{}" at {}'.format( self.username, datetime.datetime.now())) progress = Progress(text="Controllo requisiti software...", pulsate=True, auto_close=True) progress(1) try: self.requirements() except LockFailedException as lfe: ErrorMessage('Errore "{}" probabilmente l\'utente {} non ha i' ' diritti di amministratore'.format(lfe, self.username)) sys.exit(20) except Exception as e: ErrorMessage("Si e' verificato un errore generico: {}".format(e)) sys.exit(21) progress(100) self.set_shares() # ask for the domain username insert_msg = "Inserisci l'utente del Dominio/Posta Elettronica" default_username = (self.host_username if self.host_username else os.environ['USER']) self.domain_username = GetText(text=insert_msg, entry_text=self.username) if self.domain_username is None or len(self.domain_username) == 0: error_msg = "Inserimento di un username di dominio vuoto" ErrorMessage(self.msg_error % error_msg) sys.exit(2) # ask for the domain password insert_msg = u"Inserisci la password del Dominio/Posta Elettronica" self.domain_password = GetText(text=insert_msg, entry_text='password', password=True) if self.domain_password is None or len(self.domain_password) == 0: error_msg = u"Inserimento di una password di dominio vuota" ErrorMessage(self.msg_error % error_msg) sys.exit(3) progress_msg = u"Collegamento unità di rete in corso..." progress = Progress(text=progress_msg, pulsate=True, auto_close=True) progress(1) # loop to mount every share result = [] for share in self.samba_shares: # print("#######") # print(share) if 'mountpoint' not in share.keys(): # build the string representing the local mount point mountpoint = os.path.expanduser( '~%s/%s/%s' % (self.host_username, share['hostname'], share['share'])) share.update({'mountpoint': mountpoint}) elif not share['mountpoint'].startswith('/'): mountpoint = os.path.expanduser( '~%s/%s' % (self.host_username, share['mountpoint'])) share.update({'mountpoint': mountpoint}) share.update({ 'host_username': self.host_username, 'domain_username': share.get( 'username', self.domain_username), 'domain_password': share.get( 'password', self.domain_password)}) # make sure the local mount point exists, creating it otherwise if not os.path.exists(share['mountpoint']): if self.verbose: logging.warning('Mountpoint "%s" does not exist.' % share['mountpoint']) if not self.dry_run: os.makedirs(share['mountpoint']) # unmount the share before remounting it umont_cmd = self.cmd_umount % share if self.verbose: logging.warning("Umount command: %s" % umont_cmd) if not self.dry_run: umount_p = subprocess.Popen(umont_cmd, shell=True) returncode = umount_p.wait() time.sleep(2) mount_cmd = self.cmd_mount % share if self.verbose: placeholder = ",password=" logging.warning("Mount command: %s%s" % (mount_cmd.split( placeholder)[0], placeholder + "******\"")) # print(mount_cmd) # print("#######") if not self.dry_run: # mount the share p_mnt = subprocess.Popen(mount_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) returncode = p_mnt.wait() result.append({'share': share['share'], 'returncode': returncode, 'stdout': p_mnt.stdout.read(), 'stderr': p_mnt.stderr.read()}) progress(100) if self.verbose: logging.warning("Risultati: %s" % result)
java
private static TriFunction<AnyBiPredicate, MetricValue, MetricValue, Optional<Boolean>> select_(SelType x, SelType y) {
  switch (x) {
    case BOOLEAN:
      switch (y) {
        case BOOLEAN: {
          TriFunction<AnyBiPredicate, Boolean, Boolean, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getBoolValue).bind3_fn(MetricValue::getBoolValue);
        }
        case INT: {
          TriFunction<AnyBiPredicate, Boolean, Long, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getBoolValue).bind3_fn(MetricValue::getIntValue);
        }
        case FLOAT: {
          TriFunction<AnyBiPredicate, Boolean, Double, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getBoolValue).bind3_fn(MetricValue::getFltValue);
        }
        case STRING: {
          TriFunction<AnyBiPredicate, Boolean, String, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getBoolValue).bind3_fn(MetricValue::getStrValue);
        }
        case HISTOGRAM: {
          TriFunction<AnyBiPredicate, Boolean, Histogram, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getBoolValue).bind3_fn(MetricValue::getHistValue);
        }
        case NONE:
          return AnyBiPredicate::unbound_;
      }
      break;
    case INT:
      switch (y) {
        case BOOLEAN: {
          TriFunction<AnyBiPredicate, Long, Boolean, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getIntValue).bind3_fn(MetricValue::getBoolValue);
        }
        case INT: {
          TriFunction<AnyBiPredicate, Long, Long, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getIntValue).bind3_fn(MetricValue::getIntValue);
        }
        case FLOAT: {
          TriFunction<AnyBiPredicate, Long, Double, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getIntValue).bind3_fn(MetricValue::getFltValue);
        }
        case STRING: {
          TriFunction<AnyBiPredicate, Long, String, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getIntValue).bind3_fn(MetricValue::getStrValue);
        }
        case HISTOGRAM: {
          TriFunction<AnyBiPredicate, Long, Histogram, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getIntValue).bind3_fn(MetricValue::getHistValue);
        }
        case NONE:
          return AnyBiPredicate::unbound_;
      }
      break;
    case FLOAT:
      switch (y) {
        case BOOLEAN: {
          TriFunction<AnyBiPredicate, Double, Boolean, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getFltValue).bind3_fn(MetricValue::getBoolValue);
        }
        case INT: {
          TriFunction<AnyBiPredicate, Double, Long, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getFltValue).bind3_fn(MetricValue::getIntValue);
        }
        case FLOAT: {
          TriFunction<AnyBiPredicate, Double, Double, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getFltValue).bind3_fn(MetricValue::getFltValue);
        }
        case STRING: {
          TriFunction<AnyBiPredicate, Double, String, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getFltValue).bind3_fn(MetricValue::getStrValue);
        }
        case HISTOGRAM: {
          TriFunction<AnyBiPredicate, Double, Histogram, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getFltValue).bind3_fn(MetricValue::getHistValue);
        }
        case NONE:
          return AnyBiPredicate::unbound_;
      }
      break;
    case STRING:
      switch (y) {
        case BOOLEAN: {
          TriFunction<AnyBiPredicate, String, Boolean, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getStrValue).bind3_fn(MetricValue::getBoolValue);
        }
        case INT: {
          TriFunction<AnyBiPredicate, String, Long, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getStrValue).bind3_fn(MetricValue::getIntValue);
        }
        case FLOAT: {
          TriFunction<AnyBiPredicate, String, Double, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getStrValue).bind3_fn(MetricValue::getFltValue);
        }
        case STRING: {
          TriFunction<AnyBiPredicate, String, String, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getStrValue).bind3_fn(MetricValue::getStrValue);
        }
        case HISTOGRAM: {
          TriFunction<AnyBiPredicate, String, Histogram, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getStrValue).bind3_fn(MetricValue::getHistValue);
        }
        case NONE:
          return AnyBiPredicate::unbound_;
      }
      break;
    case HISTOGRAM:
      switch (y) {
        case BOOLEAN: {
          TriFunction<AnyBiPredicate, Histogram, Boolean, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getHistValue).bind3_fn(MetricValue::getBoolValue);
        }
        case INT: {
          TriFunction<AnyBiPredicate, Histogram, Long, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getHistValue).bind3_fn(MetricValue::getIntValue);
        }
        case FLOAT: {
          TriFunction<AnyBiPredicate, Histogram, Double, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getHistValue).bind3_fn(MetricValue::getFltValue);
        }
        case STRING: {
          TriFunction<AnyBiPredicate, Histogram, String, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getHistValue).bind3_fn(MetricValue::getStrValue);
        }
        case HISTOGRAM: {
          TriFunction<AnyBiPredicate, Histogram, Histogram, Optional<Boolean>> fn = AnyBiPredicate::predicate;
          return fn.bind2_fn(MetricValue::getHistValue).bind3_fn(MetricValue::getHistValue);
        }
        case NONE:
          return AnyBiPredicate::unbound_;
      }
      break;
    case NONE:
      switch (y) {
        case BOOLEAN:
        case INT:
        case FLOAT:
        case STRING:
        case HISTOGRAM: // added: this case was missing in the original, so (NONE, HISTOGRAM) fell through to the throw below
        case NONE:
          return AnyBiPredicate::unbound_;
      }
  }
  throw new IllegalStateException("Unrecognized Selection type");
}
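The nested switch above is hand-rolled double dispatch: each (x, y) type pair selects the pair of extractors to bind before the predicate runs. A minimal Python sketch of the same idea, with made-up type tags and field names, purely for illustration:

import operator

# Map a type tag to the extractor that pulls that field out of a value record.
GETTERS = {
    'BOOLEAN': lambda mv: mv['bool'],
    'INT':     lambda mv: mv['int'],
    'FLOAT':   lambda mv: mv['float'],
}

def select(x, y):
    """Return a function (pred, a, b) -> result, bound to the (x, y) extractors."""
    if x == 'NONE' or y == 'NONE':
        return lambda pred, a, b: None  # unbound: nothing to compare
    gx, gy = GETTERS[x], GETTERS[y]
    return lambda pred, a, b: pred(gx(a), gy(b))

eq = select('INT', 'FLOAT')
print(eq(operator.eq, {'int': 3}, {'float': 3.0}))  # True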
java
@Nullable
public synchronized V remove(K key) {
    V oldValue = mMap.remove(key);
    mSizeInBytes -= getValueSizeInBytes(oldValue);
    return oldValue;
}
python
def number_aware_alphabetical_cmp(str1, str2):
    """
    cmp function for sorting a list of strings by alphabetical order, but
    with numbers sorted numerically, i.e. foo1, foo2, foo10, foo11 instead
    of foo1, foo10, foo11, foo2.
    """
    def flatten_tokens(tokens):
        l = []
        for token in tokens:
            if isinstance(token, str):
                for char in token:
                    l.append(char)
            else:
                assert isinstance(token, float)
                l.append(token)
        return l

    seq1 = flatten_tokens(tokenize_by_number(str1))
    seq2 = flatten_tokens(tokenize_by_number(str2))
    l = min(len(seq1), len(seq2))
    i = 0
    while i < l:
        if seq1[i] < seq2[i]:
            return -1
        elif seq1[i] > seq2[i]:
            return 1
        i += 1
    if len(seq1) < len(seq2):
        return -1
    elif len(seq1) > len(seq2):
        return 1
    return 0
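In Python 3, cmp functions like this are used with functools.cmp_to_key. Since tokenize_by_number is not shown here, the sketch below assumes a minimal tokenizer that splits a string into alternating text and float tokens; the real helper may differ.

import re
from functools import cmp_to_key

def tokenize_by_number(s):
    # Assumed minimal tokenizer: split 'foo10' into ['foo', 10.0].
    return [float(tok) if tok[0].isdigit() else tok
            for tok in re.findall(r'\d+|\D+', s)]

names = ['foo10', 'foo2', 'foo1', 'foo11']
print(sorted(names, key=cmp_to_key(number_aware_alphabetical_cmp)))
# ['foo1', 'foo2', 'foo10', 'foo11']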
python
def _get_parent_classes_transparent(cls, slot, page, instance=None):
    """
    Return all parent classes including those marked as "transparent".
    """
    parent_classes = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance)
    if parent_classes is None:
        if cls.get_require_parent(slot, page) is False:
            return
        parent_classes = []
    # add all plugins marked as 'transparent', since they all are potential parents
    parent_classes = set(parent_classes)
    parent_classes.update(TransparentContainer.get_plugins())
    return list(parent_classes)
python
def _cl_int_plot_top_losses(self, k, largest=True, figsize=(12, 12), heatmap: bool = True,
                            heatmap_thresh: int = 16, return_fig: bool = None) -> Optional[plt.Figure]:
    "Show images in `top_losses` along with their prediction, actual, loss, and probability of actual class."
    tl_val, tl_idx = self.top_losses(k, largest)
    classes = self.data.classes
    cols = math.ceil(math.sqrt(k))
    rows = math.ceil(k / cols)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    fig.suptitle('prediction/actual/loss/probability', weight='bold', size=14)
    for i, idx in enumerate(tl_idx):
        im, cl = self.data.dl(self.ds_type).dataset[idx]
        cl = int(cl)
        im.show(ax=axes.flat[i], title=
                f'{classes[self.pred_class[idx]]}/{classes[cl]} / {self.losses[idx]:.2f} / {self.probs[idx][cl]:.2f}')
        if heatmap:
            xb, _ = self.data.one_item(im, detach=False, denorm=False)
            m = self.learn.model.eval()
            with hook_output(m[0]) as hook_a:
                with hook_output(m[0], grad=True) as hook_g:
                    preds = m(xb)
                    preds[0, cl].backward()
            acts = hook_a.stored[0].cpu()
            if (acts.shape[-1] * acts.shape[-2]) >= heatmap_thresh:
                grad = hook_g.stored[0][0].cpu()
                grad_chan = grad.mean(1).mean(1)
                mult = F.relu((acts * grad_chan[..., None, None]).sum(0))
                sz = list(im.shape[-2:])
                axes.flat[i].imshow(mult, alpha=0.6, extent=(0, *sz[::-1], 0),
                                    interpolation='bilinear', cmap='magma')
    if ifnone(return_fig, defaults.return_fig):
        return fig
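If this matches fastai v1, where this method is exposed on ClassificationInterpretation as plot_top_losses, a typical call looks like the sketch below; the trained learn object and the v1 import style are assumptions, not shown in the snippet.

from fastai.vision import *  # fastai v1 style import (assumed version)

# learn = cnn_learner(data, models.resnet34, metrics=error_rate)  # trained elsewhere
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(9, figsize=(7, 7), heatmap=True)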
python
def bootstrap_salt(name,
                   config=None,
                   approve_key=True,
                   install=True,
                   pub_key=None,
                   priv_key=None,
                   bootstrap_url=None,
                   force_install=False,
                   unconditional_install=False,
                   bootstrap_delay=None,
                   bootstrap_args=None,
                   bootstrap_shell=None):
    '''
    Bootstrap a container from package servers. If dist is None, the OS the
    minion is running as will be created; otherwise the needed bootstrapping
    tools will need to be available on the host.

    CLI Example::

        salt '*' nspawn.bootstrap_salt arch1
    '''
    if bootstrap_delay is not None:
        try:
            time.sleep(bootstrap_delay)
        except TypeError:
            # Bad input, but assume since a value was passed that
            # a delay was desired, and sleep for 5 seconds
            time.sleep(5)

    c_info = info(name)
    if not c_info:
        return None

    # default set here as we cannot set them
    # in def as it can come from a chain of procedures.
    if bootstrap_args:
        # custom bootstrap args can be totally customized, and user could
        # have inserted the placeholder for the config directory.
        # For example, some salt bootstrap scripts do not use -c at all.
        if '{0}' not in bootstrap_args:
            bootstrap_args += ' -c {0}'
    else:
        bootstrap_args = '-c {0}'
    if not bootstrap_shell:
        bootstrap_shell = 'sh'

    orig_state = _ensure_running(name)
    if not orig_state:
        return orig_state
    if not force_install:
        needs_install = _needs_install(name)
    else:
        needs_install = True
    seeded = retcode(name, 'test -e \'{0}\''.format(SEED_MARKER)) == 0
    tmp = tempfile.mkdtemp()
    if seeded and not unconditional_install:
        ret = True
    else:
        ret = False
        cfg_files = __salt__['seed.mkconfig'](
            config, tmp=tmp, id_=name, approve_key=approve_key,
            pub_key=pub_key, priv_key=priv_key)
        if needs_install or force_install or unconditional_install:
            if install:
                rstr = __salt__['test.random_hash']()
                configdir = '/tmp/.c_{0}'.format(rstr)
                run(name,
                    'install -m 0700 -d {0}'.format(configdir),
                    python_shell=False)
                bs_ = __salt__['config.gather_bootstrap_script'](
                    bootstrap=bootstrap_url)
                dest_dir = os.path.join('/tmp', rstr)
                for cmd in [
                        'mkdir -p {0}'.format(dest_dir),
                        'chmod 700 {0}'.format(dest_dir),
                ]:
                    if run_stdout(name, cmd):
                        log.error('tmpdir %s creation failed (%s)', dest_dir, cmd)
                        return False
                copy_to(name, bs_, '{0}/bootstrap.sh'.format(dest_dir), makedirs=True)
                copy_to(name, cfg_files['config'], os.path.join(configdir, 'minion'))
                copy_to(name, cfg_files['privkey'], os.path.join(configdir, 'minion.pem'))
                copy_to(name, cfg_files['pubkey'], os.path.join(configdir, 'minion.pub'))
                bootstrap_args = bootstrap_args.format(configdir)
                cmd = ('{0} {2}/bootstrap.sh {1}'
                       .format(bootstrap_shell,
                               bootstrap_args.replace("'", "''"),
                               dest_dir))
                # log ASAP the forged bootstrap command which can be wrapped
                # out of the output in case of unexpected problem
                log.info('Running %s in LXC container \'%s\'', cmd, name)
                ret = retcode(name, cmd, output_loglevel='info', use_vt=True) == 0
            else:
                ret = False
        else:
            minion_config = salt.config.minion_config(cfg_files['config'])
            pki_dir = minion_config['pki_dir']
            copy_to(name, cfg_files['config'], '/etc/salt/minion')
            copy_to(name, cfg_files['privkey'], os.path.join(pki_dir, 'minion.pem'))
            copy_to(name, cfg_files['pubkey'], os.path.join(pki_dir, 'minion.pub'))
            run(name,
                'salt-call --local service.enable salt-minion',
                python_shell=False)
            ret = True
        shutil.rmtree(tmp)
        if orig_state == 'stopped':
            stop(name)
        # mark seeded upon successful install
        if ret:
            run(name,
                'touch \'{0}\''.format(SEED_MARKER),
                python_shell=False)
    return ret
python
def decompress(self, value):
    """
    Retrieve each field value or provide the initial values
    """
    if value:
        return [value.get(field.name, None) for field in self.fields]
    return [field.field.initial for field in self.fields]
java
@Override
public void addCodeBase(ICodeBaseLocator locator, boolean isApplication) {
    addToWorkList(projectWorkList, new WorkListItem(locator, isApplication, ICodeBase.Discovered.SPECIFIED));
}
java
private void updateBindings(Map<String, Object> props) {
    // Process the user element
    processProps(props, CFG_KEY_USER, users);

    // Process the user-access-id element
    processProps(props, CFG_KEY_USER_ACCESSID, users);

    // Process the group element
    processProps(props, CFG_KEY_GROUP, groups);

    // Process the group-access-id element
    processProps(props, CFG_KEY_GROUP_ACCESSID, groups);
}
java
public static <E extends Enum<? extends Style.HasCssName>> E fromStyleName(final String styleName,
        final Class<E> enumClass, final E defaultValue) {
    return EnumHelper.fromStyleName(styleName, enumClass, defaultValue, true);
}
java
public static byte schemaToColumnType(String s) {
    switch (s.toLowerCase()) {
        case "boolean":
        case "smallint":
        case "tinyint":
        case "bigint": // FIXME: make sure this is fixed by Tomas.
        case "int":
        case "float":
        case "double":
        case "decimal":
            return Vec.T_NUM;
        case "timestamp":
        case "date":
            return Vec.T_TIME;
        case "enum":
            return Vec.T_CAT;
        case "string":
        case "varchar":
        // case "binary":  // Removed binary column type support for now
        case "char":
            return Vec.T_STR;
        default:
            throw new IllegalArgumentException("Unsupported Orc schema type: " + s);
    }
}
python
def __type2js(cls, value):
    """
    :Description: Convert python value to executable javascript value by type.
    :param value: Value to transform.
    :type value: None, bool, int, float, str, dict
    :return: string
    """
    if value is None:
        return 'null'
    elif isinstance(value, bool):
        return 'true' if value else 'false'
    elif isinstance(value, (int, float)):
        return '%s' % value
    elif isinstance(value, dict):
        return json.dumps(value)  # bug fix: the original serialized the built-in `dict` type instead of `value`
    return '"%s"' % value
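A minimal standalone check of the conversions above; the class wrapper and method name are stand-ins (the original is a name-mangled classmethod, and `json` is assumed to be imported in its module):

import json

class JS:
    @classmethod
    def type2js(cls, value):
        # Standalone copy of __type2js for demonstration.
        if value is None:
            return 'null'
        elif isinstance(value, bool):
            return 'true' if value else 'false'
        elif isinstance(value, (int, float)):
            return '%s' % value
        elif isinstance(value, dict):
            return json.dumps(value)
        return '"%s"' % value

print(JS.type2js(None), JS.type2js(True), JS.type2js(3.5), JS.type2js({'a': 1}), JS.type2js('hi'))
# null true 3.5 {"a": 1} "hi"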
python
def pub_connect(self):
    '''
    Create and connect this thread's zmq socket. If a publisher socket
    already exists "pub_close" is called before creating and connecting a
    new socket.
    '''
    if self.pub_sock:
        self.pub_close()
    ctx = zmq.Context.instance()
    self._sock_data.sock = ctx.socket(zmq.PUSH)
    self.pub_sock.setsockopt(zmq.LINGER, -1)
    if self.opts.get('ipc_mode', '') == 'tcp':
        pull_uri = 'tcp://127.0.0.1:{0}'.format(
            self.opts.get('tcp_master_publish_pull', 4514)
        )
    else:
        pull_uri = 'ipc://{0}'.format(
            os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        )
    log.debug("Connecting to pub server: %s", pull_uri)
    self.pub_sock.connect(pull_uri)
    return self._sock_data.sock
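The PUSH-to-PULL pattern used here can be exercised with plain pyzmq; a minimal sketch follows, with the endpoint and payload made up for illustration:

import zmq

ctx = zmq.Context.instance()

# PULL side (the "pub server" the snippet connects to) -- hypothetical endpoint
pull = ctx.socket(zmq.PULL)
pull.bind('tcp://127.0.0.1:4514')

# PUSH side, mirroring pub_connect()
push = ctx.socket(zmq.PUSH)
push.setsockopt(zmq.LINGER, -1)
push.connect('tcp://127.0.0.1:4514')

push.send(b'hello')
print(pull.recv())  # b'hello'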
java
static List<String> getFQDNValueListCMS(JSONObject jObj, String projectionStr) throws JSONException {
    final List<String> labelList = new ArrayList<String>();
    if (!jObj.has("result")) {
        logger.error("!!CMS_ERROR! result key is not in jOBJ in getFQDNValueListCMS!!: \njObj:"
                + PcStringUtils.renderJson(jObj));
        return labelList;
    }
    JSONArray jArr = (JSONArray) jObj.get("result");
    if (jArr == null || jArr.length() == 0) {
        return labelList;
    }
    for (int i = 0; i < jArr.length(); ++i) {
        JSONObject agentObj = jArr.getJSONObject(i);
        // properties can be null
        if (!agentObj.has(projectionStr)) {
            continue;
        }
        String label = (String) agentObj.get(projectionStr);
        if (label != null && !label.trim().isEmpty()) {
            labelList.add(label);
        }
    }
    return labelList;
}
python
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array', **kwargs):
    """Count the number of non-zero elements."""
    return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add,
                       mapper=mapper, blen=blen, storage=storage, create=create,
                       **kwargs)
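The block-wise reduction this wraps is equivalent to counting non-zeros per chunk and summing the partial counts; a sketch with plain numpy (reduce_axis itself is not shown in this snippet, so this only mirrors its reducer/block_reducer contract):

import numpy as np

def count_nonzero_blockwise(data, blen=1000):
    # Reduce each block with np.count_nonzero, combine results with np.add,
    # the same pair of functions passed to reduce_axis above.
    total = 0
    for start in range(0, len(data), blen):
        total = np.add(total, np.count_nonzero(data[start:start + blen]))
    return total

a = np.array([0, 1, 2, 0, 3, 0, 4])
print(count_nonzero_blockwise(a, blen=3), np.count_nonzero(a))  # 4 4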
java
public byte[] ensureBufferHasCapacityLeft(int size) throws FileParsingException {
    if (bytesHolder == null) {
        bytesHolder = new ByteArrayHolder(size);
    } else {
        bytesHolder.ensureHasSpace(size);
    }
    return bytesHolder.getUnderlyingBytes();
}
java
@Override
public String getEnterpriseBeanClassName(Object homeKey) {
    HomeRecord hr = homesByName.get(homeKey); // d366845.3
    return hr.homeInternal.getEnterpriseBeanClassName(homeKey);
}
java
public JsHandlerRegistration addGeometryIndexDisabledHandler(final GeometryIndexDisabledHandler handler) {
    org.geomajas.plugin.editing.client.event.state.GeometryIndexDisabledHandler h;
    h = new org.geomajas.plugin.editing.client.event.state.GeometryIndexDisabledHandler() {

        public void onGeometryIndexDisabled(GeometryIndexDisabledEvent event) {
            org.geomajas.plugin.editing.jsapi.client.event.state.GeometryIndexDisabledEvent e;
            List<GeometryIndex> indices = event.getIndices();
            e = new org.geomajas.plugin.editing.jsapi.client.event.state.GeometryIndexDisabledEvent(
                    event.getGeometry(), indices.toArray(new GeometryIndex[indices.size()]));
            handler.onGeometryIndexDisabled(e);
        }
    };
    return new JsHandlerRegistration(new HandlerRegistration[] { delegate.addGeometryIndexDisabledHandler(h) });
}
java
public Calendar ceil(long t) {
    Calendar cal = new GregorianCalendar(Locale.US);
    cal.setTimeInMillis(t);
    return ceil(cal);
}
python
def _validate_python_type(self, python_type):
    """Validate the possible combinations of python_type and type_name."""

    if python_type == 'bool':
        if self.variable:
            raise ArgumentError("You can only specify a bool python type on a scalar (non-array) type_name",
                                type_name=self.type_name)
        return

    if python_type == 'string':
        if not (self.variable and self.unit_size == 1):
            raise ArgumentError("You can only pass a string python type on an array of 1-byte objects",
                                type_name=self.type_name)
        return

    if python_type is not None:
        raise ArgumentError("You can only declare a bool or string python type. Otherwise it must be passed as None",
                            python_type=python_type)
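A compact, self-contained illustration of the accepted combinations; ArgumentError, TypeInfo, and the example type names are stand-ins for the originals, and the validator is assumed callable as a plain function with an object carrying the attributes it reads:

class ArgumentError(Exception):
    """Stand-in for the project's ArgumentError (assumed keyword-context signature)."""
    def __init__(self, msg, **kwargs):
        super().__init__(msg)
        self.params = kwargs

class TypeInfo:
    """Minimal holder for the attributes the validator reads."""
    def __init__(self, type_name, variable, unit_size):
        self.type_name, self.variable, self.unit_size = type_name, variable, unit_size

_validate_python_type(TypeInfo('uint8_t', False, 1), 'bool')    # ok: bool on a scalar
_validate_python_type(TypeInfo('char[]', True, 1), 'string')    # ok: string on a 1-byte array
_validate_python_type(TypeInfo('uint16_t[]', True, 2), None)    # ok: None is always accepted
try:
    _validate_python_type(TypeInfo('uint8_t[]', True, 1), 'bool')
except ArgumentError as err:
    print(err)  # bool is rejected on arrays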
java
protected static List<String> splitViewKeys(String viewKeysString) {
    List<String> splits = new ArrayList<>();
    char[] chars = viewKeysString.toCharArray();
    boolean betweenSquareBraces = false;
    boolean betweenQuotes = false;
    int lastMatch = 0;
    for (int i = 0; i < chars.length; i++) {
        if ((chars[i] == KEYS_STRING_SEPARATOR) && !betweenQuotes && !betweenSquareBraces) {
            splits.add(viewKeysString.substring(lastMatch, i));
            lastMatch = i + 1;
        }
        if ((chars[i] == '"') && (i == 0 || chars[i - 1] != '\\')) {
            betweenQuotes = !betweenQuotes; // toggle betweenQuotes
        }
        if ((chars[i] == '[') && (i == 0 || chars[i - 1] != '\\')) {
            betweenSquareBraces = true;
        }
        if ((chars[i] == ']') && (i == 0 || chars[i - 1] != '\\')) {
            betweenSquareBraces = false;
        }
    }
    // add last key if needed
    if (lastMatch < chars.length) {
        splits.add(viewKeysString.substring(lastMatch, chars.length));
    }
    return splits;
}
python
def format_dapi_score(cls, meta, offset):
    '''Format the line with DAPI user rating and number of votes'''
    # bug fix: the original tested `if 'average_rank' and 'rank_count' in meta:`,
    # which only checks membership of 'rank_count' because the non-empty string
    # 'average_rank' is always truthy
    if 'average_rank' in meta and 'rank_count' in meta:
        label = (cls._nice_strings['average_rank'] + ':').ljust(offset + 2)
        score = cls._format_field(meta['average_rank'])
        votes = ' ({num} votes)'.format(num=meta['rank_count'])
        return label + score + votes
    else:
        return ''
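A quick check with made-up metadata; the Formatter class, its label table, and the trivial _format_field are stand-ins for the real class attributes:

class Formatter:
    _nice_strings = {'average_rank': 'Average rank'}  # stand-in label table

    @staticmethod
    def _format_field(value):
        # stand-in for the real formatter; assumed to stringify the value
        return str(value)

meta = {'average_rank': 4.6, 'rank_count': 23}
print(format_dapi_score(Formatter, meta, offset=12))
# Average rank: 4.6 (23 votes)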
python
def get_cdpp(self, flux=None):
    '''
    Returns the scalar CDPP for the light curve.
    '''
    if flux is None:
        flux = self.flux
    return self._mission.CDPP(self.apply_mask(flux), cadence=self.cadence)
java
protected void closeCDATA() throws org.xml.sax.SAXException {
    try {
        m_writer.write(CDATA_DELIMITER_CLOSE); // write out a CDATA section closing "]]>"
        m_cdataTagOpen = false; // Remember that we have done so.
    } catch (IOException e) {
        throw new SAXException(e);
    }
}
python
def set_appium_timeout(self, seconds):
    """Sets the timeout in seconds used by various keywords.

    There are several `Wait ...` keywords that take timeout as an
    argument. All of these timeout arguments are optional. The timeout
    used by all of them can be set globally using this keyword.

    The previous timeout value is returned by this keyword and can
    be used to set the old value back later. The default timeout
    is 5 seconds, but it can be altered in `importing`.

    Example:
    | ${orig timeout} = | Set Appium Timeout | 15 seconds |
    | Open page that loads slowly |
    | Set Appium Timeout | ${orig timeout} |
    """
    old_timeout = self.get_appium_timeout()
    self._timeout_in_secs = robot.utils.timestr_to_secs(seconds)
    return old_timeout
python
def _lastWord(self, text):
    """Move backward to the start of the word at the end of a string.
    Return the word.
    """
    for index, char in enumerate(text[::-1]):
        if char.isspace() or \
           char in ('(', ')'):
            return text[len(text) - index:]
    else:
        return text
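A quick behavior check; since self is unused, the method can be exercised as a plain function by passing None, which is only a testing convenience, not how the editor calls it:

print(_lastWord(None, 'import col'))  # 'col'   (word after the last space)
print(_lastWord(None, 'f(x'))         # 'x'     (word after the last paren)
print(_lastWord(None, 'word'))        # 'word'  (no delimiter found)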
java
public static Class<?> getPropertyClass(Class<?> beanClass, String field) {
    return INSTANCE.findPropertyClass(beanClass, field);
}
python
def all_enclosing_scopes(scope, allow_global=True):
    """Utility function to return all scopes up to the global scope enclosing a given scope."""

    _validate_full_scope(scope)

    # TODO: validate scopes here and/or in `enclosing_scope()` instead of assuming correctness.
    def scope_within_range(tentative_scope):
        if tentative_scope is None:
            return False
        if not allow_global and tentative_scope == GLOBAL_SCOPE:
            return False
        return True

    while scope_within_range(scope):
        yield scope
        scope = (None if scope == GLOBAL_SCOPE else enclosing_scope(scope))
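A sketch of how the generator walks outward, with stand-in helpers; GLOBAL_SCOPE as the empty string and the dotted-component behavior of enclosing_scope are assumptions, since the real definitions live elsewhere:

GLOBAL_SCOPE = ''  # assumed: the global scope is the empty string

def _validate_full_scope(scope):
    # stand-in: the real validator enforces scope naming rules
    assert isinstance(scope, str)

def enclosing_scope(scope):
    # assumed behavior: drop the last dotted component; '' encloses everything
    return scope.rpartition('.')[0]

print(list(all_enclosing_scopes('compile.java')))
# ['compile.java', 'compile', '']
print(list(all_enclosing_scopes('compile.java', allow_global=False)))
# ['compile.java', 'compile']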
python
def generic_add(a, b):
    """Simple function to add two numbers"""
    logger.debug('Called generic_add({}, {})'.format(a, b))
    return a + b
java
protected List<PropertyData> getChildProps(String parentId, boolean withValue) {
    return getChildProps.run(parentId, withValue);
}