language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/** Temporarily disables this view for {@code disableDuration} ms when auto-disable is active; no-op otherwise. */
private void delayEnableMyself() {
    if (autoDisable) {
        setEnabled(false);
        // Re-enable after the configured cool-down window has elapsed.
        postDelayed(new Runnable() {
            @Override
            public void run() {
                setEnabled(true);
            }
        }, disableDuration);
    }
}
java
/** Runs the pre-execution client hook on the request, then dispatches the reboot call. */
@Override
public DBInstance rebootDBInstance(RebootDBInstanceRequest request) {
    final RebootDBInstanceRequest prepared = beforeClientExecution(request);
    return executeRebootDBInstance(prepared);
}
python
def ToPhotlam(self, wave, flux, **kwargs):
    """Convert to ``photlam``.

    .. math::

        \\textnormal{photlam} = 10^{-0.4 \\; \\textnormal{vegamag}} \\; f_{\\textnormal{Vega}}

    where :math:`f_{\\textnormal{Vega}}` is the flux of
    :ref:`pysynphot-vega-spec` resampled at the given wavelength values
    and converted to ``photlam``.

    Parameters
    ----------
    wave, flux : number or array_like
        Wavelength and flux values to be used for conversion.

    kwargs : dict
        Extra keywords (not used).

    Returns
    -------
    result : number or array_like
        Converted values.

    """
    vega_flux = self.vegaspec.resample(wave).flux
    return vega_flux * 10.0 ** (-0.4 * flux)
java
// EMF-generated reflective setter: routes a feature ID to the matching typed
// setter (XPOS/YPOS), or replaces the RG collection wholesale (clear + addAll);
// any other feature ID is delegated to the superclass implementation.
@SuppressWarnings("unchecked") @Override public void eSet(int featureID, Object newValue) { switch (featureID) { case AfplibPackage.GRLINE__XPOS: setXPOS((Integer)newValue); return; case AfplibPackage.GRLINE__YPOS: setYPOS((Integer)newValue); return; case AfplibPackage.GRLINE__RG: getRg().clear(); getRg().addAll((Collection<? extends GRLINERG>)newValue); return; } super.eSet(featureID, newValue); }
java
// Fixed-size 3x3 matrix-vector multiply: c = a * b, fully unrolled.
// NOTE(review): b is re-read while c is being written, so passing the same
// object for b and c would use partially overwritten values — confirm callers
// never alias the two. Statement order is therefore left untouched.
public static void mult( DMatrix3x3 a , DMatrix3 b , DMatrix3 c) { c.a1 = a.a11*b.a1 + a.a12*b.a2 + a.a13*b.a3; c.a2 = a.a21*b.a1 + a.a22*b.a2 + a.a23*b.a3; c.a3 = a.a31*b.a1 + a.a32*b.a2 + a.a33*b.a3; }
java
// Builds a list of `len` random numbers between min and max, where `len` is
// itself drawn from [lenMin, lenMax] via the sibling random(long, long)
// overload.
// NOTE(review): returns null (not an empty list) when the drawn length is
// <= 0, and uses raw List/ArrayList types — callers must null-check; kept
// as-is for backward compatibility.
public static List random(Number min, Number max, long lenMin, long lenMax) { long len = random(lenMin, lenMax).longValue(); if (len > 0) { List ret = new ArrayList(); for (int i = 0; i < len; i++) { ret.add(random(min, max)); } return ret; } return null; }
java
// Returns the Base64-encoded hash of this credential's public certificate
// (DER-encoded form), computed by AsymmetricKeyCredential.getHash.
// @throws CertificateEncodingException if the certificate cannot be DER-encoded
// @throws NoSuchAlgorithmException if the hash algorithm used by getHash is unavailable
public String getPublicCertificateHash() throws CertificateEncodingException, NoSuchAlgorithmException { return Base64.encodeBase64String(AsymmetricKeyCredential .getHash(this.publicCertificate.getEncoded())); }
python
# NOTE(review): in the non-recursive branch, compat.has_scandir selects between
# DirEntry-style results (entry.name/.is_file) and plain-name results; the
# recursive branch deliberately rebinds `root` per os.walk convention.
def iter_files(root, exts=None, recursive=False): """ Iterate over file paths within root filtered by specified extensions. :param compat.string_types root: Root folder to start collecting files :param iterable exts: Restrict results to given file extensions :param bool recursive: Whether to walk the complete directory tree :rtype collections.Iterable[str]: absolute file paths with given extensions """ if exts is not None: exts = set((x.lower() for x in exts)) def matches(e): return (exts is None) or (e in exts) if recursive is False: for entry in compat.scandir(root): if compat.has_scandir: ext = splitext(entry.name)[-1].lstrip('.').lower() if entry.is_file() and matches(ext): yield entry.path else: ext = splitext(entry)[-1].lstrip('.').lower() if not compat.isdir(entry) and matches(ext): yield join(root, entry) else: for root, folders, files in compat.walk(root): for f in files: ext = splitext(f)[-1].lstrip('.').lower() if matches(ext): yield join(root, f)
java
/**
 * Returns the first existing {@code extension-element} child wrapped in its
 * type, creating a fresh one only when none is present.
 */
public ExtensibleType<TldExtensionType<T>> getOrCreateExtensionElement() {
    List<Node> nodeList = childNode.get("extension-element");
    if (nodeList == null || nodeList.isEmpty()) {
        return createExtensionElement();
    }
    return new ExtensibleTypeImpl<TldExtensionType<T>>(this, "extension-element", childNode, nodeList.get(0));
}
java
// Computes the grid origin coordinates for a FormLayout-style container:
// under the AWT tree lock, subtracts insets from the container size, then
// derives the x origins from the column specs and the y origins from the row
// specs via computeGridOrigins (using the min/preferred measures).
public LayoutInfo getLayoutInfo(Container parent) { synchronized (parent.getTreeLock()) { initializeColAndRowComponentLists(); Dimension size = parent.getSize(); Insets insets = parent.getInsets(); int totalWidth = size.width - insets.left - insets.right; int totalHeight = size.height - insets.top - insets.bottom; int[] x = computeGridOrigins(parent, totalWidth, insets.left, colSpecs, colComponents, colGroupIndices, minimumWidthMeasure, preferredWidthMeasure ); int[] y = computeGridOrigins(parent, totalHeight, insets.top, rowSpecs, rowComponents, rowGroupIndices, minimumHeightMeasure, preferredHeightMeasure ); return new LayoutInfo(x, y); } }
java
/** Runs the pre-execution client hook on the request, then dispatches the modify call. */
@Override
public ModifyLaunchTemplateResult modifyLaunchTemplate(ModifyLaunchTemplateRequest request) {
    final ModifyLaunchTemplateRequest prepared = beforeClientExecution(request);
    return executeModifyLaunchTemplate(prepared);
}
java
// Submits raw image bytes to the user-defined image-censor endpoint: the
// bytes are Base64-encoded into the "image" body field and the request is
// delegated to imageCensorUserDefinedHelper along with caller options.
public JSONObject imageCensorUserDefined(byte[] imgData, HashMap<String, String> options) { AipRequest request = new AipRequest(); String base64Content = Base64Util.encode(imgData); request.addBody("image", base64Content); return imageCensorUserDefinedHelper(request, options); }
java
/** Appends an element whose text content is {@code content.toString()}; delegates to the String overload. */
public XmlStringBuilder element(String name, CharSequence content) {
    final String text = content.toString();
    return element(name, text);
}
java
// Convenience overload: resolves both ISO currency codes through
// Monetary.getCurrency and delegates to the CurrencyUnit-based overload.
// Monetary.getCurrency throws (UnknownCurrencyException) for unknown codes.
public ExchangeRate getExchangeRate(String baseCode, String termCode){ return getExchangeRate(Monetary.getCurrency(baseCode), Monetary.getCurrency(termCode)); }
java
public List<String> getMetaKeywords(TypeElement typeElement) { ArrayList<String> results = new ArrayList<>(); // Add field and method keywords only if -keywords option is used if (config.keywords) { results.addAll(getClassKeyword(typeElement)); results.addAll(getMemberKeywords(config.utils.getFields(typeElement))); results.addAll(getMemberKeywords(config.utils.getMethods(typeElement))); } ((ArrayList)results).trimToSize(); return results; }
python
def get_raw_import_values(self):  # pragma: no cover, deprecation
    """Return a dict of selected timeperiod properties plus unresolved entries.

    Timeperiods differ a bit from classic items, so only a fixed subset of
    properties is exported; each unresolved definition is added with its full
    text as the key (the only way to keep such entries apart) and '' as value.

    TODO: never called anywhere, still useful?

    :return: a dictionnary of some properties
    :rtype: dict
    """
    wanted = ('timeperiod_name', 'alias', 'use', 'register')
    res = {prop: getattr(self, prop) for prop in wanted if hasattr(self, prop)}
    # Unresolved entries: full value as key, empty value.
    for other in self.unresolved:
        res[other] = ''
    return res
python
def zero_state(self, batch_size):
    """Return the initial all-zero state tensor, shape (batch_size, state_dim), dtype float32."""
    shape = (batch_size, self.state_dim)
    return torch.zeros(shape, dtype=torch.float32)
java
// Returns the off-heap base address of a direct ByteBuffer by reading the
// private Buffer.address field through Unsafe at a precomputed field offset.
// Heap buffers are rejected up front: their backing array lives in the GC'd
// heap, so no stable native address exists for them.
public static long address(final ByteBuffer buffer) { if (!buffer.isDirect()) { throw new IllegalArgumentException("buffer.isDirect() must be true"); } return UNSAFE.getLong(buffer, BYTE_BUFFER_ADDRESS_FIELD_OFFSET); }
python
# NOTE(review): Python 2-style str subclass — relies on str.decode, which does
# not exist on Python 3 str. The decoded result is re-wrapped in the same
# class with keep_tags=True so color markup survives the round trip.
def decode(self, encoding=None, errors='strict'): """Decode using the codec registered for encoding. encoding defaults to the default encoding. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' as well as any other name registered with codecs.register_error that is able to handle UnicodeDecodeErrors. :param str encoding: Codec. :param str errors: Error handling scheme. """ return self.__class__(super(ColorStr, self).decode(encoding, errors), keep_tags=True)
java
/**
 * Guesses the address type from the URL's device address; defaults to
 * {@code COMPOSITE} when no device address is present.
 */
public static AddressType guessDeviceAddressType(URL url) {
    if (url.getDeviceAddress() != null) {
        return guessAddressType(url.getDeviceAddress());
    }
    return AddressType.COMPOSITE;
}
python
# NOTE(review): the jwtauth settings namespace is copied (dict .copy()) so the
# per-call JWTIdentityPolicy instance cannot mutate app-level configuration.
def verify_refresh_request(request): """ Wrapper around JWTIdentityPolicy.verify_refresh which verify if the request to refresh the token is valid. If valid it returns the userid which can be used to create to create an updated identity with ``remember_identity``. Otherwise it raises an exception based on InvalidTokenError. :param request: request object :type request: :class:`morepath.Request` :returns: userid :raises: InvalidTokenError, ExpiredSignatureError, DecodeError, MissingRequiredClaimError """ jwtauth_settings = request.app.settings.jwtauth.__dict__.copy() identity_policy = JWTIdentityPolicy(**jwtauth_settings) return identity_policy.verify_refresh(request)
java
public static <T> Function<T, Boolean> forPredicate (final Predicate<T> predicate) { return new Function<T, Boolean>() { public Boolean apply (T arg) { return predicate.apply(arg); } }; }
python
def convert_time(time):
    """Convert a time string such as '1:30 p.m.' into 24-hour 'HH:MM' time.

    Strings without an am/pm part (e.g. 'noon') are returned unchanged.

    :param str time: time string, e.g. '11 a.m.' or '1:30 p.m.'
    :return: 24-hour representation ('HH:MM'), or the input unchanged when
        it has no second token to interpret as am/pm.
    """
    split_time = time.split()
    try:
        # Get rid of period in a.m./p.m.
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        time_obj = datetime.strptime(time_str, '%I %p')
    # Bug fix: the previous format '%H:%M %p' appended a spurious AM/PM marker
    # to an already 24-hour value (e.g. '13:30 PM').
    return time_obj.strftime('%H:%M')
java
// Feeds one byte into the run-length encoder for the current block.
// Returns false (byte rejected) once the block has reached its length limit.
// State machine: an empty run starts a new one; a different byte flushes the
// pending run via writeRun and starts a fresh run; a repeated byte extends
// the run, except that at length 254 the run is flushed with count 255 and
// reset — NOTE(review): the 254→255 asymmetry looks like a bzip2-style RLE
// convention handled inside writeRun; confirm against writeRun's contract.
boolean write(final int value) { if (blockLength > blockLengthLimit) { return false; } final int rleCurrentValue = this.rleCurrentValue; final int rleLength = this.rleLength; if (rleLength == 0) { this.rleCurrentValue = value; this.rleLength = 1; } else if (rleCurrentValue != value) { // This path commits us to write 6 bytes - one RLE run (5 bytes) plus one extra writeRun(rleCurrentValue & 0xff, rleLength); this.rleCurrentValue = value; this.rleLength = 1; } else { if (rleLength == 254) { writeRun(rleCurrentValue & 0xff, 255); this.rleLength = 0; } else { this.rleLength = rleLength + 1; } } return true; }
python
# NOTE(review): inspect.getargspec was removed in Python 3.11 — needs
# migration to inspect.getfullargspec/signature. When the wrapped phase
# accepts **kwargs (argspec.keywords is set), every keyword passes the filter
# and is forwarded.
def with_known_args(self, **kwargs): """Send only known keyword-arguments to the phase when called.""" argspec = inspect.getargspec(self.func) stored = {} for key, arg in six.iteritems(kwargs): if key in argspec.args or argspec.keywords: stored[key] = arg if stored: return self.with_args(**stored) return self
python
# NOTE(review): Python 2 only — uses a print statement and unicode(); a py3
# port would need print(...) and str(). Papers whose 'citations' entry is
# None are skipped rather than treated as an error.
def citation_count(papers, key='ayjid', verbose=False): """ Generates citation counts for all of the papers cited by papers. Parameters ---------- papers : list A list of :class:`.Paper` instances. key : str Property to use as node key. Default is 'ayjid' (recommended). verbose : bool If True, prints status messages. Returns ------- counts : dict Citation counts for all papers cited by papers. """ if verbose: print "Generating citation counts for "+unicode(len(papers))+" papers..." counts = Counter() for P in papers: if P['citations'] is not None: for p in P['citations']: counts[p[key]] += 1 return counts
python
def _rpt_unused_sections(self, prt): """Report unused sections.""" sections_unused = set(self.sections_seen).difference(self.section2goids.keys()) for sec in sections_unused: prt.write(" UNUSED SECTION: {SEC}\n".format(SEC=sec))
python
# NOTE(review): the element is built with the lxml makeelement/append API
# using the class-level namespace map; the return value is getattr(self, name)
# — i.e. the parent's wrapped property, not the raw lxml element — so `name`
# must match a property defined on the parent class.
def create_element(self, ns, name): """ Create an element as a child of this SLDNode. @type ns: string @param ns: The namespace of the new element. @type name: string @param name: The name of the new element. @rtype: L{SLDNode} @return: The wrapped node, in the parent's property class. This will always be a descendent of SLDNode. """ elem = self._node.makeelement('{%s}%s' % (SLDNode._nsmap[ns], name), nsmap=SLDNode._nsmap) self._node.append(elem) return getattr(self, name)
python
def clip(self, channels=True):
    """Clamp channel values into the default [0, 1] range, in place.

    *channels* selects which channels get clipped: a single truthy/falsy
    value applies to all channels, or a tuple/list supplies one flag per
    channel (indexed positionally against self.channels).
    """
    if not isinstance(channels, (tuple, list)):
        channels = [channels] * len(self.channels)
    for idx in range(len(self.channels)):
        if not channels[idx]:
            continue
        self.channels[idx] = np.ma.clip(self.channels[idx], 0.0, 1.0)
python
# NOTE(review): pyproj.transform and Proj(init="EPSG:4326") are deprecated
# since pyproj 2.x in favour of the Transformer API — confirm the pinned
# pyproj version before migrating. The default LCC projection is centred on
# the data's own latitude/longitude extremes and means.
def compute_xy( self, projection: Union[pyproj.Proj, crs.Projection, None] = None ): """Computes x and y columns from latitudes and longitudes. The source projection is WGS84 (EPSG 4326). The default destination projection is a Lambert Conformal Conical projection centered on the data inside the dataframe. For consistency reasons with pandas DataFrame, a new Traffic structure is returned. """ if isinstance(projection, crs.Projection): projection = pyproj.Proj(projection.proj4_init) if projection is None: projection = pyproj.Proj( proj="lcc", lat_1=self.data.latitude.min(), lat_2=self.data.latitude.max(), lat_0=self.data.latitude.mean(), lon_0=self.data.longitude.mean(), ) x, y = pyproj.transform( pyproj.Proj(init="EPSG:4326"), projection, self.data.longitude.values, self.data.latitude.values, ) return self.__class__(self.data.assign(x=x, y=y))
java
/**
 * Tells whether the wrapped listener implements the listener interface that
 * corresponds to the given event type.
 *
 * @param eventType the cache event type to check against
 * @return true when the listener can receive events of that type
 * @throws IllegalStateException on an event type this method does not know
 */
@SuppressWarnings("PMD.SwitchStmtsShouldHaveDefault")
public boolean isCompatible(@NonNull EventType eventType) {
    switch (eventType) {
        case CREATED:
            return listener instanceof CacheEntryCreatedListener<?, ?>;
        case UPDATED:
            return listener instanceof CacheEntryUpdatedListener<?, ?>;
        case REMOVED:
            return listener instanceof CacheEntryRemovedListener<?, ?>;
        case EXPIRED:
            return listener instanceof CacheEntryExpiredListener<?, ?>;
    }
    throw new IllegalStateException("Unknown event type: " + eventType);
}
python
# NOTE(review): the bcftools probe runs `bcftools` in a shell and greps its
# combined stdout/stderr for the word "tabix" (bcftools' exit codes and
# stderr are unreliable, hence the `2>&1; echo $?` swallow) — this is a
# heuristic, not a version check.
def get_tabix_cmd(config): """Retrieve tabix command, handling new bcftools tabix and older tabix. """ try: bcftools = config_utils.get_program("bcftools", config) # bcftools has terrible error codes and stderr output, swallow those. bcftools_tabix = subprocess.check_output("{bcftools} 2>&1; echo $?".format(**locals()), shell=True).decode().find("tabix") >= 0 except config_utils.CmdNotFound: bcftools_tabix = False if bcftools_tabix: return "{0} tabix".format(bcftools) else: tabix = config_utils.get_program("tabix", config) return tabix
java
// Synchronizes a batch of DML events: no-op for null/empty batches, otherwise
// forwards first to the mapping-config-driven RDB sync and then to the mirror
// DB sync. Any failure is wrapped in a RuntimeException with its cause kept.
@Override public void sync(List<Dml> dmls) { if (dmls == null || dmls.isEmpty()) { return; } try { rdbSyncService.sync(mappingConfigCache, dmls, envProperties); rdbMirrorDbSyncService.sync(dmls); } catch (Exception e) { throw new RuntimeException(e); } }
java
// Adds a bounding box (two lat/lon corner points) to the query. The first
// call initializes the parameter string; subsequent calls append further
// boxes, comma-separated.
// NOTE(review): the magic number 18 equals "insideBoundingBox=".length(),
// i.e. "string already holds at least one box". A non-null string of exactly
// that length would silently drop the new box — confirm no other code ever
// resets this field to the bare prefix.
public Query insideBoundingBox(float latitudeP1, float longitudeP1, float latitudeP2, float longitudeP2) { if (insideBoundingBox == null) { insideBoundingBox = "insideBoundingBox=" + latitudeP1 + "," + longitudeP1 + "," + latitudeP2 + "," + longitudeP2; } else if (insideBoundingBox.length() > 18) { insideBoundingBox += "," + latitudeP1 + "," + longitudeP1 + "," + latitudeP2 + "," + longitudeP2; } return this; }
python
# NOTE(review): core of the ArciDispatch initialization — seeds one data node
# into the workflow/seen/fringe structures. Wildcard data nodes fan out
# immediately to their consumer function/dispatcher nodes; ordinary nodes are
# pushed onto the heap only if they pass the cutoff and wait-input checks.
# Left byte-identical: statement order and the aliased bound-method shortcuts
# are performance-sensitive and too intertwined for a safe restyle.
def _add_initial_value(self, data_id, value, initial_dist=0.0, fringe=None, check_cutoff=None, no_call=None): """ Add initial values updating workflow, seen, and fringe. :param fringe: Heapq of closest available nodes. :type fringe: list[(float | int, bool, (str, Dispatcher)] :param check_cutoff: Check the cutoff limit. :type check_cutoff: (int | float) -> bool :param no_call: If True data node estimation function is not used. :type no_call: bool :param data_id: Data node id. :type data_id: str :param value: Data node value e.g., {'value': val}. :type value: dict[str, T] :param initial_dist: Data node initial distance in the ArciDispatch algorithm. :type initial_dist: float, int, optional :return: True if the data has been visited, otherwise false. :rtype: bool """ # Namespace shortcuts for speed. nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length wf_remove_edge, check_wait_in = self._wf_remove_edge, self.check_wait_in wf_add_edge, dsp_in = self._wf_add_edge, self._set_sub_dsp_node_input update_view = self._update_meeting if fringe is None: fringe = self.fringe if no_call is None: no_call = self.no_call check_cutoff = check_cutoff or self.check_cutoff if data_id not in nodes: # Data node is not in the dmap. return False wait_in = nodes[data_id]['wait_inputs'] # Store wait inputs flag. index = nodes[data_id]['index'] # Store node index. wf_add_edge(START, data_id, **value) # Add edge. if data_id in self._wildcards: # Check if the data node has wildcard. self._visited.add(data_id) # Update visited nodes. self.workflow.add_node(data_id) # Add node to workflow. for w, edge_data in self.dmap[data_id].items(): # See func node. wf_add_edge(data_id, w, **value) # Set workflow. node = nodes[w] # Node attributes. # Evaluate distance. vw_dist = initial_dist + edge_weight(edge_data, node) update_view(w, vw_dist) # Update view distance. # Check the cutoff limit and if all inputs are satisfied. 
if check_cutoff(vw_dist): wf_remove_edge(data_id, w) # Remove workflow edge. continue # Pass the node. elif node['type'] == 'dispatcher': dsp_in(data_id, w, fringe, check_cutoff, no_call, vw_dist) elif check_wait_in(True, w): continue # Pass the node. seen[w] = vw_dist # Update distance. vd = (True, w, self.index + node['index']) # Virtual distance. heapq.heappush(fringe, (vw_dist, vd, (w, self))) # Add 2 heapq. return True update_view(data_id, initial_dist) # Update view distance. if check_cutoff(initial_dist): # Check the cutoff limit. wf_remove_edge(START, data_id) # Remove workflow edge. elif not check_wait_in(wait_in, data_id): # Check inputs. seen[data_id] = initial_dist # Update distance. vd = (wait_in, data_id, self.index + index) # Virtual distance. # Add node to heapq. heapq.heappush(fringe, (initial_dist, vd, (data_id, self))) return True return False
python
# NOTE(review): passes the callback as the first positional argument to
# queue_declare — this is the pika < 1.0 API; pika 1.x takes the queue name
# first and a `callback=` keyword. Confirm the pinned pika version.
def setup_queue(self, queue_name): """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare. """ _logger.debug('Declaring queue %s', queue_name) self._channel.queue_declare(self.on_queue_declareok, queue_name)
python
# NOTE(review): existence is probed twice (as a file path and as a directory
# path) before issuing the PROPFIND, so a missing resource raises
# RemoteResourceNotFound instead of surfacing a raw WebDAV error.
def info(self, remote_path): """Gets information about resource on WebDAV. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND :param remote_path: the path to remote resource. :return: a dictionary of information attributes and them values with following keys: `created`: date of resource creation, `name`: name of resource, `size`: size of resource, `modified`: date of resource modification. """ urn = Urn(remote_path) if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()): raise RemoteResourceNotFound(remote_path) response = self.execute_request(action='info', path=urn.quote()) path = self.get_full_path(urn) return WebDavXmlUtils.parse_info_response(content=response.content, path=path, hostname=self.webdav.hostname)
python
# NOTE(review): silently returns (no edge added) when either endpoint cannot
# be resolved to a graph ident; fromnode=None means "from self". The msg()
# call at verbosity level 4 is trace logging only.
def createReference(self, fromnode, tonode, edge_data=None): """ Create a reference from fromnode to tonode """ if fromnode is None: fromnode = self fromident, toident = self.getIdent(fromnode), self.getIdent(tonode) if fromident is None or toident is None: return self.msg(4, "createReference", fromnode, tonode, edge_data) self.graph.add_edge(fromident, toident, edge_data=edge_data)
python
# NOTE(review): assumes the calls CSV has a header row and that its third
# comma-separated column holds two ';'-joined HLA alleles — confirm against
# the upstream caller's output format. Both alleles are converted to
# ABSOLUTE-style names and written one per line.
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): """Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move """ work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
python
# NOTE(review): no-op when the device reports no inserted image; otherwise
# issues the EJECT_VIRTUAL_MEDIA RIB_INFO write command for the (upper-cased)
# device name.
def eject_virtual_media(self, device='FLOPPY'): """Ejects the Virtual Media image if one is inserted.""" vm_status = self.get_vm_status(device=device) if vm_status['IMAGE_INSERTED'] == 'NO': return dic = {'DEVICE': device.upper()} self._execute_command( 'EJECT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic)
java
// Packs day-of-week/hour/minute/second into the internal `data` field via
// set(), handing it a fresh 3-slot scratch array starting at offset 0.
// NOTE(review): argument validation (ranges for hour/minute/second) is
// presumably performed inside set() — confirm before relying on it here.
public final void setValue(int dayOfWeek, int hour, int minute, int second) { data = set(dayOfWeek, hour, minute, second, new short[3], 0); }
java
@Override public int open(String path, FuseFileInfo fi) { final AlluxioURI uri = mPathResolverCache.getUnchecked(path); // (see {@code man 2 open} for the structure of the flags bitfield) // File creation flags are the last two bits of flags final int flags = fi.flags.get(); LOG.trace("open({}, 0x{}) [Alluxio: {}]", path, Integer.toHexString(flags), uri); try { final URIStatus status = mFileSystem.getStatus(uri); if (status.isFolder()) { LOG.error("Cannot open folder {}", path); return -ErrorCodes.EISDIR(); } if (!status.isCompleted() && !waitForFileCompleted(uri)) { LOG.error("Cannot open incomplete folder {}", path); return -ErrorCodes.EFAULT(); } if (mOpenFiles.size() >= MAX_OPEN_FILES) { LOG.error("Cannot open {}: too many open files (MAX_OPEN_FILES: {})", path, MAX_OPEN_FILES); return ErrorCodes.EMFILE(); } FileInStream is = mFileSystem.openFile(uri); synchronized (mOpenFiles) { mOpenFiles.add(new OpenFileEntry(mNextOpenFileId, path, is, null)); fi.fh.set(mNextOpenFileId); // Assuming I will never wrap around (2^64 open files are quite a lot anyway) mNextOpenFileId += 1; } } catch (FileDoesNotExistException | InvalidPathException e) { LOG.debug("Failed to open file {}, path does not exist or is invalid", path); return -ErrorCodes.ENOENT(); } catch (Throwable t) { LOG.error("Failed to open file {}", path, t); return AlluxioFuseUtils.getErrorCode(t); } return 0; }
java
/**
 * Fully releases a reentrant lock held by the current thread, no matter how
 * many times it was re-acquired. Throws IllegalMonitorStateException (from
 * unlock) if the current thread does not hold the lock.
 */
private void unlockCompletely(ReentrantLock lockToUnlock){
    final int holdCount = lockToUnlock.getHoldCount();
    for (int released = 0; released < holdCount; released++) {
        lockToUnlock.unlock();
    }
}
python
def sql_program_name_func(command):
    """
    Extract program name from `command`, skipping leading NAME=value
    environment assignments.

    >>> sql_program_name_func('ls')
    'ls'
    >>> sql_program_name_func('git status')
    'git'
    >>> sql_program_name_func('EMACS=emacs make')
    'make'

    :type command: str
    """
    # Bug fix: split() instead of split(' ') — runs of whitespace produced
    # empty tokens, and '' (which contains no '=') was returned as the
    # program name.
    args = command.split()
    for prog in args:
        if '=' not in prog:
            return prog
    # Only assignments (or nothing at all): fall back to the first token,
    # or the raw command when there are no tokens.
    return args[0] if args else command
python
# NOTE(review): the `str` parameter shadows the builtin; relies on module-level
# paddingLength() and AES_blocksize. chr(0x80)+'\0'*n assumes a Python 2 byte
# string — on Python 3 this produces text, not bytes; confirm the intended
# Python version. The docstring's "ANSI X.923" reference looks wrong for
# one-and-zeroes padding (that is ISO/IEC 9797-1 method 2) — verify.
def appendBitPadding(str, blocksize=AES_blocksize): '''Bit padding a.k.a. One and Zeroes Padding A single set ('1') bit is added to the message and then as many reset ('0') bits as required (possibly none) are added. Input: (str) str - String to be padded (int) blocksize - block size of the algorithm Return: Padded string according to ANSI X.923 standart Used in when padding bit strings. 0x80 in binary is 10000000 0x00 in binary is 00000000 Defined in ANSI X.923 (based on NIST Special Publication 800-38A) and ISO/IEC 9797-1 as Padding Method 2. Used in hash functions MD5 and SHA, described in RFC 1321 step 3.1. ''' pad_len = paddingLength(len(str), blocksize) - 1 padding = chr(0x80)+'\0'*pad_len return str + padding
java
// Parses a free-form pages string with an ANTLR lexer/parser pair. Error
// listeners are removed on both so malformed input never prints to the
// console; when the grammar yields no literal, the raw input string is kept
// as the range's literal text.
public static PageRange parse(String pages) { ANTLRInputStream is = new ANTLRInputStream(pages); InternalPageLexer lexer = new InternalPageLexer(is); lexer.removeErrorListeners(); //do not output errors to console CommonTokenStream tokens = new CommonTokenStream(lexer); InternalPageParser parser = new InternalPageParser(tokens); parser.removeErrorListeners(); //do not output errors to console PagesContext ctx = parser.pages(); return new PageRange(ctx.literal != null ? ctx.literal : pages, ctx.pageFrom, ctx.numberOfPages); }
java
// Returns a copy of this symbol type with the given name, preserving marker,
// array count and type variable; the last two clone() arguments are reset to
// null — NOTE(review): confirm against clone()'s parameter list which two
// attributes are intentionally dropped.
public SymbolType withName(String name) { return clone(marker, name, arrayCount, typeVariable, null, null); }
java
// Deprecated compatibility overload: the locale and format arguments are
// intentionally IGNORED — the call is forwarded to format(Duration), so
// callers passing a specific locale/format get the default rendering.
@Deprecated public String format(Locale locale, Duration duration, DurationFormat format) { return format(duration); }
python
def label(self):
    """str display-name for this element, '' when absent from cube response.

    Handles numeric, datetime and text variables, plus CA/MR subvariable
    dimensions whose value is a references dict.
    """
    value = self._element_dict.get("value")
    kind = type(value).__name__
    if kind == "NoneType":
        return ""
    if kind == "list":
        # ---like '10-15' or 'A-F'---
        return "-".join(str(item) for item in value)
    if kind in ("float", "int"):
        return str(value)
    if kind in ("str", "unicode"):
        return value
    # ---For CA and MR subvar dimensions: dig the name out of references---
    name = value.get("references", {}).get("name")
    return name if name else ""
python
# NOTE(review): the rruleset slice yields datetime objects — JsonResponse
# serializes them via DjangoJSONEncoder (ISO 8601); confirm the client expects
# that format. A malformed rule string returns {'error': ...} rather than a
# non-200 status.
def preview(self, request): """ Return a occurrences in JSON format up until the configured limit. """ recurrence_rule = request.POST.get('recurrence_rule') limit = int(request.POST.get('limit', 10)) try: rruleset = rrule.rrulestr( recurrence_rule, dtstart=djtz.now(), forceset=True) except ValueError as e: data = { 'error': six.text_type(e), } else: data = { 'occurrences': rruleset[:limit] } return JsonResponse(data)
python
# NOTE(review): `y` is expected to be an array of distances from the line's
# axis (abs() makes the profile symmetric). Inside the half-thickness band the
# response is exactly 1.0; outside it falls off as a Gaussian of the given
# width. gaussian_width == 0 degenerates to a hard-edged bar (falloff forced
# to 0); float_error_ignore suppresses the exp underflow warnings far from
# the line.
def line(y, thickness, gaussian_width): """ Infinite-length line with a solid central region, then Gaussian fall-off at the edges. """ distance_from_line = abs(y) gaussian_y_coord = distance_from_line - thickness/2.0 sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = y*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq)) return np.where(gaussian_y_coord<=0, 1.0, falloff)
python
# NOTE(review): despite the ":return: model or None" docstring, this code path
# never returns None itself — a non-class/non-Model argument is resolved via
# get_model_by_name, which presumably raises on failure; confirm and align
# the docstring with the actual contract.
def get_concrete_model(model): """ Get model defined in Meta. :param str or django.db.models.Model model: :return: model or None :rtype django.db.models.Model or None: :raise ValueError: model is not found or abstract """ if not(inspect.isclass(model) and issubclass(model, models.Model)): model = get_model_by_name(model) return model
python
# NOTE(review): the doctest is inconsistent with the code — it calls
# decode_packet (not decode_tx_packet), and the code stores id/switch/command
# POSITIONALLY from 'KEY=VALUE' fragments without stripping the key or
# lowercasing the command, so 'CMD=ON' would be stored verbatim, not 'on'.
# Looks copied from an RX-packet decoder; verify the real TX packet layout
# before trusting the example.
def decode_tx_packet(packet: str) -> dict: """Break packet down into primitives, and do basic interpretation. >>> decode_packet('10;Kaku;ID=41;SWITCH=1;CMD=ON;') == { ... 'node': 'gateway', ... 'protocol': 'kaku', ... 'id': '000041', ... 'switch': '1', ... 'command': 'on', ... } True """ node_id, protocol, attrs = packet.split(DELIM, 2) data = cast(Dict[str, Any], { 'node': PacketHeader(node_id).name, }) data['protocol'] = protocol.lower() for i, attr in enumerate(filter(None, attrs.strip(DELIM).split(DELIM))): if i == 0: data['id'] = attr if i == 1: data['switch'] = attr if i == 2: data['command'] = attr # correct KaKu device address if data.get('protocol', '') == 'kaku' and len(data['id']) != 6: data['id'] = '0000' + data['id'] return data
java
// Returns the elapsed milliseconds of this stopwatch: running time so far
// when the watch is still running (endTime sentinel -1), otherwise the
// recorded end-start interval. A never-started watch raises
// InvalidStateException, rewrapped (cause preserved) as NotAvailableException.
public long getTime() throws NotAvailableException { synchronized (timeSync) { try { if (!isRunning()) { throw new InvalidStateException("Stopwatch was never started!"); } if (endTime == -1) { return System.currentTimeMillis() - startTime; } return endTime - startTime; } catch (CouldNotPerformException ex) { throw new NotAvailableException(ContextType.INSTANCE, "time", ex); } } }
python
def _seconds_as_string(seconds): """ Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s' """ TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)] unit_strings = [] cur = max(int(seconds), 1) for suffix, size in TIME_UNITS: if size is not None: cur, rest = divmod(cur, size) else: rest = cur if rest > 0: unit_strings.insert(0, '%d%s' % (rest, suffix)) return ' '.join(unit_strings)
java
// Configures the driver's parent JUL logger from connection properties:
// maps loggerLevel OFF/DEBUG/TRACE to Level.OFF/FINE/FINEST, then (re)builds
// the output handler — a FileHandler when loggerFile is set (skipped when the
// same file is already active), else a writer/stream handler from
// DriverManager's log sink, else System.err. Existing handlers are closed and
// removed before the new one is installed, and parent handlers are disabled
// so driver logging does not duplicate into the root logger.
private void setupLoggerFromProperties(final Properties props) { final String driverLogLevel = PGProperty.LOGGER_LEVEL.get(props); if (driverLogLevel == null) { return; // Don't mess with Logger if not set } if ("OFF".equalsIgnoreCase(driverLogLevel)) { PARENT_LOGGER.setLevel(Level.OFF); return; // Don't mess with Logger if set to OFF } else if ("DEBUG".equalsIgnoreCase(driverLogLevel)) { PARENT_LOGGER.setLevel(Level.FINE); } else if ("TRACE".equalsIgnoreCase(driverLogLevel)) { PARENT_LOGGER.setLevel(Level.FINEST); } ExpressionProperties exprProps = new ExpressionProperties(props, System.getProperties()); final String driverLogFile = PGProperty.LOGGER_FILE.get(exprProps); if (driverLogFile != null && driverLogFile.equals(loggerHandlerFile)) { return; // Same file output, do nothing. } for (java.util.logging.Handler handlers : PARENT_LOGGER.getHandlers()) { // Remove previously set Handlers handlers.close(); PARENT_LOGGER.removeHandler(handlers); loggerHandlerFile = null; } java.util.logging.Handler handler = null; if (driverLogFile != null) { try { handler = new java.util.logging.FileHandler(driverLogFile); loggerHandlerFile = driverLogFile; } catch (Exception ex) { System.err.println("Cannot enable FileHandler, fallback to ConsoleHandler."); } } Formatter formatter = new SimpleFormatter(); if ( handler == null ) { if (DriverManager.getLogWriter() != null) { handler = new WriterHandler(DriverManager.getLogWriter()); } else if ( DriverManager.getLogStream() != null) { handler = new StreamHandler(DriverManager.getLogStream(), formatter); } else { handler = new StreamHandler(System.err, formatter); } } else { handler.setFormatter(formatter); } handler.setLevel(PARENT_LOGGER.getLevel()); PARENT_LOGGER.setUseParentHandlers(false); PARENT_LOGGER.addHandler(handler); }
java
// Promotes the local member to leader: records its id as leader in the
// context, then resets every remote member's replication state against the
// current log so follower indices start fresh under the new term.
private void takeLeadership() { context.setLeader(context.getCluster().member().id()); context.getClusterState().getRemoteMemberStates().forEach(m -> m.resetState(context.getLog())); }
python
def on_unsubscribe(self):
    # type: () -> Callable
    """Decorate a callback function to handle unsubscriptions.

    **Usage:**::

        @mqtt.on_unsubscribe()
        def handle_unsubscribe(client, userdata, mid):
            print('Unsubscribed from topic (id: {})'.format(mid))

    """
    def decorator(handler):
        # type: (Callable) -> Callable
        # Register the wrapped function directly on the paho client.
        self.client.on_unsubscribe = handler
        return handler
    return decorator
python
# NOTE(review): Python 2 only — urllib.urlencode moved to urllib.parse in
# Python 3. Builds the OAuth user-agent authorization URL with client_id,
# CSRF state, response_type=code and the configured redirect URI.
def auth_user_get_url(self): 'Build authorization URL for User Agent.' if not self.client_id: raise AuthenticationError('No client_id specified') return '{}?{}'.format(self.auth_url_user, urllib.urlencode(dict( client_id=self.client_id, state=self.auth_state_check, response_type='code', redirect_uri=self.auth_redirect_uri )))
python
# NOTE(review): func.utcnow() renders as a SQL-side UTCNOW() function — this
# requires a dialect/compiles hook mapping it to the backend's UTC-now
# expression; confirm one exists in this project, as stock backends do not
# define UTCNOW.
def make_timestamp_columns(): """Return two columns, created_at and updated_at, with appropriate defaults""" return ( Column('created_at', DateTime, default=func.utcnow(), nullable=False), Column('updated_at', DateTime, default=func.utcnow(), onupdate=func.utcnow(), nullable=False), )
java
// Writes each character of the string as a single byte (low 8 bits only —
// chars above 0xFF are silently truncated by OutputStream.write, so this is
// safe only for ASCII/Latin-1 content). No-op for the payload when command
// processing is disabled; returns this for chaining.
public JBBPOut Byte(final String str) throws IOException { assertNotEnded(); assertStringNotNull(str); if (this.processCommands) { for (int i = 0; i < str.length(); i++) { this.outStream.write(str.charAt(i)); } } return this; }
python
# NOTE(review): Python 2-style iterator protocol — a py3 port needs an
# __next__ alias. _retrieved counts items fetched from the backend, not items
# already handed to the caller.
def next(self):
    """
    Provide iteration capabilities

    Use a small object cache for performance
    """
    if not self._cache:
        self._cache = self._get_results()
        self._retrieved += len(self._cache)

    # If we don't have any other data to return, we just
    # stop the iteration.
    if not self._cache:
        raise StopIteration()

    # Consuming the cache and updating the "cursor"
    return self._cache.pop(0)
python
# NOTE(review): this method deliberately shadows the builtin `classmethod`
# within its class namespace. It builds an Objective-C class-method shim:
# the type encoding gains the hidden (class, selector) arguments, the Python
# return value is unwrapped to a raw pointer when it is an ObjC wrapper, and
# Python '_' in the function name maps to ':' in the ObjC selector.
def classmethod(self, encoding): """Function decorator for class methods.""" # Add encodings for hidden self and cmd arguments. encoding = ensure_bytes(encoding) typecodes = parse_type_encoding(encoding) typecodes.insert(1, b'@:') encoding = b''.join(typecodes) def decorator(f): def objc_class_method(objc_cls, objc_cmd, *args): py_cls = ObjCClass(objc_cls) py_cls.objc_cmd = objc_cmd args = convert_method_arguments(encoding, args) result = f(py_cls, *args) if isinstance(result, ObjCClass): result = result.ptr.value elif isinstance(result, ObjCInstance): result = result.ptr.value return result name = f.__name__.replace('_', ':') self.add_class_method(objc_class_method, name, encoding) return objc_class_method return decorator
java
/**
 * Computes the q-gram bucket table for the suffix array construction and
 * fills {@code this.suffixArray} with a partial ordering of suffixes.
 *
 * <p>Each bucket corresponds to one q-gram over the mapped alphabet; the
 * returned array holds the bucket boundary pointers into the suffix array.
 * As a side effect the sequence is remapped through the alphabet mapping,
 * padded beyond its end, and the alphabet tables are rewritten to the
 * mapped (dense) alphabet.
 *
 * @param q the q-gram length used for bucketing
 * @return bucket pointer array of size {@code alphabetSize^q + 1}
 */
private int[] determineAll_Buckets_Sarray(int q) {
    int strLen = length;
    int alphabetSize = alphabet.size;
    int numberBuckets = kbs_power_Ulong(alphabetSize, q);
    int[] buckets = new int[numberBuckets + 1];
    // Pad the sequence past its end so q-grams starting near the end are valid:
    // q copies of the smallest character followed by zeros.
    for (int i = 0; i < q; i++) {
        seq[start + length + i] = alphabet.charArray[0];
    }
    for (int i = 0; i < KBS_STRING_EXTENSION_SIZE - q; i++) {
        seq[start + length + i + q] = 0;
    }
    /* computation of first hashvalue */
    int[] alphaMap = alphabet.alphaMapping;
    int mappedUcharArray = 0;
    int hashCode = 0;
    int tempPower = 1;
    int i;
    // While hashing the first q-gram, the characters are simultaneously
    // remapped in place through alphaMap.
    for (i = q - 1; i >= 0; i--) {
        hashCode += (seq[start + mappedUcharArray + i] = alphaMap[seq[start + mappedUcharArray + i]]) * tempPower;
        tempPower *= alphabetSize;
    }
    int firstHashCode = hashCode;
    /* computation of the size of buckets */
    int tempModulo = kbs_power_Ulong(alphabetSize, q - 1);
    mappedUcharArray += q;
    buckets[hashCode]++;
    int j;
    // Rolling hash over all positions: drop the leading character, shift,
    // append (and remap) the next one; count q-gram occurrences.
    for (j = 1; j < strLen; j++) {
        hashCode -= (seq[start + mappedUcharArray - q]) * tempModulo;
        hashCode *= alphabetSize;
        hashCode += seq[start + mappedUcharArray] = alphaMap[seq[start + mappedUcharArray]];
        mappedUcharArray++;
        buckets[hashCode]++;
    }
    /* update the alphabet for mapped string */
    for (j = 0; j < alphabetSize; j++) {
        alphabet.charFreq[j] = alphabet.charFreq[alphabet.charArray[j]];
        alphabet.charArray[j] = j;
        alphaMap[j] = j;
    }
    for (; j < KBS_MAX_ALPHABET_SIZE; j++) {
        alphaMap[j] = -1;
    }
    this.suffixArray = new int[strLen + 1];
    /* computation of the bucket pointers, pointers into the suffix array */
    for (j = 1; j <= numberBuckets; j++) {
        buckets[j] = buckets[j - 1] + buckets[j];
    }
    /* computation of the suffix array (buckets that are copied later are left out) */
    int[] charRank = getCharWeightedRank_Alphabet(buckets, q);
    mappedUcharArray = q;
    hashCode = firstHashCode;
    for (j = 0; j < strLen - 1; j++) {
        int c1;
        buckets[hashCode]--;
        // Only place suffixes whose leading character rank pattern marks
        // them as directly sortable here; the rest are copied later.
        if ((c1 = charRank[seq[start + mappedUcharArray - q]]) < charRank[seq[start + mappedUcharArray + 1 - q]]
                && c1 <= charRank[seq[start + mappedUcharArray + 2 - q]])
            suffixArray[buckets[hashCode]] = j;
        hashCode -= (seq[start + mappedUcharArray - q]) * tempModulo;
        hashCode *= alphabetSize;
        hashCode += seq[start + mappedUcharArray];
        mappedUcharArray++;
    }
    buckets[hashCode]--;
    suffixArray[buckets[hashCode]] = strLen - 1;
    buckets[numberBuckets] = strLen;
    return buckets;
}
java
/**
 * Toggles the video state of the given call: enables video when it is
 * currently disabled and disables it when it is currently enabled.
 *
 * @param callId id of the call whose video state is toggled
 */
@ObjectiveCName("toggleVideoEnabledWithCallId:")
public void toggleVideoEnabled(long callId) {
    if (!modules.getCallsModule().getCall(callId).getIsVideoEnabled().get()) {
        modules.getCallsModule().enableVideo(callId);
    } else {
        modules.getCallsModule().disableVideo(callId);
    }
}
java
/**
 * Creates (and caches) the 20 standard amino acids by parsing the bundled
 * CML template file. Subsequent calls return the cached array.
 *
 * <p>On a read failure the error is logged and the partially filled (or
 * empty) array is returned.
 *
 * @return the array of 20 amino acids (entries may be null on parse errors)
 */
public synchronized static AminoAcid[] createAAs() {
    // Return the cached set if it was already built.
    if (aminoAcids != null) {
        return aminoAcids;
    }

    // Create set of AtomContainers
    aminoAcids = new AminoAcid[20];

    IChemFile list = new ChemFile();
    CMLReader reader = new CMLReader(AminoAcids.class.getClassLoader().getResourceAsStream(
            "org/openscience/cdk/templates/data/list_aminoacids.cml"));
    try {
        list = (IChemFile) reader.read(list);
        List<IAtomContainer> containersList = ChemFileManipulator.getAllAtomContainers(list);
        Iterator<IAtomContainer> iterator = containersList.iterator();
        int counter = 0;
        while (iterator.hasNext()) {
            IAtomContainer ac = (IAtomContainer) iterator.next();
            LOGGER.debug("Adding AA: ", ac);
            // convert into an AminoAcid
            AminoAcid aminoAcid = new AminoAcid();
            Iterator<IAtom> atoms = ac.atoms().iterator();
            Iterator<Object> props = ac.getProperties().keySet().iterator();
            // Copy PDB dictionary-reference properties (residue name,
            // one-letter code, id) onto the AminoAcid.
            while (props.hasNext()) {
                Object next = props.next();
                LOGGER.debug("Prop class: " + next.getClass().getName());
                LOGGER.debug("Prop: " + next.toString());
                if (next instanceof DictRef) {
                    DictRef dictRef = (DictRef) next;
                    // logger.debug("DictRef type: " + dictRef.getType());
                    if (dictRef.getType().equals("pdb:residueName")) {
                        aminoAcid.setProperty(RESIDUE_NAME, ac.getProperty(dictRef).toString().toUpperCase());
                        aminoAcid.setMonomerName(ac.getProperty(dictRef).toString());
                    } else if (dictRef.getType().equals("pdb:oneLetterCode")) {
                        aminoAcid.setProperty(RESIDUE_NAME_SHORT, ac.getProperty(dictRef));
                    } else if (dictRef.getType().equals("pdb:id")) {
                        aminoAcid.setProperty(ID, ac.getProperty(dictRef));
                        LOGGER.debug("Set AA ID to: ", ac.getProperty(dictRef));
                    } else {
                        LOGGER.error("Cannot deal with dictRef!");
                    }
                }
            }
            // Transfer atoms, routing N-/C-terminus atoms through the
            // dedicated AminoAcid setters.
            while (atoms.hasNext()) {
                IAtom atom = (IAtom) atoms.next();
                String dictRef = (String) atom.getProperty("org.openscience.cdk.dict");
                if (dictRef != null && dictRef.equals("pdb:nTerminus")) {
                    aminoAcid.addNTerminus(atom);
                } else if (dictRef != null && dictRef.equals("pdb:cTerminus")) {
                    aminoAcid.addCTerminus(atom);
                } else {
                    aminoAcid.addAtom(atom);
                }
            }
            Iterator<IBond> bonds = ac.bonds().iterator();
            while (bonds.hasNext()) {
                IBond bond = (IBond) bonds.next();
                aminoAcid.addBond(bond);
            }
            AminoAcidManipulator.removeAcidicOxygen(aminoAcid);
            // Record the counts *after* the acidic oxygen was removed.
            aminoAcid.setProperty(NO_ATOMS, "" + aminoAcid.getAtomCount());
            aminoAcid.setProperty(NO_BONDS, "" + aminoAcid.getBondCount());
            if (counter < aminoAcids.length) {
                aminoAcids[counter] = aminoAcid;
            } else {
                LOGGER.error("Could not store AminoAcid! Array too short!");
            }
            counter++;
        }
        reader.close();
    } catch (CDKException | IOException exception) {
        LOGGER.error("Failed reading file: ", exception.getMessage());
        LOGGER.debug(exception);
    }

    return aminoAcids;
}
java
/**
 * Gets the requested page of top-rated movies.
 *
 * <p>Delegates directly to the wrapped {@code tmdbMovies} API client; both
 * parameters are passed through unchanged.
 *
 * @param page the page of results to retrieve
 * @param language the language code for the results
 * @return the page of top-rated movie info
 * @throws MovieDbException if the underlying API call fails
 */
public ResultList<MovieInfo> getTopRatedMovies(Integer page, String language) throws MovieDbException {
    return tmdbMovies.getTopRatedMovies(page, language);
}
java
private boolean isMultiChunked(final String filename) { final FileCacheKey fileCacheKey = new FileCacheKey(indexName, filename, affinitySegmentId); final FileMetadata fileMetadata = metadataCache.get(fileCacheKey); if (fileMetadata==null) { //This might happen under high load when the metadata is being written //using putAsync; in such case we return true as it's the safest option. //Skipping the readlocks is just a performance optimisation, and this //condition is extremely rare. return true; } else { return fileMetadata.isMultiChunked(); } }
java
/**
 * Reads the template at the given path (resolved via {@code readResource})
 * and merges the supplied values into it.
 *
 * @param templatePath path of the template resource to read
 * @param values substitution values keyed by placeholder name
 * @return the merged template text
 */
public static String mergeFromPath(String templatePath, Map<String, ?> values) {
    return mergeFromTemplate(readResource(templatePath), values);
}
java
/**
 * Sends an append request to the given member over the given connection and
 * handles the response asynchronously.
 *
 * <p>After initiating the send, the member's next index is updated
 * optimistically and, if more entries are pending, the next batch is
 * pipelined immediately without waiting for the response.
 *
 * @param connection the connection to the member
 * @param member the member state being replicated to
 * @param request the append request to send
 */
protected void sendAppendRequest(Connection connection, MemberState member, AppendRequest request) {
    // Capture the send time so the round-trip latency can be recorded
    // when a non-empty append completes.
    long timestamp = System.nanoTime();

    logger.trace("{} - Sending {} to {}", context.getCluster().member().address(), request, member.getMember().address());
    connection.<AppendRequest, AppendResponse>sendAndReceive(request).whenComplete((response, error) -> {
        context.checkThread();

        // Complete the append to the member.
        if (!request.entries().isEmpty()) {
            member.completeAppend(System.nanoTime() - timestamp);
        } else {
            member.completeAppend();
        }

        // Ignore responses that arrive after this context was closed.
        if (open) {
            if (error == null) {
                logger.trace("{} - Received {} from {}", context.getCluster().member().address(), response, member.getMember().address());
                handleAppendResponse(member, request, response);
            } else {
                handleAppendResponseFailure(member, request, error);
            }
        }
    });

    // Optimistically advance the member's next index for pipelining.
    updateNextIndex(member, request);

    // Pipeline the next batch if this request carried entries and more remain.
    if (!request.entries().isEmpty() && hasMoreEntries(member)) {
        appendEntries(member);
    }
}
java
/**
 * Returns a status identical to this one but carrying the given cause.
 *
 * <p>If the cause is already equal to the current one, this instance is
 * returned unchanged to avoid an allocation.
 *
 * @param cause the throwable to attach, may be null
 * @return a status with the given cause
 */
public Status withCause(Throwable cause) {
    if (!Objects.equal(this.cause, cause)) {
        return new Status(this.code, this.description, cause);
    }
    return this;
}
python
def _set_fields(self, fields):
    """Set or update the fields value."""
    super(_BaseHNVModel, self)._set_fields(fields)
    # Derive the resource reference from the endpoint template when the
    # payload did not provide one.
    if not self.resource_ref:
        endpoint = self._endpoint.format(
            resource_id=self.resource_id, parent_id=self.parent_id,
            grandparent_id=self.grandparent_id)
        # Strip the API prefix (e.g. "/networking/v1") so only the bare
        # resource path remains.
        self.resource_ref = re.sub("(/networking/v[0-9]+)", "", endpoint)
java
/**
 * Returns the events whose start instant lies within the given interval.
 *
 * <p>Note: the result is a <em>live</em> filtered view over the values of
 * {@code eventById} (Guava {@code Collections2.filter}); it reflects later
 * changes to that map. Interval membership follows Joda-Time
 * {@code Interval.contains} semantics (start inclusive, end exclusive).
 *
 * @param start start of the interval
 * @param end end of the interval
 * @return the events starting within the interval
 */
public Collection<Event> getEvents(final DateTime start, final DateTime end) {
    final Interval interval = new Interval(start, end);
    final Predicate<Event> withinInterval = new Predicate<Event>() {
        public boolean apply(Event input) {
            return interval.contains(input.getStart());
        }
    };
    final Collection<Event> values = eventById.values();
    return Collections2.filter(values, withinInterval);
}
java
/**
 * Sets the color.
 *
 * @param color the new color; must not be {@code null}
 * @throws IllegalArgumentException if {@code color} is {@code null}
 */
public void setColor(Color color) {
    if (null == color) {
        // The message contains no placeholders, so routing it through
        // MessageFormat.format with an empty argument array (as before)
        // was a no-op; throw the literal directly.
        throw new IllegalArgumentException("The color must be non null.");
    }
    myColor = color;
}
python
def user_has_email(username):
    """Validate that the given user has an email address on their account.

    :param username: id of the user to check
    :returns: True if the user has an email property set
    :raises Invalid: if no email is associated with the account
    """
    user = api.user.get(username=username)
    if not user.getProperty("email"):
        msg = _(
            "This user doesn't have an email associated "
            "with their account."
        )
        # Invalid signals a validation failure to the caller/form machinery.
        raise Invalid(msg)
    return True
java
/**
 * Example of asynchronously deleting a sink.
 *
 * <p>The {@code [START]}/{@code [END]} comments delimit the snippet that is
 * extracted into the documentation; keep them intact.
 *
 * @param sinkName the name of the sink to delete
 * @return {@code true} if the sink was deleted, {@code false} if it was not found
 */
public boolean deleteSinkAsync(String sinkName) throws ExecutionException, InterruptedException {
    // [START deleteSinkAsync]
    Future<Boolean> future = logging.deleteSinkAsync(sinkName);
    // ...
    boolean deleted = future.get();
    if (deleted) {
        // the sink was deleted
    } else {
        // the sink was not found
    }
    // [END deleteSinkAsync]
    return deleted;
}
java
/**
 * Builds a fresh installation state snapshot from the given identity's
 * layers and add-ons.
 *
 * @param installedIdentity the identity to snapshot
 * @return the populated installation state
 * @throws IOException declared for callers; loading state may fail
 */
protected static InstallationModificationImpl.InstallationState load(final InstalledIdentity installedIdentity) throws IOException {
    final InstallationModificationImpl.InstallationState result = new InstallationModificationImpl.InstallationState();
    for (final Layer presentLayer : installedIdentity.getLayers()) {
        result.putLayer(presentLayer);
    }
    for (final AddOn presentAddOn : installedIdentity.getAddOns()) {
        result.putAddOn(presentAddOn);
    }
    return result;
}
java
protected void parseAsynchronousContinuation(Element element, ActivityImpl activity) { boolean isAsyncBefore = isAsyncBefore(element); boolean isAsyncAfter = isAsyncAfter(element); boolean exclusive = isExclusive(element); // set properties on activity activity.setAsyncBefore(isAsyncBefore, exclusive); activity.setAsyncAfter(isAsyncAfter, exclusive); }
java
/**
 * Runs background validation over the free connection listeners of this
 * pool.
 *
 * <p>Each FREE listener that is due for validation (based on the configured
 * background validation interval) is moved to VALIDATION, validated, and
 * either returned to FREE or destroyed. If any connection was destroyed the
 * pool is prefilled afterwards. Managed connection factories that do not
 * implement {@code ValidatingManagedConnectionFactory} are only logged.
 */
public void validateConnections() {
    boolean anyDestroyed = false;

    ManagedConnectionFactory mcf = pool.getConnectionManager().getManagedConnectionFactory();

    if (mcf instanceof ValidatingManagedConnectionFactory) {
        ValidatingManagedConnectionFactory vcf = (ValidatingManagedConnectionFactory) mcf;
        long timestamp = System.currentTimeMillis();

        for (ConnectionListener cl : listeners) {
            // Claim the listener for validation; skip listeners that are
            // in use or already being processed.
            if (cl.changeState(FREE, VALIDATION)) {
                if (cl.getValidated() + pool.getConfiguration().getBackgroundValidationMillis() <= timestamp) {
                    // Due for validation; a null result means the
                    // connection failed validation and was destroyed.
                    ConnectionListener result = validateConnectionListener(listeners, cl, FREE);
                    if (result == null) {
                        if (Tracer.isEnabled())
                            Tracer.destroyConnectionListener(pool.getConfiguration().getId(), this, cl,
                                    false, false, true, false, false, false, false,
                                    Tracer.isRecordCallstacks() ?
                                            new Throwable("CALLSTACK") : null);
                        anyDestroyed = true;
                    }
                } else {
                    // Not due yet; hand it back. If the state can no longer
                    // be restored, the listener is broken -- destroy it.
                    if (!cl.changeState(VALIDATION, FREE)) {
                        if (Tracer.isEnabled())
                            Tracer.destroyConnectionListener(pool.getConfiguration().getId(), this, cl,
                                    false, false, false, false, true, false, false,
                                    Tracer.isRecordCallstacks() ?
                                            new Throwable("CALLSTACK") : null);
                        destroyAndRemoveConnectionListener(cl, listeners);
                    }
                }
            }
        }
    } else {
        log.validateOnMatchNonCompliantManagedConnectionFactory(mcf.getClass().getName());
    }

    // Replace destroyed connections to keep the pool at its minimum size.
    if (anyDestroyed)
        prefill();
}
java
/**
 * Forwards a message-sent event to the listener on the handler thread and
 * logs the publication.
 *
 * @param event the event to deliver
 */
private void onMessageSent(MessageSentEvent event) {
    // Hop onto the handler's thread before notifying the listener.
    final Runnable notification = () -> listener.onMessageSent(event);
    handler.post(notification);
    log("Event published " + event.toString());
}
python
def get_conversation(self, peer_jid, *, current_jid=None):
    """
    Get or create a new one-to-one conversation with a peer.

    :param peer_jid: The JID of the peer to converse with.
    :type peer_jid: :class:`aioxmpp.JID`
    :param current_jid: The current JID to lock the conversation to
        (see :rfc:`6121`).
    :type current_jid: :class:`aioxmpp.JID`
    :rtype: :class:`Conversation`
    :return: The new or existing conversation with the peer.

    `peer_jid` must be a full or bare JID. See the :class:`Service`
    documentation for details.

    .. versionchanged:: 0.10

        In 0.9, this was a coroutine. Sorry.
    """
    # Reuse a cached conversation when one exists for this peer;
    # otherwise spawn a fresh one.
    if peer_jid in self._conversationmap:
        return self._conversationmap[peer_jid]
    return self._make_conversation(peer_jid, False)
java
/**
 * Mounts the given page class at the given path, using a custom
 * {@code IPageParametersEncoder} for encoding/decoding the page parameters.
 *
 * @param <T> the page type
 * @param path the mount path
 * @param pageClass the page class to mount
 * @param pageParametersEncoder the encoder for the mounted page's parameters
 */
public <T extends Page> void mountPage(final String path, final Class<T> pageClass, final IPageParametersEncoder pageParametersEncoder) {
    mount(new MountedMapper(path, pageClass, pageParametersEncoder));
}
python
def commit_offsets_sync(self, offsets):
    """Commit specific offsets synchronously.

    This method will retry until the commit completes successfully
    or an unrecoverable error is encountered.

    Arguments:
        offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit

    Raises error on failure
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
    assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
    assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
                   offsets.values()))
    # Flush any completion callbacks queued by earlier async commits
    # before starting a new synchronous one.
    self._invoke_completed_offset_commit_callbacks()
    if not offsets:
        return

    # Retry loop: re-send the commit until it succeeds or fails with a
    # non-retriable error, backing off between attempts.
    while True:
        self.ensure_coordinator_ready()

        future = self._send_offset_commit_request(offsets)
        self._client.poll(future=future)

        if future.succeeded():
            return future.value

        if not future.retriable():
            raise future.exception # pylint: disable-msg=raising-bad-type

        time.sleep(self.config['retry_backoff_ms'] / 1000)
java
/**
 * Registers a new task for this job in the CREATED state.
 *
 * @param taskId the id of the task to add; must not already be registered
 * @throws IllegalArgumentException if a task with the given id already exists
 */
public synchronized void addTask(int taskId) {
    // A descriptive message instead of the previous empty string, so a
    // duplicate registration is diagnosable from the exception alone.
    Preconditions.checkArgument(!mTaskIdToInfo.containsKey(taskId),
        "Task %s already exists in job %s", taskId, mId);
    mTaskIdToInfo.put(taskId, new TaskInfo().setJobId(mId).setTaskId(taskId)
        .setStatus(Status.CREATED).setErrorMessage("").setResult(null));
}
python
def save(self, *args, **kwargs):
    ''' Just add "s" if no plural name given. '''
    if not self.pluralName:
        self.pluralName = self.name + 's'
    # NOTE(review): super(self.__class__, self) is fragile -- it recurses
    # infinitely if this model is subclassed and save() is inherited.
    # Prefer super(ExplicitClassName, self) (or bare super() on Python 3).
    super(self.__class__, self).save(*args, **kwargs)
java
public String getSrcSet() { StringBuffer result = new StringBuffer(128); if (m_srcSet != null) { int items = m_srcSet.size(); for (Map.Entry<Integer, CmsJspImageBean> entry : m_srcSet.entrySet()) { CmsJspImageBean imageBean = entry.getValue(); // append the image source result.append(imageBean.getSrcUrl()); result.append(" "); // append width result.append(imageBean.getScaler().getWidth()); result.append("w"); if (--items > 0) { result.append(", "); } } } return result.toString(); }
python
def _fill_diagonals(m, diag_indices): """Fills diagonals of `nsites` matrices in `m` so rows sum to 0.""" assert m.ndim == 3, "M must have 3 dimensions" assert m.shape[1] == m.shape[2], "M must contain square matrices" for r in range(m.shape[0]): scipy.fill_diagonal(m[r], 0) m[r][diag_indices] -= scipy.sum(m[r], axis=1)
python
def help(self, command=None):
    '''help prints the general function help, or help for a specific command

    Parameters
    ==========
    command: the command to get help for, if none, prints general help

    Returns
    =======
    the result of running `singularity --help [command]`
    '''
    from spython.utils import check_install
    check_install()

    cmd = ['singularity', '--help']

    # "is not None" instead of "!= None": identity comparison is the
    # idiomatic and safe way to test for the default.
    if command is not None:
        cmd.append(command)

    # Return the command output directly instead of binding it to a local
    # that shadowed the help() builtin.
    return self._run_command(cmd)
java
/**
 * Sets the repository used to persist user credentials, together with the
 * id of the user whose credentials are stored.
 *
 * @param userCredentialsRepo the credentials repository to use
 * @param userId the id of the user owning the credentials
 * @return this builder, for chaining
 */
public StorageBuilder setUserCredentialsRepository(
        UserCredentialsRepository userCredentialsRepo, String userId ) {
    this.userCredentialsRepo = userCredentialsRepo;
    this.userId = userId;
    return this;
}
python
def which_roles_can(self, name):
    """Return the roles that hold the named permission for this client.

    E.g. ``which_roles_can('SendMail')`` -> ``[{'role': ...}, ...]``
    """
    # First permission record matching this client and permission name.
    targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
    return [{'role': group.role} for group in targetPermissionRecords.groups]
java
/**
 * Adds the given items to the list of failed remediation configurations.
 *
 * <p><b>NOTE:</b> this method appends to any existing list; use
 * {@code setFailedItems} to replace the values instead.
 *
 * @param failedItems the items to append
 * @return this object, for method chaining
 */
public FailedRemediationBatch withFailedItems(RemediationConfiguration... failedItems) {
    if (this.failedItems == null) {
        setFailedItems(new com.amazonaws.internal.SdkInternalList<RemediationConfiguration>(failedItems.length));
    }
    for (RemediationConfiguration ele : failedItems) {
        this.failedItems.add(ele);
    }
    return this;
}
python
def delete_repository(self, repository, params=None):
    """
    Removes a shared file system repository.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_

    :arg repository: A comma-separated list of repository names
    :arg master_timeout: Explicit operation timeout for connection to
        master node
    :arg timeout: Explicit operation timeout
    """
    # Reject empty/None identifiers that would otherwise hit the wrong
    # endpoint.
    if repository in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'repository'.")
    path = _make_path('_snapshot', repository)
    return self.transport.perform_request('DELETE', path, params=params)
java
/**
 * Determines the names for the class with the given name, restricted to
 * the specified packages.
 *
 * @param classname the fully qualified name of the class to inspect
 * @param pkgnames the packages to search
 * @return the determined names; an empty list if the class cannot be loaded
 */
public List<String> findNames(String classname, String[] pkgnames) {
    // Stays empty when the class cannot be resolved; previously the list
    // was needlessly allocated twice and the raw Class type was used.
    List<String> result = new ArrayList<>();
    try {
        Class<?> cls = Class.forName(classname);
        result = findNames(cls, pkgnames);
    } catch (Throwable t) {
        getLogger().log(Level.SEVERE,
            "Failed to instantiate '" + classname + "'/" + ClassUtils.arrayToString(pkgnames) + " (findNames):", t);
    }
    return result;
}
python
def hdf5_col(self, chain=-1):
    """Return a pytables column object.

    :Parameters:
    chain : integer
      The index of the chain (default: the most recent chain).

    .. note::
      This method is specific to the ``hdf5`` backend.
    """
    # Look up this object's column in the table backing the requested chain.
    return self.db._tables[chain].colinstances[self.name]
python
def GetTARInfo(self):
    """Retrieves the TAR info.

    Returns:
      tarfile.TarInfo: TAR info or None if it does not exist.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    # Lazily resolve and cache the member info on first access.
    if not self._tar_info:
      location = getattr(self.path_spec, 'location', None)
      if location is None:
        raise errors.PathSpecError('Path specification missing location.')

      if not location.startswith(self._file_system.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')

      # The root itself has no member entry in the archive.
      if len(location) == 1:
        return None

      tar_file = self._file_system.GetTARFile()
      try:
        # Strip the leading path separator; tar members are stored without it.
        self._tar_info = tar_file.getmember(location[1:])
      except KeyError:
        # Member not present in the archive; fall through and return None.
        pass

    return self._tar_info
python
def agg_conc(original_countries,
             aggregates,
             missing_countries='test',
             merge_multiple_string='_&_',
             log_missing_countries=None,
             log_merge_multiple_strings=None,
             coco=None,
             as_dataframe='sparse',
             original_countries_class=None):
    """ Builds an aggregation concordance dict, vec or matrix

    Parameters
    ----------
    original_countries: list or str
        List of countries to aggregate; also accepts any valid column
        name of CountryConverter.data

    aggregates: list of dict or str
        List of aggregation information. This can either be dicts mapping
        the names of 'original_countries' to aggregates, or valid column
        names of CountryConverter.data. Aggregation happens in the order
        given in this parameter. Thus, countries assigned to an aggregate
        are not re-assigned by the following aggregation information.

    missing_countries: str, boolean, None
        Entry to fill in for countries in 'original_countries' which do
        not appear in 'aggregates'.
        str: Use the given name for all missing countries
        True: Use the name in original_countries for missing countries
        False: Skip these countries
        None: Use None for these countries
        NOTE(review): the default 'test' looks like a leftover placeholder
        -- confirm the intended default value.

    merge_multiple_string: str or None, optional
        If multiple correspondence entries are given in one of the
        aggregates, join them with the given string (default: '_&_').
        To skip these entries, pass None.

    log_missing_countries: function, optional
        This function is called with the country name if it is in
        'original_countries' but missing in all 'aggregates'. For example,
        pass
        lambda x: logging.error('Country {} missing'.format(x))
        to log errors for such countries. Default: do nothing

    log_merge_multiple_strings: function, optional
        Function to call for logging multiple strings, see
        log_missing_countries. Default: do nothing

    coco: instance of CountryConverter, optional
        CountryConverter instance used for the conversion. Pass a custom
        one if additional data is needed in addition to the custom country
        converter file. If None (default), the bare CountryConverter is used

    as_dataframe: boolean or str, optional
        If False, output as OrderedDict. If True or str, output as pandas
        dataframe. If str and 'full', output as a full matrix, otherwise
        only two columns with the original and aggregated names are
        returned.

    original_countries_class: str, optional
        Valid column name of CountryConverter.data. This parameter is
        needed if a list of countries is passed to 'original_countries'
        and strings corresponding to data in CountryConverter.data are
        used subsequently. Can be omitted otherwise.

    Returns
    -------
    OrderedDict or DataFrame (defined by 'as_dataframe')
    """
    if coco is None:
        coco = CountryConverter()

    # A string selects a whole column of the converter data as the source
    # country list; otherwise infer the classification from the first entry.
    if type(original_countries) is str:
        original_countries_class = original_countries
        original_countries = coco.data[original_countries].values
    else:
        original_countries_class = (original_countries_class or
                                    coco._get_input_format_from_name(
                                        original_countries[0]))

    if type(aggregates) is not list:
        aggregates = [aggregates]

    # Ordered mapping original name -> aggregate; None marks "unassigned".
    correspond = OrderedDict.fromkeys(original_countries)
    for agg in aggregates:
        if type(agg) is str:
            agg = coco.get_correspondance_dict(original_countries_class,
                                               agg)
        for country in original_countries:
            # Earlier aggregates take precedence: only fill unassigned slots.
            if correspond.get(country) is None:
                try:
                    entry = agg[country]
                except KeyError:
                    entry = None
                if type(entry) is list:
                    if 1 < len(entry):
                        # Multiple targets: join or drop, and optionally log.
                        if merge_multiple_string:
                            entry = merge_multiple_string.join([
                                str(e) for e in entry])
                        else:
                            entry = None
                        if log_merge_multiple_strings:
                            log_merge_multiple_strings(country)
                    else:
                        entry = entry[0]
                correspond[country] = entry

    # Handle countries that stayed unassigned after all aggregates.
    for country in original_countries:
        if correspond.get(country) is None:
            if missing_countries is True:
                correspond[country] = country
            elif missing_countries is False:
                del correspond[country]
            else:
                correspond[country] = missing_countries
            if log_missing_countries:
                log_missing_countries(country)

    if as_dataframe:
        correspond = pd.DataFrame.from_dict(
            correspond, orient='index').reset_index()
        correspond.columns = ['original', 'aggregated']
        # 'f...' (e.g. 'full') requests the full 0/1 concordance matrix.
        if ((type(as_dataframe) is str) and
                (as_dataframe[0].lower() == 'f')):
            _co_list = correspond.original
            correspond['val'] = 1
            correspond = correspond.set_index(
                ['original', 'aggregated']).unstack().fillna(0)['val']
            correspond = correspond.loc[_co_list]

    return correspond
java
/**
 * Pushes an NS_RECORD_FAILED error status to the client, describing this
 * stream and the reason recording failed.
 *
 * @param reason human readable description of the failure
 */
private void sendRecordFailedNotify(String reason) {
    final Status status = new Status(StatusCodes.NS_RECORD_FAILED);
    status.setLevel(Status.ERROR);
    status.setClientid(getStreamId());
    status.setDetails(getPublishedName());
    // setDesciption is the (misspelled) upstream API name.
    status.setDesciption(reason);

    final StatusMessage message = new StatusMessage();
    message.setBody(status);
    pushMessage(message);
}
python
def energy_ratio_by_chunks(x, param):
    """
    Calculates the sum of squares of chunk i out of N chunks expressed as a ratio
    with the sum of squares over the whole series.

    The series is divided into ``num_segments`` chunks with
    ``numpy.array_split`` (remainder points go to the leading chunks), and
    the feature is the squared energy of chunk ``segment_focus`` divided by
    the total squared energy. The answer for ``num_segments = 1`` is a
    trivial "1"; the ratios over all chunks sum to 1.0.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints
    :return: the feature values
    :return type: list of tuples (index, data)
    """
    full_energy = np.sum(x ** 2)
    indices = []
    values = []
    for cfg in param:
        n_chunks = cfg["num_segments"]
        focus = cfg["segment_focus"]
        assert focus < n_chunks
        assert n_chunks > 0

        chunk = np.array_split(x, n_chunks)[focus]
        values.append(np.sum(chunk ** 2.0) / full_energy)
        indices.append("num_segments_{}__segment_focus_{}".format(n_chunks, focus))
    return list(zip(indices, values))
python
def create(cls, propertyfile, allow_unknown):
    """
    Create a Property instance by attempting to parse the given property file.
    @param propertyfile: A file name of a property file
    @param allow_unknown: Whether to accept unknown properties
    @raise BenchExecException: if the file contains no known property and
        allow_unknown is false
    """
    with open(propertyfile) as f:
        content = f.read().strip()

    # parse content for known properties
    is_svcomp = False
    known_properties = []
    only_known_svcomp_property = True
    if content == 'OBSERVER AUTOMATON' or content == 'SATISFIABLE':
        known_properties = [_PROPERTY_NAMES[content]]
    elif content.startswith('CHECK'):
        is_svcomp = True
        for line in filter(None, content.splitlines()):
            # BUG FIX: this previously tested content.startswith('CHECK'),
            # which is always true inside this branch and made the
            # "not an SV-COMP property file" case unreachable. Each *line*
            # must be checked individually.
            if line.startswith('CHECK'):
                # SV-COMP property, either a well-known one or a new one
                props_in_line = [
                    prop for (substring, prop) in _PROPERTY_NAMES.items()
                    if substring in line]
                if len(props_in_line) == 1:
                    known_properties.append(props_in_line[0])
                else:
                    only_known_svcomp_property = False
            else:
                # not actually an SV-COMP property file
                is_svcomp = False
                known_properties = []
                break

    # check if some known property content was found
    subproperties = None
    if only_known_svcomp_property and len(known_properties) == 1:
        is_well_known = True
        name = known_properties[0]
    elif only_known_svcomp_property and set(known_properties) == _MEMSAFETY_SUBPROPERTIES:
        # all memsafety sub-properties together form the memsafety property
        is_well_known = True
        name = _PROP_MEMSAFETY
        subproperties = list(known_properties)
    else:
        if not allow_unknown:
            raise BenchExecException(
                'File "{0}" does not contain a known property.'.format(propertyfile))
        is_well_known = False
        # fall back to the file name (without extension) as property name
        name = os.path.splitext(os.path.basename(propertyfile))[0]

    return cls(propertyfile, is_well_known, is_svcomp, name, subproperties)
python
def prox_tv(x, gamma, G, A=None, At=None, nu=1, tol=10e-4, maxit=200, use_matrix=True):
    r"""
    Total Variation proximal operator for graphs.

    This function computes the TV proximal operator for graphs. The TV norm
    is the one norm of the gradient. The gradient is defined in the
    function :meth:`pygsp.graphs.Graph.grad`.
    This function requires the PyUNLocBoX to be executed.

    This function solves:

    :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma  \|x\|_{TV}`

    Parameters
    ----------
    x: int
        Input signal
    gamma: ndarray
        Regularization parameter
    G: graph object
        Graphs structure
    A: lambda function
        Forward operator, this parameter allows to solve the following problem:
        :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma  \| A x\|_{TV}`
        (default = Id)
    At: lambda function
        Adjoint operator. (default = Id)
    nu: float
        Bound on the norm of the operator (default = 1)
    tol: float
        Stops criterion for the loop. The algorithm will stop if :
        :math:`\frac{n(t) - n(t - 1)} {n(t)} < tol`
        where :math:`n(t) = f(x) + 0.5 \|x-y\|_2^2` is the objective function at iteration :math:`t`
        (default = :math:`10e-4`)
    maxit: int
        Maximum iteration. (default = 200)
    use_matrix: bool
        If a matrix should be used. (default = True)

    Returns
    -------
    sol: solution

    Examples
    --------

    """
    # Default forward/adjoint operators are the identity.
    if A is None:
        def A(x):
            return x
    if At is None:
        def At(x):
            return x

    tight = 0
    # NOTE(review): l1_nu is computed but never used below -- it was
    # presumably meant to be passed to norm_l1 as nu; confirm upstream.
    l1_nu = 2 * G.lmax * nu

    if use_matrix:
        def l1_a(x):
            return G.Diff * A(x)

        # NOTE(review): `D` is undefined here and will raise NameError when
        # this closure runs -- it likely should be G.Diff (i.e. the adjoint
        # via G.Diff.T). Confirm against the intended operator.
        def l1_at(x):
            return G.Diff * At(D.T * x)
    else:
        def l1_a(x):
            return G.grad(A(x))

        def l1_at(x):
            return G.div(x)

    functions, _ = _import_pyunlocbox()
    # NOTE(review): `verbose` is undefined in this scope (NameError at call
    # time), and the result of norm_l1 is discarded while the docstring
    # promises a solution `sol` -- this function appears unfinished.
    functions.norm_l1(x, gamma, A=l1_a, At=l1_at, tight=tight, maxit=maxit, verbose=verbose, tol=tol)