language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Builds an injection binding for an {@code AdministeredObjectDefinition} annotation:
 * constructs an {@link AdministeredObjectDefinitionInjectionBinding} bound to the given
 * JNDI name and this factory's {@code ivNameSpaceConfig}, then merges the annotation
 * data into it (the third merge argument is always {@code null} here).
 *
 * @param annotation    the administered-object definition to bind
 * @param instanceClass class the annotation was found on
 * @param member        unused by this implementation
 * @param jndiName      JNDI name the binding is registered under
 * @return the merged binding
 * @throws InjectionException if the merge fails
 */
@Override public InjectionBinding<AdministeredObjectDefinition> createInjectionBinding(AdministeredObjectDefinition annotation, Class<?> instanceClass, Member member, String jndiName) throws InjectionException { InjectionBinding<AdministeredObjectDefinition> injectionBinding = new AdministeredObjectDefinitionInjectionBinding(jndiName, ivNameSpaceConfig); injectionBinding.merge(annotation, instanceClass, null); return injectionBinding; }
java
/**
 * Returns the namespaces for the given KAM by delegating to the
 * {@code KamInfo}-based overload.
 *
 * @param kam the KAM to query; must not be {@code null}
 * @return the namespaces, or {@code null} when the KAM does not exist
 *         (NOTE(review): null return — callers must check; an empty list
 *         would be the safer contract but would change behavior)
 * @throws InvalidArgument if {@code kam} is {@code null}
 */
@Override public List<Namespace> getNamespaces(Kam kam) { if (kam == null) throw new InvalidArgument("kam", kam); if (!exists(kam)) return null; return getNamespaces(kam.getKamInfo()); }
java
/**
 * Computes the age encoded in an ID card number relative to a reference date.
 * Extracts the birth date via {@code getBirthByIdCard} and parses it with the
 * pattern {@code yyyyMMdd} before delegating to {@code DateUtil.age}.
 *
 * @param idCard        the ID card number containing the birth date
 * @param dateToCompare the date the age is computed against
 * @return the age in years as computed by {@code DateUtil.age}
 */
public static int getAgeByIdCard(String idCard, Date dateToCompare) { String birth = getBirthByIdCard(idCard); return DateUtil.age(DateUtil.parse(birth, "yyyyMMdd"), dateToCompare); }
java
/**
 * Sets the custom attributes, taking a defensive copy of the supplied
 * collection so later caller-side mutation cannot affect this object.
 *
 * @param customAttributes the attributes to store, or {@code null} to clear
 */
public void setCustomAttributes(java.util.Collection<SchemaAttributeType> customAttributes) {
    this.customAttributes = (customAttributes == null)
            ? null
            : new java.util.ArrayList<SchemaAttributeType>(customAttributes);
}
python
def cmd_fence_move(self, args):
    '''handle fencepoint move'''
    # Guard clauses: argument count, a previously fetched list, and a valid index.
    if len(args) < 1:
        print("Usage: fence move FENCEPOINTNUM")
        return
    if not self.have_list:
        print("Please list fence points first")
        return
    point_num = int(args[0])
    if point_num <= 0 or point_num > self.fenceloader.count():
        print("Invalid fence point number %u" % point_num)
        return
    # The map module may be absent entirely, or present without a click yet.
    try:
        click_pos = self.module('map').click_position
    except Exception:
        print("No map available")
        return
    if click_pos is None:
        print("No map click position available")
        return
    # note we don't subtract 1, as first fence point is the return point
    self.fenceloader.move(point_num, click_pos[0], click_pos[1])
    if self.send_fence():
        print("Moved fence point %u" % point_num)
java
/**
 * Sets the effective label frequency caps.
 * NOTE(review): stores the caller's array reference directly (no defensive copy),
 * which matches the surrounding generated-API style.
 *
 * @param effectiveLabelFrequencyCaps the caps array to store; may be {@code null}
 */
public void setEffectiveLabelFrequencyCaps(com.google.api.ads.admanager.axis.v201811.LabelFrequencyCap[] effectiveLabelFrequencyCaps) { this.effectiveLabelFrequencyCaps = effectiveLabelFrequencyCaps; }
python
def fit_delta_ts(data, livetime, fit_background=True):
    """Fits gaussians to delta t for each PMT pair.

    Parameters
    ----------
    data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
    livetime: length of data taking in seconds
    fit_background: if True: fits gaussian with offset, else without offset

    Returns
    -------
    numpy arrays with rates and means for all PMT combinations
    """
    data = data / livetime
    # symmetric time axis centered on zero, derived from the histogram width
    start = -(data.shape[1] - 1) / 2
    end = -start + 1
    xs = np.arange(start, end)
    rates = []
    sigmas = []
    means = []
    popts = []
    pcovs = []
    for combination in data:
        # seed the mean at the histogram peak
        mean0 = np.argmax(combination) + start
        try:
            if fit_background:
                popt, pcov = optimize.curve_fit(
                    gaussian, xs, combination,
                    p0=[mean0, 4., 5., 0.1],
                    bounds=([start, 0, 0, 0], [end, 10, 10, 1]))
            else:
                popt, pcov = optimize.curve_fit(
                    gaussian_wo_offset, xs, combination,
                    p0=[mean0, 4., 5.],
                    bounds=([start, 0, 0], [end, 10, 10]))
        except RuntimeError:
            popt = (0, 0, 0, 0)
            # BUG FIX: pcov was previously left unset on fit failure, causing a
            # NameError on the first combination (or silently reusing the
            # previous pair's covariance). Record a zero covariance instead.
            pcov = np.zeros((len(popt), len(popt)))
        rates.append(popt[2])
        means.append(popt[0])
        sigmas.append(popt[1])
        popts.append(popt)
        pcovs.append(pcov)
    return (
        np.array(rates), np.array(means), np.array(sigmas),
        np.array(popts), np.array(pcovs)
    )
python
def add_item(self, assessment_id, item_id):
    """Adds an existing ``Item`` to an assessment.

    arg:    assessment_id (osid.id.Id): the ``Id`` of the ``Assessment``
    arg:    item_id (osid.id.Id): the ``Id`` of the ``Item``
    raise:  NotFound - ``assessment_id`` or ``item_id`` not found
    raise:  NullArgument - ``assessment_id`` or ``item_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    # Only genuine Assessment ids are accepted here.
    if assessment_id.get_identifier_namespace() != 'assessment.Assessment':
        raise errors.InvalidArgument
    first_part = self._get_first_part_id(assessment_id)
    self._part_item_design_session.add_item(item_id, first_part)
python
def get_block_symbol_data(editor, block):
    """
    Gets the list of ParenthesisInfo for specific text block.

    :param editor: Code editor instance
    :param block: block to parse
    """
    def _collect(editor, block, character):
        """
        Returns the symbols matching ``character`` found in the block text,
        skipping occurrences inside string literals or comments.

        :param editor: code editor instance
        :param block: block to parse
        :param character: character to look for.
        """
        text = block.text()
        found = []
        cursor = QTextCursor(block)
        cursor.movePosition(cursor.StartOfBlock)
        pos = text.find(character, 0)
        cursor.movePosition(cursor.Right, cursor.MoveAnchor, pos)
        while pos != -1:
            if not TextHelper(editor).is_comment_or_string(cursor):
                # symbol is real code, keep it
                found.append(ParenthesisInfo(pos, character))
            pos = text.find(character, pos + 1)
            cursor.movePosition(cursor.StartOfBlock)
            cursor.movePosition(cursor.Right, cursor.MoveAnchor, pos)
        return found

    parentheses = sorted(
        _collect(editor, block, '(') + _collect(editor, block, ')'),
        key=lambda info: info.position)
    square_brackets = sorted(
        _collect(editor, block, '[') + _collect(editor, block, ']'),
        key=lambda info: info.position)
    braces = sorted(
        _collect(editor, block, '{') + _collect(editor, block, '}'),
        key=lambda info: info.position)
    return parentheses, square_brackets, braces
python
def plot_qq_unf(fignum, D, title, subplot=False, degrees=True):
    """
    plots data against a uniform distribution in 0=>360.

    Parameters
    _________
    fignum : matplotlib figure number
    D : data
    title : title for plot
    subplot : if True, make this number one of two subplots
    degrees : if True, assume that these are degrees

    Return
    Mu : Mu statistic (Fisher et al., 1987)
    Mu_crit : critical value of Mu for uniform distribution

    Effect
    ______
    makes a Quantile Quantile plot of data
    """
    if subplot == True:
        plt.subplot(1, 2, fignum)
    else:
        plt.figure(num=fignum)
    X, Y, dpos, dneg = [], [], 0., 0.
    if degrees:
        D = (np.array(D)) % 360
    # normalize to [0, 1] and sort for the quantile comparison
    X = D/D.max()
    X = np.sort(X)
    n = float(len(D))
    i = np.arange(0, len(D))
    Y = (i-0.5)/n
    ds = (i/n)-X
    dpos = ds.max()
    dneg = ds.min()
    plt.plot(Y, X, 'ro')
    v = dneg + dpos  # kuiper's v
    # Mu of fisher et al. equation 5.16
    Mu = v * (np.sqrt(n) - 0.567 + (old_div(1.623, (np.sqrt(n)))))
    plt.axis([0, 1., 0., 1.])
    bounds = plt.axis()
    notestr = 'N: ' + '%i' % (n)
    plt.text(.1 * bounds[1], .9 * bounds[3], notestr)
    notestr = 'Mu: ' + '%7.3f' % (Mu)
    plt.text(.1 * bounds[1], .8 * bounds[3], notestr)
    # NOTE(review): when Mu is exactly 1.207, none of these branches match and
    # the previous notestr ("Mu: ...") is re-drawn — preserved from original.
    if Mu > 1.347:
        notestr = "Non-uniform (99%)"
    elif Mu < 1.207:
        notestr = "Uniform (95%)"
    elif Mu > 1.207:
        notestr = "Uniform (99%)"
    # BUG FIX: this annotation was drawn twice at the same coordinates;
    # the duplicate plt.text call has been removed.
    plt.text(.1 * bounds[1], .7 * bounds[3], notestr)
    plt.title(title)
    plt.xlabel('Uniform Quantile')
    plt.ylabel('Data Quantile')
    return Mu, 1.207
java
/**
 * Static factory: creates a {@link MessageNamedArgumentFinder} for the given message
 * with no prefix (first constructor argument is {@code null}) and the supplied
 * field types converted via {@code makeFieldTypes}.
 *
 * @param message    the message to search for named arguments; must not be null
 * @param fieldTypes the field types to consider
 * @return a new finder instance
 */
public static <M extends PMessage<M,F>, F extends PField> MessageNamedArgumentFinder<M,F> forMessage(@Nonnull M message, @Nonnull FieldType... fieldTypes) { return new MessageNamedArgumentFinder<>(null, message, makeFieldTypes(fieldTypes)); }
java
/**
 * Serializes a {@code MarcRecord} into JSON, appending to {@code sb}.
 * <p>
 * The first record written (tracked by the {@code top} flag) optionally emits an
 * Elasticsearch bulk metadata line; subsequent records are separated according to
 * {@code style} (comma for ARRAY, newline for LINES, newline + metadata line for
 * ELASTICSEARCH_BULK). The record is then written as a JSON object: each tag maps
 * either to a scalar value or to an array of repeats, where each repeat may itself
 * hold indicator maps whose values are arrays of subfield objects. Scalar values
 * are escaped via {@code escape(...)}; nulls are written as JSON {@code null}.
 * <p>
 * NOTE(review): the nesting depth and the reuse of the loop variable {@code o}
 * across levels make the statement order load-bearing; left byte-identical.
 */
@SuppressWarnings("unchecked") private void toJson(MarcRecord marcRecord, StringBuilder sb) { if (marcRecord.isEmpty()) { return; } if (top) { top = false; if (style == Style.ELASTICSEARCH_BULK) { writeMetaDataLine(marcRecord); } } else { switch (style) { case ARRAY: sb.append(","); break; case LINES: sb.append("\n"); break; case ELASTICSEARCH_BULK: sb.append("\n"); writeMetaDataLine(marcRecord); break; default: break; } } sb.append("{"); int c0 = 0; for (Map.Entry<String, Object> tags : marcRecord.entrySet()) { if (c0 > 0) { sb.append(","); } String tag = tags.getKey(); sb.append("\"").append(tag).append("\":"); Object o = tags.getValue(); if (o instanceof Map) { int c00 = 0; Map<String, Object> repeatMap = (Map<String, Object>) o; sb.append("["); for (Map.Entry<String, Object> repeats : repeatMap.entrySet()) { if (c00 > 0) { sb.append(","); } o = repeats.getValue(); if (!(o instanceof List)) { o = Collections.singletonList(o); } List<?> list = (List<?>) o; if (list.size() > 1) { sb.append("["); } int c1 = 0; for (Object value : list) { if (c1 > 0) { sb.append(","); } if (value instanceof Map) { sb.append("{"); int c2 = 0; for (Map.Entry<String, Object> indicators : ((Map<String, Object>) value).entrySet()) { if (c2 > 0) { sb.append(","); } String indicator = indicators.getKey(); sb.append("\"").append(indicator).append("\":"); o = indicators.getValue(); if (!(o instanceof List)) { o = Collections.singletonList(o); } List<?> list2 = (List<?>) o; sb.append("["); int c3 = 0; for (Object value2 : list2) { if (c3 > 0) { sb.append(","); } if (value2 instanceof Map) { Map<String, Object> map = (Map<String, Object>) value2; int c4 = 0; for (Map.Entry<String, Object> subfield : map.entrySet()) { if (c4 > 0) { sb.append(","); } sb.append("{"); sb.append("\"").append(subfield.getKey()).append("\":"); if (subfield.getValue() instanceof List) { sb.append("["); int c5 = 0; for (String s : (List<String>) subfield.getValue()) { if (c5 > 0) { sb.append(","); } 
sb.append("\"").append(escape(s)).append("\""); c5++; } sb.append("]"); } else { sb.append("\"").append(escape(subfield.getValue().toString())).append("\""); } c4++; sb.append("}"); } } else { sb.append("\"").append(escape(value2.toString())).append("\""); } c3++; } sb.append("]"); c2++; } sb.append("}"); } else { if (value == null) { sb.append("null"); } else { sb.append("\"").append(escape(value.toString())).append("\""); } } c1++; } if (list.size() > 1) { sb.append("]"); } c00++; } sb.append("]"); } else { if (o == null) { sb.append("null"); } else { sb.append("\"").append(escape(o.toString())).append("\""); } } c0++; } sb.append('}'); }
python
def is_to_be_built_or_is_installed(self, shutit_module_obj):
    """Returns true if this module is configured to be built,
    or if it is already installed.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # A build flag in the module's config short-circuits the installed check.
    module_cfg = self.cfg[shutit_module_obj.module_id]
    if module_cfg['shutit.core.module.build']:
        return True
    return self.is_installed(shutit_module_obj)
java
/**
 * Compresses {@code len} bytes of {@code b} starting at {@code off} into the stream.
 * <p>
 * Validates arguments, then: if appending this segment would push the compressor's
 * consumed byte count past {@code MAX_INPUT_SIZE}, the current segment is flushed
 * ({@code finish()} + {@code compressor.reset()}) first. An input larger than
 * {@code MAX_INPUT_SIZE} is written as its own length-prefixed record
 * ({@code rawWriteInt(len)}) and compressed in chunks of at most
 * {@code MAX_INPUT_SIZE}. Otherwise the data is handed to the compressor and,
 * when the compressor's buffer fills, a length-prefixed compressed segment is
 * emitted immediately.
 *
 * @param b   source buffer; must not be null
 * @param off start offset within {@code b}
 * @param len number of bytes to consume
 * @throws IOException if the stream is already finished or compression fails
 */
public void write(byte[] b, int off, int len) throws IOException { // Sanity checks if (compressor.finished()) { throw new IOException("write beyond end of stream"); } if (b == null) { throw new NullPointerException(); } else if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)) { throw new IndexOutOfBoundsException(); } else if (len == 0) { return; } long limlen = compressor.getBytesRead(); if (len + limlen > MAX_INPUT_SIZE && limlen > 0) { // Adding this segment would exceed the maximum size. // Flush data if we have it. finish(); compressor.reset(); } if (len > MAX_INPUT_SIZE) { // The data we're given exceeds the maximum size. Any data // we had have been flushed, so we write out this chunk in segments // not exceeding the maximum size until it is exhausted. rawWriteInt(len); do { int bufLen = Math.min(len, MAX_INPUT_SIZE); compressor.setInput(b, off, bufLen); compressor.finish(); while (!compressor.finished()) { compress(); } compressor.reset(); off += bufLen; len -= bufLen; } while (len > 0); return; } // Give data to the compressor compressor.setInput(b, off, len); if (!compressor.needsInput()) { // compressor buffer size might be smaller than the maximum // size, so we permit it to flush if required. rawWriteInt((int)compressor.getBytesRead()); do { compress(); } while (!compressor.needsInput()); } }
python
def digital_read(self, pin):
    """
    Retrieve the last data update for the specified digital pin.
    Intended for a polling application: schedules the async read on the
    instance's event loop and blocks until it completes.

    :param pin: Digital pin number
    :returns: Last value reported for the digital pin
    """
    future = asyncio.ensure_future(self.core.digital_read(pin))
    return self.loop.run_until_complete(future)
java
/**
 * Sets the layer ids, copying the supplied collection into the SDK's
 * internal list type so caller-side mutation has no effect.
 *
 * @param layerIds the ids to store, or {@code null} to clear
 */
public void setLayerIds(java.util.Collection<String> layerIds) {
    this.layerIds = (layerIds == null)
            ? null
            : new com.amazonaws.internal.SdkInternalList<String>(layerIds);
}
java
/**
 * Validates that the given executor service can still accept work.
 *
 * @param executorService the executor to check
 * @param <T> concrete executor service type, returned unchanged for chaining
 * @return the same executor service instance
 * @throws IllegalArgumentException if the executor has been shut down
 */
public static <T extends ExecutorService> T requireNotShutdown(final T executorService) {
    // Happy path first: a live executor passes straight through.
    if (!executorService.isShutdown()) {
        return executorService;
    }
    throw new IllegalArgumentException("ExecutorService is shutdown");
}
java
/**
 * Checks whether a logger with the given name is registered under the inner map
 * keyed by the supplied message-factory class (creating that inner map if absent).
 *
 * @param name                the logger name to look up
 * @param messageFactoryClass the message factory class used as the registry key
 * @return {@code true} if a logger with that name exists for that factory class
 */
public boolean hasLogger(final String name, final Class<? extends MessageFactory> messageFactoryClass) { return getOrCreateInnerMap(factoryClassKey(messageFactoryClass)).containsKey(name); }
python
def drop_dimension(self, dimensions):
    """Drops dimension(s) from keys

    Args:
        dimensions: Dimension(s) to drop

    Returns:
        Clone of object with with dropped dimension(s)
    """
    # Accept a single dimension or a list of them.
    if np.isscalar(dimensions):
        dimensions = [dimensions]
    remaining = [kd for kd in self.kdims if kd not in dimensions]
    indices = [self.get_dimension_index(kd) for kd in remaining]
    pick = itemgetter(*indices)
    reindexed = [(pick(key), value) for key, value in self.data.items()]
    return self.clone(reindexed, kdims=remaining)
python
def set_schema_location(self, ns_uri, schema_location, replace=False):
    """Sets the schema location of the given namespace.

    If ``replace`` is ``True``, then any existing schema location is
    replaced. Otherwise, if the schema location is already set to a
    different value, an exception is raised. If the schema location is
    set to None, it is effectively erased from this set (this is not
    considered "replacement".)

    Args:
        ns_uri (str): The namespace whose schema location is to be set
        schema_location (str): The schema location URI to set, or None
        replace (bool): Whether to replace any existing schema location

    Raises:
        NamespaceNotFoundError: If the given namespace isn't in this set.
        ConflictingSchemaLocationError: If replace is False,
            schema_location is not None, and the namespace already has a
            different schema location in this set.
    """
    ni = self.__lookup_uri(ns_uri)
    if ni.schema_location == schema_location:
        # Already set to the requested value; nothing to do.
        return
    if replace or ni.schema_location is None:
        ni.schema_location = schema_location
    elif schema_location is None:
        # Erasure is always permitted; not considered "replacement".
        ni.schema_location = None
    else:
        raise ConflictingSchemaLocationError(ns_uri, ni.schema_location,
                                             schema_location)
java
/**
 * Convenience overload: checks assignability between the values of the two
 * mapped fields by delegating to the value-based {@code isAssignableFrom}.
 *
 * @param destination the field whose value would receive the assignment
 * @param source      the field whose value would be assigned
 * @return {@code true} if the source value is assignable to the destination value
 */
public static boolean isAssignableFrom(MappedField destination,MappedField source) { return isAssignableFrom(destination.getValue(), source.getValue()); }
java
/**
 * Returns the data of the behavior registered under {@code behaviorClass},
 * or {@code null} when no such behavior is attached.
 *
 * @param behaviorClass the behavior type to look up
 * @return the behavior's data, or {@code null} if the behavior is absent
 */
@Override
public <BD extends BehaviorData, B extends Behavior<BD, ?>> BD getBehaviorData(final Class<B> behaviorClass) {
    final B behavior = getBehavior(behaviorClass);
    return (behavior == null) ? null : behavior.data();
}
python
def _find_year_for_season(league):
    """
    Return the necessary seaons's year based on the current date.

    Since all sports start and end at different times throughout the year,
    simply using the current year is not sufficient to describe a season. For
    example, the NCAA Men's Basketball season begins in November every year.
    However, for the November and December months, the following year is used
    to denote the season (ie. November 2017 marks the start of the '2018'
    season) on sports-reference.com. This rule does not apply to all sports.
    Baseball begins and ends in one single calendar year, so the year never
    needs to be incremented.

    Additionally, since information for future seasons is generally not
    finalized until a month before the season begins, the year will default to
    the most recent season until the month prior to the season start date. For
    example, the 2018 MLB season begins in April. In January 2018, however, not
    all of the season's information is present in the system, so the default
    year will be '2017'.

    Parameters
    ----------
    league : string
        A string pertaining to the league start information as listed in
        SEASON_START_MONTH (ie. 'mlb', 'nba', 'nfl', etc.). League must be
        present in SEASON_START_MONTH.

    Returns
    -------
    int
        The respective season's year.

    Raises
    ------
    ValueError
        If the passed 'league' is not a key in SEASON_START_MONTH.
    """
    today = _todays_date()
    if league not in SEASON_START_MONTH:
        # BUG FIX: the '%s' placeholder was never interpolated, so the error
        # message literally contained '%s' instead of the league name.
        raise ValueError('"%s" league cannot be found!' % league)
    start = SEASON_START_MONTH[league]['start']
    wrap = SEASON_START_MONTH[league]['wrap']
    # Wrapping seasons (eg. NBA) use next year's label from the month before
    # the season start onward; non-wrapping January starts roll over in
    # December; otherwise fall back to the previous or current year.
    if wrap and start - 1 <= today.month <= 12:
        return today.year + 1
    elif not wrap and start == 1 and today.month == 12:
        return today.year + 1
    elif not wrap and not start - 1 <= today.month <= 12:
        return today.year - 1
    else:
        return today.year
java
/**
 * Reads the classpath resource {@code name} fully into a byte array, copying
 * it through a 4 KiB buffer and closing both streams via {@code Files.close}
 * in the finally block.
 * <p>
 * NOTE(review): if {@code getResourceAsStream(name)} returns {@code null}
 * (resource missing), this method will throw a {@code NullPointerException}
 * rather than a descriptive error — confirm whether callers rely on that.
 *
 * @param name the resource name to load
 * @return the resource contents
 * @throws IOException if reading the resource fails
 */
public static byte[] getResourceAsBytes(String name) throws IOException { InputStream is = getResourceAsStream(name); ByteArrayOutputStream os = new ByteArrayOutputStream(); try { byte[] buffer = new byte[4096]; int length = 0; while((length = is.read(buffer)) != -1) { os.write(buffer, 0, length); } } finally { Files.close(is); Files.close(os); } return os.toByteArray(); }
python
def number_of_changes(slots, events, original_schedule, X, **kwargs):
    """
    A function that counts the number of changes between a given schedule
    and an array (either numpy array of lp array).
    """
    original_array = schedule_to_array(original_schedule, events=events,
                                       slots=slots)
    total = 0
    # A cell scheduled in the original contributes when it is unscheduled in X,
    # and vice versa.
    for row, event_row in enumerate(original_array):
        for col, scheduled in enumerate(event_row):
            total += X[row, col] if scheduled == 0 else 1 - X[row, col]
    return total
python
def customdata(self, lookup, default=None):
    """
    Args:
        lookup: the custom data file
        default: the optional value to return if lookup failed; returns None if not set

    Returns:
        The custom data returned from the file 'lookup' or default/None if no match found
    """
    try:
        if lookup in EFConfig.CUSTOM_DATA:
            return EFConfig.CUSTOM_DATA[lookup]
    except AttributeError:
        # EFConfig has no CUSTOM_DATA attribute at all.
        return default
    return default
java
/**
 * Processes an OOXML (.xlsx) OPC package in low-footprint mode.
 * <p>
 * If signature verification is enabled in the configuration, verifies the
 * document signature and every signature part, optionally checking each
 * signer's certificate chain; any failure raises
 * {@code FormatNotUnderstoodException}. It then creates an {@code XSSFReader},
 * reads the workbook's date-1904 flag, loads the shared string table using
 * either the SAX (push, {@code ReadOnlySharedStringsTable}) or StAX (pull,
 * {@code EncryptedCachedDiskStringsTable}) strategy per configuration, and
 * iterates the sheets: sheets matching the configured name filter (or all
 * sheets when no filter is set) are parsed eagerly with SAX or queued for lazy
 * StAX parsing. Finally it honors the configured skip-line count and, when
 * enabled, reads and sanitizes the header row.
 * <p>
 * NOTE(review): the string literal near the end is split across a line break
 * in this extraction ("failed. / Cannot read XML content") — preserved
 * byte-identical; confirm against the upstream file.
 *
 * @param pkg the open OPC package to process
 * @throws FormatNotUnderstoodException if verification or parsing fails
 */
private void processOPCPackage(OPCPackage pkg) throws FormatNotUnderstoodException { LOG.debug("Processing OPCPackage in low footprint mode"); // check if signature should be verified if (this.hocr.getVerifySignature()) { LOG.info("Verifying signature of document"); SignatureConfig sic = new SignatureConfig(); sic.setOpcPackage(pkg); SignatureInfo si = new SignatureInfo(); si.setSignatureConfig(sic); if (!si.verifySignature()) { throw new FormatNotUnderstoodException("Cannot verify signature of OOXML (.xlsx) file: "+this.hocr.getFileName()); } else { LOG.info("Successfully verifed first part signature of OXXML (.xlsx) file: "+this.hocr.getFileName()); } Iterator<SignaturePart> spIter = si.getSignatureParts().iterator(); while (spIter.hasNext()) { SignaturePart currentSP = spIter.next(); if (!(currentSP.validate())) { throw new FormatNotUnderstoodException("Could not validate all signature parts for file: "+this.hocr.getFileName()); } else { X509Certificate currentCertificate = currentSP.getSigner(); try { if ((this.hocr.getX509CertificateChain().size()>0) && (!CertificateChainVerificationUtil.verifyCertificateChain(currentCertificate, this.hocr.getX509CertificateChain()))) { throw new FormatNotUnderstoodException("Could not validate signature part for principal \""+currentCertificate.getSubjectX500Principal().getName()+"\" : "+this.hocr.getFileName()); } } catch (CertificateException | NoSuchAlgorithmException | NoSuchProviderException | InvalidAlgorithmParameterException e) { LOG.error("Could not validate signature part for principal \""+currentCertificate.getSubjectX500Principal().getName()+"\" : "+this.hocr.getFileName(), e); throw new FormatNotUnderstoodException("Could not validate signature part for principal \""+currentCertificate.getSubjectX500Principal().getName()+"\" : "+this.hocr.getFileName()); } } } LOG.info("Successfully verifed all signatures of OXXML (.xlsx) file: "+this.hocr.getFileName()); } // continue in lowfootprint mode XSSFReader r; try { r = 
new XSSFReader( pkg ); } catch (IOException | OpenXML4JException e) { LOG.error(e); throw new FormatNotUnderstoodException("Error cannot parse new Excel file (.xlsx)"); } try { // read date format InputStream workbookDataXML = r.getWorkbookData(); WorkbookDocument wd = WorkbookDocument.Factory.parse(workbookDataXML); this.isDate1904 = wd.getWorkbook().getWorkbookPr().getDate1904(); // read shared string tables if (HadoopOfficeReadConfiguration.OPTION_LOWFOOTPRINT_PARSER_SAX.equalsIgnoreCase(this.hocr.getLowFootprintParser())) { this.pushSST = new ReadOnlySharedStringsTable(pkg); } else if (HadoopOfficeReadConfiguration.OPTION_LOWFOOTPRINT_PARSER_STAX.equalsIgnoreCase(this.hocr.getLowFootprintParser())) { List<PackagePart> pkgParts = pkg.getPartsByContentType(XSSFRelation.SHARED_STRINGS.getContentType()); if (pkgParts.size()>0) { this.pullSST = new EncryptedCachedDiskStringsTable(pkgParts.get(0), this.hocr.getSstCacheSize(), this.hocr.getCompressSST(), this.ca, this.cm); } } this.styles = r.getStylesTable(); XSSFReader.SheetIterator iter = (XSSFReader.SheetIterator)r.getSheetsData(); int sheetNumber = 0; while (iter.hasNext()) { // check if we need to parse this sheet? 
boolean parse=false; if (this.sheets!=null) { for (int i=0;i<this.sheets.length;i++) { if (iter.getSheetName().equals(this.sheets[i])) { parse=true; break; } } } else { parse=true; } // sheet is supposed to be parsed if (parse) { InputStream rawSheetInputStream = iter.next(); this.sheetNameList.add(iter.getSheetName()); InputSource rawSheetInputSource = new InputSource(rawSheetInputStream); if (HadoopOfficeReadConfiguration.OPTION_LOWFOOTPRINT_PARSER_SAX.equalsIgnoreCase(this.hocr.getLowFootprintParser())) { this.event=true; LOG.info("Using SAX parser for low footprint Excel parsing"); XMLReader sheetParser = SAXHelper.newXMLReader(); XSSFEventParser xssfp = new XSSFEventParser(sheetNumber,iter.getSheetName(), this.spreadSheetCellDAOCache); ContentHandler handler = new XSSFSheetXMLHandler( this.styles, iter.getSheetComments(), this.pushSST, xssfp, this.useDataFormatter, false); sheetParser.setContentHandler(handler); sheetParser.parse(rawSheetInputSource); sheetNumber++; } else if (HadoopOfficeReadConfiguration.OPTION_LOWFOOTPRINT_PARSER_STAX.equalsIgnoreCase(this.hocr.getLowFootprintParser())) { LOG.info("Using STAX parser for low footprint Excel parsing"); this.event=false; this.pullSheetInputList.add(rawSheetInputStream); this.pullSheetNameList.add(iter.getSheetName()); // make shared string table available // everything else is in the getNext method } else { LOG.error("Unknown XML parser configured for low footprint mode: \""+this.hocr.getLowFootprintParser()+"\""); throw new FormatNotUnderstoodException("Unknown XML parser configured for low footprint mode: \""+this.hocr.getLowFootprintParser()+"\""); } } } } catch (InvalidFormatException | IOException e) { LOG.error(e); throw new FormatNotUnderstoodException("Error cannot parse new Excel file (.xlsx)"); } catch (SAXException e) { LOG.error(e); throw new FormatNotUnderstoodException("Parsing Excel sheet in .xlsx format failed. 
Cannot read XML content"); } catch (ParserConfigurationException e) { LOG.error(e); throw new FormatNotUnderstoodException("Parsing Excel sheet in .xlsx format failed. Cannot read XML content"); } catch (XmlException e) { LOG.error(e); throw new FormatNotUnderstoodException("Parsing Excel sheet in .xlsx format failed. Cannot read XML content"); } // check skipping of additional lines for (int i=0;i<this.hocr.getSkipLines();i++) { this.getNext(); } // check header if (this.hocr.getReadHeader()) { LOG.debug("Reading header..."); Object[] firstRow = this.getNext(); if (firstRow!=null) { this.header=new String[firstRow.length]; for (int i=0;i<firstRow.length;i++) { if ((firstRow[i]!=null) && (!"".equals(((SpreadSheetCellDAO)firstRow[i]).getFormattedValue()))) { this.header[i]=((SpreadSheetCellDAO)firstRow[i]).getFormattedValue(); } } this.header=MSExcelParser.sanitizeHeaders(this.header, this.hocr.getColumnNameRegex(), this.hocr.getColumnNameReplace()); } else { this.header=new String[0]; } } this.headerParsed=true; }
java
protected String getProcessDescription(OptionsAndArgs pOpts, VirtualMachineHandler pHandler) { if (pOpts.getPid() != null) { return "PID " + pOpts.getPid(); } else if (pOpts.getProcessPattern() != null) { StringBuffer desc = new StringBuffer("process matching \"") .append(pOpts.getProcessPattern().pattern()) .append("\""); try { desc.append(" (PID: ") .append(pHandler.findProcess(pOpts.getProcessPattern()).getId()) .append(")"); } catch (InvocationTargetException e) { // ignored } catch (NoSuchMethodException e) { // ignored } catch (IllegalAccessException e) { // ignored } return desc.toString(); } else { return "(null)"; } }
java
/**
 * Assigns every executor in the given collection to the same worker slot by
 * recording the mapping in {@code executorToSlot}.
 *
 * @param slot      the slot the executors are assigned to
 * @param executors the executors to assign
 */
public void assign(WorkerSlot slot, Collection<ExecutorDetails> executors) {
    executors.forEach(executor -> this.executorToSlot.put(executor, slot));
}
java
/**
 * Fetches the priority-queue statistics from the NITRO service and returns the
 * first element of the response array.
 * <p>
 * NOTE(review): assumes {@code stat_resources} returns at least one element; an
 * empty response would throw {@code ArrayIndexOutOfBoundsException} — confirm
 * that matches the generated-API convention used by sibling classes.
 *
 * @param service the NITRO service connection
 * @return the first pq_stats resource
 * @throws Exception if the stat call fails
 */
public static pq_stats get(nitro_service service) throws Exception{ pq_stats obj = new pq_stats(); pq_stats[] response = (pq_stats[])obj.stat_resources(service); return response[0]; }
java
/**
 * Creates a Python streaming execution environment backed by a remote Flink
 * cluster at the given host/port, shipping the supplied jar files, and reusing
 * this factory's local temp path and script name.
 *
 * @param host      cluster host
 * @param port      cluster port
 * @param jar_files jars to ship to the cluster
 * @return the wrapped remote execution environment
 */
public PythonStreamExecutionEnvironment create_remote_execution_environment( String host, int port, String... jar_files) { return new PythonStreamExecutionEnvironment( StreamExecutionEnvironment.createRemoteEnvironment(host, port, jar_files), new Path(localTmpPath), scriptName); }
java
/**
 * Blocks until an activity is available when the stack is empty or its top
 * reference has been garbage-collected.
 * <p>
 * With an activity monitor present, polls {@code getLastActivity()} (sleeping
 * between attempts) until one appears and pushes it onto the stack. Without a
 * monitor, and when activity tracking is enabled in the config, sleeps, sets up
 * a monitor, and retries recursively.
 * NOTE(review): the recursive retry has no explicit depth bound — it terminates
 * only once a monitor exists or tracking is disabled; confirm this is intended.
 */
private final void waitForActivityIfNotAvailable(){ if(activityStack.isEmpty() || activityStack.peek().get() == null){ if (activityMonitor != null) { Activity activity = activityMonitor.getLastActivity(); while (activity == null){ sleeper.sleepMini(); activity = activityMonitor.getLastActivity(); } addActivityToStack(activity); } else if(config.trackActivities){ sleeper.sleepMini(); setupActivityMonitor(); waitForActivityIfNotAvailable(); } } }
java
/**
 * Checks whether the given text equals the expected string, lower-casing the
 * candidate first when matching is case-insensitive.
 * NOTE(review): assumes {@code expectedString} is already lower-cased when
 * {@code caseSensitive} is false — confirm at the site that sets it.
 *
 * @param text the text to check; {@code null} never matches
 * @return {@code true} on a match
 */
protected boolean verifyStringMatch(String text) {
    if (text == null) {
        return false;
    }
    final String candidate = this.caseSensitive ? text : text.toLowerCase();
    return candidate.equals(this.expectedString);
}
python
def punsubscribe(self, *args):
    """
    Unsubscribe from the supplied patterns. If empty, unsubscribe from
    all patterns.
    """
    if not args:
        # No explicit patterns: mark every current subscription for removal.
        patterns = self.patterns
    else:
        args = list_or_args(args[0], args[1:])
        patterns = self._normalize_keys(dict.fromkeys(args))
    self.pending_unsubscribe_patterns.update(patterns)
    return self.execute_command('PUNSUBSCRIBE', *args)
python
def merge(self, other):
    """
    Merge all children stats.

    Merges each child stat in ``other.stats`` into the positionally
    corresponding child of ``self.stats``.
    """
    # BUG FIX (latent): the original loop rebound the parameter name 'other'
    # as its loop variable; it happened to work because zip() had already
    # captured other.stats, but any later use of 'other' would have seen a
    # child stat. Distinct names remove the hazard.
    for own_stat, other_stat in zip(self.stats, other.stats):
        own_stat.merge(other_stat)
java
/**
 * Initializes the page-event resources when the PDF document is opened:
 * creates the Helvetica base font, grabs the writer's direct content byte
 * buffer, and allocates a 50x50 template used for later page decoration.
 * Checked failures are rethrown unchecked since they indicate an unusable
 * document.
 *
 * @param writer   the PDF writer being opened
 * @param document the document being opened (unused here)
 * @throws IllegalStateException if font creation fails
 */
@Override
public void onOpenDocument(final PdfWriter writer, final Document document) {
    try {
        bf = BaseFont.createFont(BaseFont.HELVETICA, BaseFont.CP1252, BaseFont.NOT_EMBEDDED);
        cb = writer.getDirectContent();
        template = cb.createTemplate(50, 50);
    } catch (final DocumentException | IOException e) {
        // Multi-catch: both exception types were wrapped identically before.
        throw new IllegalStateException(e);
    }
}
java
/**
 * Writes {@code value} at (row, col) without bounds checking: overwrites the
 * stored non-zero entry when one exists, otherwise inserts a new item.
 *
 * @param row   row index
 * @param col   column index
 * @param value value to store
 */
@Override
public void unsafe_set(int row, int col, double value) {
    final int index = nz_index(row, col);
    if (index >= 0) {
        // Entry already present in the sparse structure: overwrite in place.
        nz_value.data[index] = value;
    } else {
        addItem(row, col, value);
    }
}
java
/**
 * Returns the entry name for a map key at the given index by delegating to
 * {@code getEntryNameForMap} with the key flag set to {@code true}.
 *
 * @param property the map property name
 * @param index    the entry index
 * @return the generated entry name
 */
public static String getEntryNameForMapKey(String property, Integer index) { return getEntryNameForMap(property, true, index); }
java
/**
 * Restores the pre-percent-resolution layout params on every child of
 * {@code mHost}: for children whose params implement
 * {@code PercentLayoutParams}, the stored {@code PercentLayoutInfo} (when
 * present) writes the original values back — using the margin-aware restore
 * for {@code MarginLayoutParams}, the plain restore otherwise. Emits debug
 * logs when the DEBUG level is loggable for {@code TAG}.
 */
public void restoreOriginalParams() { for (int i = 0, N = mHost.getChildCount(); i < N; i++) { View view = mHost.getChildAt(i); ViewGroup.LayoutParams params = view.getLayoutParams(); if (Log.isLoggable(TAG, Log.DEBUG)) { Log.d(TAG, "should restore " + view + " " + params); } if (params instanceof PercentLayoutParams) { PercentLayoutInfo info = ((PercentLayoutParams) params).getPercentLayoutInfo(); if (Log.isLoggable(TAG, Log.DEBUG)) { Log.d(TAG, "using " + info); } if (info != null) { if (params instanceof ViewGroup.MarginLayoutParams) { info.restoreMarginLayoutParams((ViewGroup.MarginLayoutParams) params); } else { info.restoreLayoutParams(params); } } } } }
python
def sink_update(
    self, project, sink_name, filter_, destination, unique_writer_identity=False
):
    """API call: update a sink resource.

    :type project: str
    :param project: ID of the project containing the sink.

    :type sink_name: str
    :param sink_name: the name of the sink

    :type filter_: str
    :param filter_: the advanced logs filter expression defining the
        entries exported by the sink.

    :type destination: str
    :param destination: destination URI for the entries exported by the sink.

    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of IAM
        identity returned as writer_identity in the new sink.

    :rtype: dict
    :returns: The sink resource returned from the API (converted from a
        protobuf to a dictionary).
    """
    path = "projects/%s/sinks/%s" % (project, sink_name)
    desired = LogSink(name=path, filter=filter_, destination=destination)
    updated = self._gapic_api.update_sink(
        path, desired, unique_writer_identity=unique_writer_identity
    )
    # LogSink carries no ``Any`` field, so MessageToDict is safe here.
    return MessageToDict(updated)
java
/**
 * Returns a new {@code Configuration} whose option set is the union of this
 * configuration's options and the supplied ones; providers and listeners are
 * carried over unchanged (this instance is not mutated).
 *
 * @param options additional options to enable
 * @return the combined configuration
 */
public Configuration addOptions(Option... options) {
    final EnumSet<Option> combined = EnumSet.noneOf(Option.class);
    combined.addAll(asList(options));
    combined.addAll(this.options);
    return Configuration.builder()
            .jsonProvider(jsonProvider)
            .mappingProvider(mappingProvider)
            .options(combined)
            .evaluationListener(evaluationListeners)
            .build();
}
python
def sge_submit(nslave, worker_args, worker_envs):
    """
    customized submit script, that submit nslave jobs, each must contain args as parameter
    note this can be a lambda function containing additional parameters in input

    Parameters
       nslave: number of slave process to start up
       worker_args: arguments to launch each job; this usually includes the
           parameters of master_uri and parameters passed into submit
       worker_envs: environment variables exported to each job via -v

    Relies on module-level ``args`` (parsed CLI options) and ``runscript``.
    """
    env_arg = ','.join(['%s=\"%s\"' % (k, str(v)) for k, v in worker_envs.items()])
    cmd = 'qsub -cwd -t 1-%d -S /bin/bash' % nslave
    if args.queue != 'default':
        # BUG FIX: leading space was missing, which glued "-q" onto
        # "/bin/bash" and produced an invalid qsub command line.
        cmd += ' -q %s' % args.queue
    cmd += ' -N %s ' % args.jobname
    cmd += ' -e %s -o %s' % (args.logdir, args.logdir)
    cmd += ' -pe orte %d' % (args.vcores)
    cmd += ' -v %s,PATH=${PATH}:.' % env_arg
    cmd += ' %s %s' % (runscript, ' '.join(args.command + worker_args))
    # single-argument print() is valid in both Python 2 and 3
    print(cmd)
    subprocess.check_call(cmd, shell=True)
    print('Waiting for the jobs to get up...')
python
def unpublish(scm, published_branch, verbose, fake):
    """Removes a published branch from the remote repository."""
    scm.fake = fake
    # fake mode implies verbose output
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)

    branch = scm.fuzzy_match_branch(published_branch)
    if not branch:
        scm.display_available_branches()
        raise click.BadArgumentUsage('Please specify a branch to unpublish')

    if branch not in scm.get_branch_names(local=False):
        raise click.BadArgumentUsage(
            "Branch {0} is not published. Use a branch that is published."
            .format(crayons.yellow(branch)))

    status_log(scm.unpublish_branch,
               'Unpublishing {0}.'.format(crayons.yellow(branch)),
               branch)
python
def save(self, dst, overwrite=False, compression=0):
    """Save MDF to *dst*. If overwrite is *True* then the destination file
    is overwritten, otherwise the file name is appended with '.<cntr>',
    where '<cntr>' is the first counter that produces a new file name (that
    does not already exist in the filesystem).

    Parameters
    ----------
    dst : str | pathlib.Path
        destination file name
    overwrite : bool
        overwrite flag, default *False*
    compression : int
        does nothing for mdf version3; introduced here to share the same
        API as mdf version 4 files

    Returns
    -------
    output_file : str
        output file name
    """
    dst = Path(dst).with_suffix(".mdf")

    destination_dir = dst.parent
    destination_dir.mkdir(parents=True, exist_ok=True)

    if overwrite is False:
        if dst.is_file():
            # Find the first ".N.mdf" suffix that does not collide.
            cntr = 0
            while True:
                name = dst.with_suffix(f".{cntr}.mdf")
                if not name.exists():
                    break
                else:
                    cntr += 1
            message = (
                f'Destination file "{dst}" already exists '
                f'and "overwrite" is False. Saving MDF file as "{name}"'
            )
            logger.warning(message)
            dst = name

    if not self.header.comment:
        # NOTE(review): "{__version__}" below is a literal placeholder in a
        # plain string (not an f-string) -- confirm whether this was meant
        # to interpolate the asammdf version.
        self.header.comment = """<FHcomment>
<TX>created</TX>
<tool_id>asammdf</tool_id>
<tool_vendor> </tool_vendor>
<tool_version>{__version__}</tool_version>
</FHcomment>"""
    else:
        # Append an update line to the existing file history comment.
        old_history = self.header.comment
        timestamp = time.asctime()

        text = f"{old_history}\n{timestamp}: updated by asammdf {__version__}"
        self.header.comment = text

    # Shared caches used while serializing text/conversion/source blocks.
    defined_texts, cc_map, si_map = {}, {}, {}

    # Saving over the currently open file: write to a temp file first.
    if dst == self.name:
        destination = dst.with_suffix(".savetemp")
    else:
        destination = dst

    with open(destination, "wb+") as dst_:

        groups_nr = len(self.groups)

        write = dst_.write
        seek = dst_.seek
        # list of all blocks
        blocks = []

        address = 0

        write(bytes(self.identification))
        address += v23c.ID_BLOCK_SIZE

        write(bytes(self.header))
        address += self.header.block_len

        if self.header.program:
            write(bytes(self.header.program))
            self.header.program_addr = address
            address += self.header.program.block_len
        else:
            self.header.program_addr = 0

        comment = TextBlock(text=self.header.comment)
        write(bytes(comment))
        self.header.comment_addr = address
        address += comment.block_len

        # DataGroup
        # put them first in the block list so they will be written first to
        # disk this way, in case of memory=False, we can safely
        # restore the original data block address
        gp_rec_ids = []

        original_data_block_addrs = [
            group.data_group.data_block_addr for group in self.groups
        ]

        for idx, gp in enumerate(self.groups):
            dg = gp.data_group
            gp_rec_ids.append(dg.record_id_len)
            dg.record_id_len = 0

            # DataBlock: stream each group's samples straight to disk.
            dim = 0
            for (data_bytes, _, __) in self._load_data(gp):
                dim += len(data_bytes)
                write(data_bytes)

            if gp.data_blocks:
                gp.data_group.data_block_addr = address
            else:
                gp.data_group.data_block_addr = 0
            address += dim

            if self._callback:
                # first third of the progress range (0-33)
                self._callback(int(33 * (idx + 1) / groups_nr), 100)

            if self._terminate:
                dst_.close()
                self.close()
                return

        # Reserve addresses for the data group blocks themselves.
        for gp in self.groups:
            dg = gp.data_group
            blocks.append(dg)
            dg.address = address
            address += dg.block_len

        # Chain the data groups into a linked list.
        if self.groups:
            for i, dg in enumerate(self.groups[:-1]):
                addr = self.groups[i + 1].data_group.address
                dg.data_group.next_dg_addr = addr
            self.groups[-1].data_group.next_dg_addr = 0

        for idx, gp in enumerate(self.groups):
            # Channel Dependency
            cd = gp.channel_dependencies
            for dep in cd:
                if dep:
                    dep.address = address
                    blocks.append(dep)
                    address += dep.block_len

            # NOTE(review): dependency blocks appear to be appended (and
            # re-addressed) a second time in this loop -- confirm against
            # upstream whether this double booking is intentional.
            for channel, dep in zip(gp.channels, gp.channel_dependencies):
                if dep:
                    channel.component_addr = dep.address = address
                    blocks.append(dep)
                    address += dep.block_len
                else:
                    channel.component_addr = 0
                address = channel.to_blocks(
                    address, blocks, defined_texts, cc_map, si_map
                )

            # Chain the channels of this group into a linked list.
            count = len(gp.channels)
            if count:
                for i in range(count - 1):
                    gp.channels[i].next_ch_addr = gp.channels[i + 1].address
                gp.channels[-1].next_ch_addr = 0

            # ChannelGroup
            cg = gp.channel_group
            if gp.channels:
                cg.first_ch_addr = gp.channels[0].address
            else:
                cg.first_ch_addr = 0
            cg.next_cg_addr = 0
            address = cg.to_blocks(address, blocks, defined_texts, si_map)

            # TriggerBLock
            trigger = gp.trigger
            if trigger:
                address = trigger.to_blocks(address, blocks)

            if self._callback:
                # second third of the progress range (33-66)
                self._callback(int(33 * (idx + 1) / groups_nr) + 33, 100)

            if self._terminate:
                dst_.close()
                self.close()
                return

        # update referenced channels addresses in the channel dependencies
        for gp in self.groups:
            for dep in gp.channel_dependencies:
                if not dep:
                    continue

                for i, pair_ in enumerate(dep.referenced_channels):
                    dg_nr, ch_nr = pair_
                    grp = self.groups[dg_nr]
                    ch = grp.channels[ch_nr]

                    dep[f"ch_{i}"] = ch.address
                    dep[f"cg_{i}"] = grp.channel_group.address
                    dep[f"dg_{i}"] = grp.data_group.address

        # DataGroup: wire channel group and trigger addresses.
        for gp in self.groups:
            gp.data_group.first_cg_addr = gp.channel_group.address
            if gp.trigger:
                gp.data_group.trigger_addr = gp.trigger.address
            else:
                gp.data_group.trigger_addr = 0

        if self.groups:
            address = self.groups[0].data_group.address
            self.header.first_dg_addr = address
            self.header.dg_nr = len(self.groups)

        if self._terminate:
            dst_.close()
            self.close()
            return

        # Flush all collected blocks, optionally reporting progress (66-99).
        if self._callback:
            blocks_nr = len(blocks)
            threshold = blocks_nr / 33
            count = 1
            for i, block in enumerate(blocks):
                write(bytes(block))
                if i >= threshold:
                    self._callback(66 + count, 100)
                    count += 1
                    threshold += blocks_nr / 33
        else:
            for block in blocks:
                write(bytes(block))

        # Restore in-memory record ids and data block addresses so this
        # object remains usable after saving.
        for gp, rec_id, original_address in zip(
            self.groups, gp_rec_ids, original_data_block_addrs
        ):
            gp.data_group.record_id_len = rec_id
            gp.data_group.data_block_addr = original_address

        # Rewrite identification and header now that addresses are final.
        seek(0)
        write(bytes(self.identification))
        write(bytes(self.header))

    if dst == self.name:
        # We saved over ourselves: swap the temp file in and reload.
        self.close()
        Path.unlink(self.name)
        Path.rename(destination, self.name)

        self.groups.clear()
        self.header = None
        self.identification = None
        self.channels_db.clear()
        self.masters_db.clear()
        self._master_channel_cache.clear()

        self._tempfile = TemporaryFile()
        self._file = open(self.name, "rb")
        self._read()

    if self._callback:
        self._callback(100, 100)

    return dst
java
/**
 * Parses a {@link ConfusionMatrix} from its whitespace-separated text
 * representation: the first line holds the label names, each following
 * line holds a predicted label and its counts per actual label.
 *
 * @param text the textual matrix
 * @return the parsed confusion matrix
 * @throws IllegalArgumentException if the text cannot be parsed
 */
public static ConfusionMatrix parseFromText(String text) throws IllegalArgumentException {
    try {
        String[] lines = text.split("\n");
        // Header row: the label names.
        List<String> labels = new ArrayList<>();
        for (String token : lines[0].split("\\s+")) {
            if (!token.isEmpty()) {
                labels.add(token);
            }
        }
        ConfusionMatrix result = new ConfusionMatrix();
        for (int i = 1; i < lines.length; i++) {
            // Tokenize the row: first cell is the predicted label,
            // the remaining cells are the counts per actual label.
            List<String> row = new ArrayList<>();
            for (String token : lines[i].split("\\s+")) {
                if (!token.isEmpty()) {
                    row.add(token);
                }
            }
            String predictedLabel = row.get(0);
            for (int r = 1; r < row.size(); r++) {
                // parseInt avoids the needless boxing of Integer.valueOf
                int val = Integer.parseInt(row.get(r));
                String actualLabel = labels.get(r - 1);
                result.increaseValue(predictedLabel, actualLabel, val);
            }
        }
        return result;
    } catch (Exception e) {
        throw new IllegalArgumentException("Wrong input format", e);
    }
}
java
/**
 * Deletes the file or directory denoted by the given path.
 *
 * @param path the filesystem path; may be null or blank
 * @return true if the deletion succeeded, false for a null/blank path or
 *         when the underlying delete fails
 */
public static boolean delete( String path ) {
    // A null or blank path cannot refer to an existing file.
    if (path == null || path.trim().isEmpty()) {
        return false;
    }
    return delete(new File(path));
}
python
def get_neighbor_sentence_ngrams(
    mention, d=1, attrib="words", n_min=1, n_max=1, lower=True
):
    """Get the ngrams that are in the neighboring Sentences of the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be searched.

    :param mention: The Mention whose neighbor Sentences are being searched
    :param d: The maximum sentence distance; only sentences within ``d``
        positions of the Mention's sentence (the sentence itself excluded)
        are searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    spans = _to_spans(mention)
    for span in spans:
        for ngram in chain.from_iterable(
            [
                tokens_to_ngrams(
                    getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
                )
                for sentence in span.sentence.document.sentences
                if abs(sentence.position - span.sentence.position) <= d
                and sentence != span.sentence
            ]
        ):
            yield ngram
java
/**
 * Applies the optional configuration entries from {@code context} to the
 * given binding. Each well-known key is applied only when present, so the
 * binding keeps its defaults for absent keys. Note that containsKey is
 * checked (rather than a null test on get) so an explicit null value in
 * the context is still passed through to the binding.
 *
 * @param binding the shuttle-list binding being configured
 * @param context map of configuration values keyed by the *_KEY constants
 */
protected void applyContext( ShuttleListBinding binding, Map context ) {
    if( context.containsKey(MODEL_KEY) ) {
        binding.setModel((ListModel) context.get(MODEL_KEY));
    }
    if( context.containsKey(SELECTABLE_ITEMS_HOLDER_KEY) ) {
        binding.setSelectableItemsHolder((ValueModel) context.get(SELECTABLE_ITEMS_HOLDER_KEY));
    }
    if( context.containsKey(SELECTED_ITEMS_HOLDER_KEY) ) {
        binding.setSelectedItemsHolder((ValueModel) context.get(SELECTED_ITEMS_HOLDER_KEY));
    }
    if( context.containsKey(RENDERER_KEY) ) {
        binding.setRenderer((ListCellRenderer) context.get(RENDERER_KEY));
    }
    if( context.containsKey(COMPARATOR_KEY) ) {
        binding.setComparator((Comparator) context.get(COMPARATOR_KEY));
    }
    if( context.containsKey(SELECTED_ITEM_TYPE_KEY) ) {
        binding.setSelectedItemType((Class) context.get(SELECTED_ITEM_TYPE_KEY));
    }
    if( context.containsKey(FORM_ID) ) {
        binding.setFormId((String) context.get(FORM_ID));
    }
}
java
/**
 * Validates that this mojo can run MSBuild: checks the project packaging,
 * locates the MSBuild executable, validates the project file and
 * normalizes the configured platforms.
 *
 * @throws MojoExecutionException if any of the validations fail
 */
protected final void validateForMSBuild() throws MojoExecutionException {
    // Packaging must be one of the MSBuild-supported types before anything else.
    if ( !MSBuildPackaging.isValid( mavenProject.getPackaging() ) ) {
        String msg = "Please set packaging to one of " + MSBuildPackaging.validPackaging();
        throw new MojoExecutionException( msg );
    }
    findMSBuild();
    validateProjectFile();
    platforms = MojoHelper.validatePlatforms( platforms );
}
java
/**
 * Collects validation issues for the given vertex: the generic element
 * checks plus name checks (non-null, non-empty, no whitespace).
 *
 * @param vertex the vertex to validate
 * @return a list of human-readable issue descriptions (empty when valid)
 */
static public List<String> hasIssues(Vertex.RuntimeVertex vertex) {
    List<String> issues = new ArrayList<>(ElementChecker.hasIssues(vertex));
    String name = vertex.getName();
    if (name == null) {
        issues.add("Name of vertex cannot be null");
        return issues;
    }
    if (name.isEmpty()) {
        issues.add("Name of vertex cannot be an empty string");
    }
    if (CharMatcher.whitespace().matchesAnyOf(name)) {
        issues.add("Name of vertex cannot have any white spaces.");
    }
    return issues;
}
java
private final DiceOperand getDiceOperand(final DiceContext ctx) { final Dice dice; // Parsed dice final Integer quantity; // Number of dice final Integer sides; // Number of sides final Iterator<TerminalNode> digits; // Parsed digits // Parses the dice data digits = ctx.DIGIT().iterator(); if (Iterables.size(ctx.DIGIT()) > 1) { if ((ctx.ADDOPERATOR() != null) && (SUBTRACTION_OPERATOR .equals(ctx.ADDOPERATOR().getText()))) { LOGGER.debug("This is part of a subtraction. Reversing sign."); quantity = 0 - Integer.parseInt(digits.next().getText()); } else { quantity = Integer.parseInt(digits.next().getText()); } } else { // No quantity of dice defined // Defaults to 1 quantity = 1; } sides = Integer.parseInt(digits.next().getText()); // Creates the dice dice = new DefaultDice(quantity, sides); return new DefaultDiceOperand(dice); }
java
/**
 * Updates the author of the given post.
 *
 * @param postId   id of the post to update
 * @param authorId id of the new author
 * @return true if at least one row was updated
 * @throws SQLException if the database operation fails
 */
public boolean updatePostAuthor(final long postId, final long authorId) throws SQLException {
    Connection connection = null;
    PreparedStatement statement = null;
    try {
        connection = connectionSupplier.getConnection();
        statement = connection.prepareStatement(updatePostAuthorSQL);
        statement.setLong(1, authorId);
        statement.setLong(2, postId);
        final int affectedRows = statement.executeUpdate();
        return affectedRows > 0;
    } finally {
        // Resources are released even when executeUpdate throws.
        SQLUtil.closeQuietly(connection, statement);
    }
}
java
/**
 * Appends the given environments to this result, creating the backing
 * list on first use, and returns {@code this} for chaining.
 *
 * @param environments the environments to append
 * @return this result, for fluent chaining
 */
public DescribeEnvironmentsResult withEnvironments(Environment... environments) {
    // Lazily create the backing list, sized for the incoming values.
    if (this.environments == null) {
        setEnvironments(new java.util.ArrayList<Environment>(environments.length));
    }
    java.util.Collections.addAll(this.environments, environments);
    return this;
}
python
def _prepare_defaults(self): """Trigger assignment of default values.""" for name, field in self.__fields__.items(): if field.assign: getattr(self, name)
python
def translate(self, dct):
    """
    Translate leaf names using a dictionary of names

    :param dct: Dictionary of current names -> updated names
    :return: Copy of tree with names changed
    """
    renamed = self.copy()
    for leaf_node in renamed._tree.leaf_node_iter():
        taxon = leaf_node.taxon
        # Leaves not present in the mapping keep their current label.
        taxon.label = dct.get(taxon.label, taxon.label)
    return renamed
java
/**
 * Lists the user's applications asynchronously by delegating to the
 * service-response observable and adapting it to a {@link ServiceFuture}.
 *
 * @param listOptionalParameter optional paging/filtering parameters
 * @param serviceCallback the callback notified on success or failure
 * @return a future resolving to the list of applications
 */
public ServiceFuture<List<ApplicationInfoResponse>> listAsync(ListAppsOptionalParameter listOptionalParameter, final ServiceCallback<List<ApplicationInfoResponse>> serviceCallback) {
    return ServiceFuture.fromResponse(listWithServiceResponseAsync(listOptionalParameter), serviceCallback);
}
java
/**
 * Extracts the real parts of the given complex numbers.
 *
 * @param cn the complex numbers
 * @return a new array holding the real component of each input element
 */
public static double[] getReal(ComplexNumber[] cn) {
    double[] reals = new double[cn.length];
    for (int idx = 0; idx < cn.length; idx++) {
        reals[idx] = cn[idx].real;
    }
    return reals;
}
java
/**
 * Sends the buffered document to an LPR/LPD print server over the already
 * connected {@code socketLpr}. The exchange follows the classic line
 * printer daemon protocol (see RFC 1179): open a job for the printer
 * (command 0x02), send the control file (subcommand 0x02), then send the
 * data file (subcommand 0x03), acknowledging each step.
 *
 * @param stream       the rendered document bytes to print
 * @param hostName     name of the submitting host (used in job file names)
 * @param printerName  the remote queue/printer name
 * @param documentName the document name recorded in the control file
 * @throws IOException if the server rejects a step or the socket fails
 */
public void printStream(final ByteArrayOutputStream stream, final String hostName, final String printerName, final String documentName) throws IOException {
    String controlFile = "";
    byte buffer[] = new byte[1000];
    String s;
    String strJobNumber;
    // Job number is zero-padded to three digits, as used in cfA/dfA names.
    strJobNumber = "" + jobNumber;
    while (strJobNumber.length() < 3)
        strJobNumber = "0" + strJobNumber;
    String userName = System.getProperty("user.name");
    if (userName == null)
        userName = "Unknown";
    socketLpr.setSoTimeout(30000);
    final OutputStream sOut = socketLpr.getOutputStream();
    final InputStream sIn = socketLpr.getInputStream();
    //Open printer: 0x02 <printer> LF
    s = "\002" + printerName + "\n";
    sOut.write(s.getBytes());
    sOut.flush();
    acknowledge(sIn, "Failed to open printer");
    //Send control file: H=host, P=user, o/p=print raw or formatted,
    //U=unlink data file, N=document name
    controlFile += "H" + hostName + "\n";
    controlFile += "P" + userName + "\n";
    controlFile += (printRaw ? "o" : "p") + "dfA" + strJobNumber + hostName + "\n";
    controlFile += "UdfA" + strJobNumber + hostName + "\n";
    controlFile += "N" + documentName + "\n";
    s = "\002" + controlFile.length() + " cfA" + strJobNumber + hostName + "\n";
    sOut.write(s.getBytes());
    acknowledge(sIn, "Failed to send control header");
    buffer = controlFile.getBytes();
    sOut.write(buffer);
    // A single zero byte terminates the control file transfer.
    buffer[0] = 0;
    sOut.write(buffer, 0, 1);
    sOut.flush();
    acknowledge(sIn, "Failed to send control file");
    // Data file: 0x03 <size> dfA<job><host> LF, followed by the bytes.
    s = "\003" + stream.size() + " dfA" + strJobNumber + hostName + "\n";
    sOut.write(s.getBytes());
    sOut.flush();
    acknowledge(sIn, "Failed to send print file command");
    stream.writeTo(sOut);
    sOut.flush();
    buffer[0] = 0;
    sOut.write(buffer, 0, 1);
    sOut.flush();
    acknowledge(sIn, "Failed to send print file");
    socketLpr.close();
}
python
def where_entry_category(query, category, recurse=False):
    """ Generate a where clause for a particular category

    :param query: the Pony ORM query/selection to filter
    :param category: category path to match (coerced to ``str``)
    :param recurse: if True, also match entries in subcategories
    :return: the filtered query

    Note: the generator expressions below are parsed by Pony ORM and
    translated to SQL, so their exact form matters.
    """
    category = str(category)
    if category and recurse:
        # We're recursing and aren't in /, so add the prefix clause
        return orm.select(
            e for e in query
            if e.category == category
            or e.category.startswith(category + '/')
        )

    if not recurse:
        # We're not recursing, so we need an exact match on a possibly-empty
        # category
        return orm.select(e for e in query if e.category == category)

    # We're recursing and have no category, which means we're doing nothing
    return query
java
/**
 * Returns the schema type information of the wrapped element.
 * <p>
 * The call is made reflectively because {@code Element.getSchemaTypeInfo}
 * is only available from DOM Level 3; the dynamic lookup keeps this class
 * loadable on both JRE 1.4 and 1.5.
 *
 * @return the element's {@link TypeInfo}
 * @throws PageRuntimeException if the reflective call fails
 */
public TypeInfo getSchemaTypeInfo() {
    // dynamic load to support jre 1.4 and 1.5
    try {
        Method m = element.getClass().getMethod("getSchemaTypeInfo", new Class[] {});
        return (TypeInfo) m.invoke(element, ArrayUtil.OBJECT_EMPTY);
    } catch (Exception e) {
        throw new PageRuntimeException(Caster.toPageException(e));
    }
}
java
/**
 * Computes the HMAC-SHA1 authentication tag over the raw packet followed
 * by the 32-bit roll-over counter (big-endian), leaving the result in
 * {@code tagStore}.
 *
 * @param pkt   the raw packet to authenticate
 * @param rocIn the roll-over counter appended to the MAC input
 */
private void authenticatePacketHMCSHA1(RawPacket pkt, int rocIn) {
    ByteBuffer buf = pkt.getBuffer();
    buf.rewind();
    int len = buf.remaining();
    // Feed the whole packet into the MAC via the scratch buffer.
    buf.get(tempBuffer, 0, len);
    mac.update(tempBuffer, 0, len);
    // Append the ROC in network byte order (most significant byte first).
    rbStore[0] = (byte) (rocIn >> 24);
    rbStore[1] = (byte) (rocIn >> 16);
    rbStore[2] = (byte) (rocIn >> 8);
    rbStore[3] = (byte) rocIn;
    mac.update(rbStore, 0, rbStore.length);
    mac.doFinal(tagStore, 0);
}
java
/**
 * Returns the script validation context, lazily creating it on first
 * access and registering it with the action's validation contexts.
 *
 * @return the (possibly newly created) script validation context
 */
private ScriptValidationContext getScriptValidationContext() {
    if (scriptValidationContext != null) {
        return scriptValidationContext;
    }
    scriptValidationContext = new ScriptValidationContext(messageType.toString());
    getAction().getValidationContexts().add(scriptValidationContext);
    return scriptValidationContext;
}
java
/**
 * Converts the given string into a {@link URI}.
 *
 * @param s the string to convert
 * @return the parsed URI
 * @throws TechnicalException if the string is not a valid URI
 */
public static URI asURI(final String s) {
    try {
        return new URI(s);
    } catch (final URISyntaxException ex) {
        final String message = "Cannot make an URI from: " + s;
        throw new TechnicalException(message, ex);
    }
}
java
/**
 * Returns the cached {@link Beans} descriptor for the given class.
 * <p>
 * NOTE(review): the unchecked cast rebinds the shared {@code CACHE} to the
 * requested type parameter; this is presumably safe because the cache keys
 * descriptors by class -- confirm against {@code CachedBeans.getBeans}.
 *
 * @param clazz the bean class to introspect
 * @return the beans descriptor for {@code clazz}
 */
@SuppressWarnings("unchecked")
static final <T> Beans<T> of(Class<T> clazz) {
    return ((CachedBeans<T>)CachedBeans.CACHE).getBeans(clazz);
}
python
def get_view(self, columns: Sequence[str], query: str = None) -> PopulationView:
    """Get a time-varying view of the population state table.

    The requested population view can be used to view the current state or
    to update the state with new values.

    Parameters
    ----------
    columns :
        A subset of the state table columns that will be available in the
        returned view.
    query :
        A filter on the population state. This filters out particular rows
        (simulants) based on their current state. The query should be
        provided in a way that is understood by the
        ``pandas.DataFrame.query`` method and may reference state table
        columns not requested in the ``columns`` argument. May be ``None``;
        it is passed through unchanged to the population manager.

    Returns
    -------
    PopulationView
        A filtered view of the requested columns of the population state
        table.
    """
    # Pure delegation: the population manager owns the state table.
    return self._population_manager.get_view(columns, query)
java
/**
 * Re-initializes members that depend on the current {@code flags}: pads
 * the answer annotations with the background symbol, re-initializes the
 * feature factory, and rebuilds the document readers/writers. Intended to
 * be invoked after the flags change -- confirm with callers.
 */
protected final void reinit() {
    pad.set(AnswerAnnotation.class, flags.backgroundSymbol);
    pad.set(GoldAnswerAnnotation.class, flags.backgroundSymbol);

    featureFactory.init(flags);
    defaultReaderAndWriter = makeReaderAndWriter();
    // Reuse the default reader/writer when the plain-text reader is
    // configured to be the same implementation; otherwise build a
    // dedicated plain-text reader/writer.
    if (flags.readerAndWriter != null &&
        flags.readerAndWriter.equals(flags.plainTextDocumentReaderAndWriter)) {
        plainTextReaderAndWriter = defaultReaderAndWriter;
    } else {
        plainTextReaderAndWriter = makePlainTextReaderAndWriter();
    }
}
java
/**
 * Returns only the weak relation types from the given collection.
 *
 * @param relationTypes the relation types to filter
 * @return a new list containing every type that is not strong
 */
public static List<CmsRelationType> filterWeak(Collection<CmsRelationType> relationTypes) {
    List<CmsRelationType> result = new ArrayList<CmsRelationType>();
    for (CmsRelationType type : relationTypes) {
        if (!type.isStrong()) {
            result.add(type);
        }
    }
    return result;
}
python
def sync_from_spec(redis, schema):
    """
    Takes an input experiment spec and creates/modifies/archives the existing
    experiments to match the spec.

    If there's an experiment in the spec that currently doesn't exist, it will
    be created along with the associated choices.

    If there's an experiment in the spec that currently exists, and the set of
    choices are different, that experiment's choices will be modified to match
    the spec.

    If there's an experiment not in the spec that currently exists, it will be
    archived.

    A spec looks like this:

    {
        "experiment 1": ["choice 1", "choice 2", "choice 3"],
        "experiment 2": ["choice 1", "choice 2"]
    }

    :param redis: Redis client used to read and mutate experiment state
    :param schema: dict mapping experiment name -> list of choice names
    """
    def get_experiments_dict(active=True):
        """Returns a dictionary of experiment names -> experiment objects"""
        return dict((experiment.name, experiment)
                    for experiment in get_experiments(redis, active=active))

    # Get the current experiments
    active_experiments = get_experiments_dict()
    archived_experiments = get_experiments_dict(active=False)

    # Get the newly defined experiment names and the names of the experiments
    # already setup
    new_experiment_names = set(schema.keys())
    active_experiment_names = set(active_experiments.keys())

    # Find all the experiments that are in the schema and are defined among the
    # archived experiments, but not the active ones (we check against active
    # experiments to prevent the edge case where an experiment might be defined
    # doubly in both active and archived experiments)
    unarchivable_experiment_names = (new_experiment_names -
        active_experiment_names) & set(archived_experiments.keys())

    # De-archive the necessary experiments
    for unarchivable_experiment_name in unarchivable_experiment_names:
        print("- De-archiving %s" % unarchivable_experiment_name)

        # Because there is no function to de-archive an experiment, it must
        # be done manually
        pipe = redis.pipeline(transaction=True)
        pipe.sadd(ACTIVE_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
        pipe.srem(ARCHIVED_EXPERIMENTS_REDIS_KEY, unarchivable_experiment_name)
        pipe.execute()

    # Reload the active experiments if we de-archived any
    if unarchivable_experiment_names:
        active_experiments = get_experiments_dict()
        active_experiment_names = set(active_experiments.keys())

    # Create the new experiments
    for new_experiment_name in new_experiment_names - active_experiment_names:
        print("- Creating experiment %s" % new_experiment_name)
        experiment = add_experiment(redis, new_experiment_name)
        for choice in schema[new_experiment_name]:
            print(" - Adding choice %s" % choice)
            experiment.add_choice(choice)

    # Archive experiments not defined in the schema
    for archivable_experiment_name in active_experiment_names - new_experiment_names:
        print("- Archiving %s" % archivable_experiment_name)
        active_experiments[archivable_experiment_name].archive()

    # Update the choices for existing experiments that are also defined in the
    # schema
    for experiment_name in new_experiment_names & active_experiment_names:
        experiment = active_experiments[experiment_name]
        new_choice_names = set(schema[experiment_name])
        old_choice_names = set(experiment.choice_names)

        # Add choices in the schema that do not exist yet
        for new_choice_name in new_choice_names - old_choice_names:
            print("- Adding choice %s to existing experiment %s" %
                  (new_choice_name, experiment_name))
            experiment.add_choice(new_choice_name)

        # Remove choices that aren't in the schema
        for removable_choice_name in old_choice_names - new_choice_names:
            print("- Removing choice %s from existing experiment %s" %
                  (removable_choice_name, experiment_name))
            experiment.remove_choice(removable_choice_name)
java
/**
 * Returns the estimated frequency of {@code e}. Each element maps to four
 * 4-bit counters packed into 64-bit table slots, and the estimate is the
 * minimum of the four counters (count-min style), which bounds the
 * over-count caused by hash collisions.
 *
 * @param e the element to look up
 * @return the estimated occurrence count, in the range [0, 15]
 */
@NonNegative
public int frequency(@NonNull E e) {
    if (isNotInitialized()) {
        return 0;
    }

    int hash = spread(e.hashCode());
    // Each 64-bit slot holds sixteen 4-bit counters; 'start' picks which
    // group of four counters within the slot this hash uses.
    int start = (hash & 3) << 2;
    int frequency = Integer.MAX_VALUE;
    for (int i = 0; i < 4; i++) {
        int index = indexOf(hash, i);
        // Extract the i-th 4-bit counter from its slot.
        int count = (int) ((table[index] >>> ((start + i) << 2)) & 0xfL);
        frequency = Math.min(frequency, count);
    }
    return frequency;
}
python
def filter_by_hoys(self, hoys):
    """Filter the Data Collection based on a list of hoys.

    Args:
        hoys: A List of hours of the year 0..8759

    Return:
        A new Data Collection with filtered data
    """
    existing_hoys = self.header.analysis_period.hoys
    # Keep only requested hours that exist in this collection's
    # analysis period.
    hoys = [h for h in hoys if h in existing_hoys]
    # Convert hours to minutes of the year and delegate the filtering.
    _moys = tuple(int(hour * 60) for hour in hoys)
    return self.filter_by_moys(_moys)
python
def warning_ret(f, *args, **kwargs):
    """Automatically log progress on function entry and exit at the
    ``warning`` level; the function's return value is included in the logs.

    *Logging with values contained in the parameters of the decorated function*
    Message (args[0]) may be a string to be formatted with parameters passed to
    the decorated function. Each '{varname}' will be replaced by the value of
    the parameter of the same name.

    *Exceptions:*
    - IndexError and ValueError
    - will be returned if *args contains a string that does not correspond to
      a parameter name of the decorated function, or if there are more '{}'s
      than there are *args.
    """
    kwargs['log'] = logging.WARNING
    kwargs['print_return'] = True
    return _stump(f, *args, **kwargs)
java
/**
 * Tells whether the given cell belongs to the row/column currently being
 * skipped.
 *
 * @param cell the cell to test
 * @return true when a skip column is set and both its row and column
 *         match the cell
 */
private boolean skipCellVersion(Cell cell) {
    if (skipColumn == null) {
        return false;
    }
    if (!skipColumn.matchingRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
        return false;
    }
    return skipColumn.matchingColumn(cell.getFamilyArray(), cell.getFamilyOffset(),
        cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(),
        cell.getQualifierLength());
}
python
def pairwise(fun, v):
    """Apply ``fun`` to each consecutive pair of elements of ``v``.

    >>> pairwise(operator.sub, [4,3,2,1,-10])
    [1, 1, 1, 11]
    >>> import numpy
    >>> pairwise(numpy.subtract, numpy.array([4,3,2,1,-10]))
    array([ 1, 1, 1, 11])
    """
    if hasattr(v, 'shape'):
        # numpy-style arrays: vectorized application over shifted views.
        return fun(v[:-1], v[1:])
    # Plain iterables fall back to the lazy pairwise helper.
    return list(ipairwise(fun, v))
java
/**
 * Bulk-copies {@code numBytes} bytes from this segment starting at
 * {@code offset} into {@code target} starting at {@code targetOffset}.
 *
 * @param offset       source offset within this segment
 * @param target       the destination segment
 * @param targetOffset destination offset within {@code target}
 * @param numBytes     number of bytes to copy
 * @throws IllegalStateException if either segment has been freed
 * @throws IndexOutOfBoundsException if the ranges do not fit the segments
 */
public final void copyTo(int offset, MemorySegment target, int targetOffset, int numBytes) {
    final byte[] thisHeapRef = this.heapMemory;
    final byte[] otherHeapRef = target.heapMemory;
    final long thisPointer = this.address + offset;
    final long otherPointer = target.address + targetOffset;

    // The bitwise OR is negative iff any of the three values is negative,
    // folding three sign checks into a single branch on the fast path.
    if ((numBytes | offset | targetOffset) >= 0 &&
            thisPointer <= this.addressLimit - numBytes && otherPointer <= target.addressLimit - numBytes) {
        UNSAFE.copyMemory(thisHeapRef, thisPointer, otherHeapRef, otherPointer, numBytes);
    }
    else if (this.address > this.addressLimit) {
        // an address past the limit marks a freed segment
        throw new IllegalStateException("this memory segment has been freed.");
    }
    else if (target.address > target.addressLimit) {
        throw new IllegalStateException("target memory segment has been freed.");
    }
    else {
        throw new IndexOutOfBoundsException(
                String.format("offset=%d, targetOffset=%d, numBytes=%d, address=%d, targetAddress=%d",
                        offset, targetOffset, numBytes, this.address, target.address));
    }
}
python
def list_tables(self):
    '''
    Load existing tables and their descriptions.

    :return: the names of the tables known to this database
    '''
    if not self._tables:
        # Cache is empty: discover tables from the database directory.
        self._tables.update(
            (name, self._load_table(name)) for name in os.listdir(self.db_path)
        )
    return self._tables.keys()
python
def set_result(self, result):
    """Complete all tasks.

    Propagates ``result`` to every future reachable via ``traverse()``,
    then to this future itself (unless it is already done).
    """
    for future in self.traverse():
        # All cancelled futures should have callbacks that remove them
        # from this linked list. However, these callbacks are scheduled in
        # an event loop, so we could still find them in our list.
        # NOTE(review): calling set_result on a cancelled standard Future
        # raises InvalidStateError -- confirm that traverse()/this Future
        # subclass tolerates that case.
        future.set_result(result)
    if not self.done():
        super().set_result(result)
python
def check_alert(self, text):
    """
    Assert an alert is showing with the given text.

    Raises AssertionError when the alert text differs. Any
    WebDriverException raised while accessing the alert is silently
    ignored (see comment below).
    """
    try:
        alert = Alert(world.browser)
        if alert.text != text:
            raise AssertionError(
                "Alert text expected to be {!r}, got {!r}.".format(
                    text, alert.text))
    except WebDriverException:
        # PhantomJS is kinda poor: it lacks proper alert support, so
        # driver-level errors are deliberately swallowed here.
        pass
java
/**
 * Reports whether the given feature is set, handling the two locally
 * declared features and delegating everything else to the superclass.
 */
@Override
public boolean eIsSet(int featureID) {
    if (featureID == BpsimPackage.GAMMA_DISTRIBUTION_TYPE__SCALE) {
        return isSetScale();
    }
    if (featureID == BpsimPackage.GAMMA_DISTRIBUTION_TYPE__SHAPE) {
        return isSetShape();
    }
    return super.eIsSet(featureID);
}
python
def increase_reads_in_units(
        current_provisioning, units, max_provisioned_reads,
        consumed_read_units_percent, log_tag):
    """ Increase the current_provisioning with units units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :type max_provisioned_reads: int
    :param max_provisioned_reads: Configured max provisioned reads
    :type consumed_read_units_percent: float
    :param consumed_read_units_percent: Number of consumed read units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    units = int(units)
    current_provisioning = float(current_provisioning)
    consumed_read_units_percent = float(consumed_read_units_percent)

    # Base the increase on actual consumption when consumption exceeds
    # the currently provisioned amount.
    consumption_based_current_provisioning = \
        int(math.ceil(current_provisioning*(consumed_read_units_percent/100)))

    if consumption_based_current_provisioning > current_provisioning:
        updated_provisioning = consumption_based_current_provisioning + units
    else:
        updated_provisioning = int(current_provisioning) + units

    # Cap at the configured maximum; a max of 0 (or less) disables the cap.
    if max_provisioned_reads > 0:
        if updated_provisioning > max_provisioned_reads:
            logger.info(
                '{0} - Reached provisioned reads max limit: {1}'.format(
                    log_tag, max_provisioned_reads))
            return max_provisioned_reads

    logger.debug(
        '{0} - Read provisioning will be increased to {1:d} units'.format(
            log_tag, int(updated_provisioning)))

    return updated_provisioning
java
/**
 * Writes the given long into the buffer in big-endian order, starting at
 * {@code pos}.
 *
 * @param buf destination buffer
 * @param pos start offset; bytes pos..pos+7 are written
 * @param v   the value to serialize
 */
public static void writeLong(byte[] buf, int pos, long v) {
    checkBoundary(buf, pos, 8);
    // Emit the eight bytes from most to least significant.
    for (int shift = 56; shift >= 0; shift -= 8) {
        buf[pos++] = (byte) (0xff & (v >> shift));
    }
}
java
/**
 * Cancels the parallel SAX processing: requests an orderly shutdown of
 * the worker pool, escalating to {@code shutdownNow()} if tasks have not
 * finished within 30 minutes, and failing hard if they still do not stop.
 */
public void cancel() {
    try {
        executorService.shutdown();
        if (!executorService.awaitTermination(30, TimeUnit.MINUTES)) {
            executorService.shutdownNow(); // Cancel currently executing tasks
            if (!executorService.awaitTermination(30, TimeUnit.MINUTES)) {
                LOGGER.error("Pool did not terminate... FATAL ERROR");
                throw new RuntimeException("Parallel SAX pool did not terminate... FATAL ERROR");
            }
        } else {
            // NOTE(review): this branch runs when the pool terminated
            // *successfully* within the timeout, yet it logs at error
            // level -- confirm the intended semantics.
            LOGGER.error("Parallel SAX was interrupted by a request");
        }
    } catch (InterruptedException ie) {
        LOGGER.error("Error while waiting interrupting.", ie);
        // (Re-)Cancel if current thread also interrupted
        executorService.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
java
/**
 * Pushes the edge and its far-side node onto the traversal stacks, unless
 * the edge was already walked or the node was already visited.
 *
 * @param edge      the edge to traverse
 * @param nodeStack stack of visited nodes (top = current node)
 * @param edgeStack stack of traversed edges
 * @return true when both were pushed, false when the step was rejected
 */
private boolean pushEdge(final KamEdge edge, final SetStack<KamNode> nodeStack,
        final SetStack<KamEdge> edgeStack) {
    // Reject edges we've already walked.
    if (edgeStack.contains(edge)) {
        return false;
    }

    final KamNode currentNode = nodeStack.peek();
    final KamNode oppositeNode;
    if (edge.getSourceNode() == currentNode) {
        oppositeNode = edge.getTargetNode();
    } else {
        oppositeNode = edge.getSourceNode();
    }

    // Reject steps that would revisit a node already on the path.
    if (nodeStack.contains(oppositeNode)) {
        return false;
    }

    nodeStack.push(oppositeNode);
    edgeStack.push(edge);
    return true;
}
python
def namedb_get_all_names( cur, current_block, offset=None, count=None, include_expired=False ):
    """
    Get a list of all names in the database, optionally paginated with
    offset and count.

    Excludes expired names unless include_expired is True; revoked names
    are always included.

    :param cur: database cursor
    :param current_block: block height used to evaluate name expiry
    :param offset: optional pagination offset
    :param count: optional pagination count
    :param include_expired: if True, also return expired names
    :return: list of name strings
    """
    unexpired_query = ""
    unexpired_args = ()

    if not include_expired:
        # restrict the query to names that are unexpired as of current_block
        unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )
        unexpired_query = 'WHERE {}'.format(unexpired_query)

    query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + unexpired_query + " ORDER BY name "
    args = unexpired_args

    # Append LIMIT/OFFSET when pagination is requested.
    offset_count_query, offset_count_args = namedb_offset_count_predicate( offset=offset, count=count )
    query += offset_count_query + ";"
    args += offset_count_args

    name_rows = namedb_query_execute( cur, query, tuple(args) )
    ret = []
    for name_row in name_rows:
        rec = {}
        rec.update( name_row )
        ret.append( rec['name'] )

    return ret
java
/**
 * Continues the fluent match with a case built from the given
 * zero-variable decomposable match builder.
 *
 * @param decomposableMatchBuilder builder describing the pattern to match
 * @param <U> the matched subtype of {@code T}
 * @return the ongoing matcher extended with this case
 */
public <U extends T> OngoingMatchingR0<T, U, R> when(
    DecomposableMatchBuilder0<U> decomposableMatchBuilder) {
  return new OngoingMatchingR0<>(this, decomposableMatchBuilder.build());
}
python
def build_self_reference(filename, clean_wcs=False):
    """ This function creates a reference, undistorted WCS that can be used to
    apply a correction to the WCS of the input file.

    Parameters
    ----------
    filename : str
        Filename of image which will be corrected, and which will form the basis
        of the undistorted WCS.

    clean_wcs : bool
        Specify whether or not to return the WCS object without any distortion
        information, or any history of the original input image.  This converts
        the output from `utils.output_wcs()` into a pristine
        `~stwcs.wcsutils.HSTWCS` object.

    Returns
    -------
    customwcs : `stwcs.wcsutils.HSTWCS`
        HSTWCS object which contains the undistorted WCS representing the entire
        field-of-view for the input image.

    Examples
    --------
    This function can be used with the following syntax to apply a shift/rot/scale
    change to the same image:

    >>> import buildref
    >>> from drizzlepac import updatehdr
    >>> filename = "jce501erq_flc.fits"
    >>> wcslin = buildref.build_self_reference(filename)
    >>> updatehdr.updatewcs_with_shift(filename, wcslin, xsh=49.5694,
    ... ysh=19.2203, rot = 359.998, scale = 0.9999964)
    """
    sciname = 'sipwcs' if 'sipwcs' in filename else 'sci'
    wcslin = build_reference_wcs([filename], sciname=sciname)

    if not clean_wcs:
        return wcslin

    # Strip distortion/history: rebuild a pristine HSTWCS from the core
    # keywords of the undistorted solution.
    wcsbase = wcslin.wcs
    return build_hstwcs(
        wcsbase.crval[0], wcsbase.crval[1],
        wcsbase.crpix[0], wcsbase.crpix[1],
        wcslin._naxis1, wcslin._naxis2,
        wcslin.pscale, wcslin.orientat)
java
/**
 * Marshals the given {@link SubscriptionFilter} into the protocol
 * representation by emitting each field through its binding. The emission
 * order mirrors the field order of the wire format.
 *
 * @param subscriptionFilter the filter to marshall; must not be null
 * @param protocolMarshaller destination marshaller
 * @throws SdkClientException if the input is null or marshalling fails
 */
public void marshall(SubscriptionFilter subscriptionFilter, ProtocolMarshaller protocolMarshaller) {
    if (subscriptionFilter == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(subscriptionFilter.getFilterName(), FILTERNAME_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getLogGroupName(), LOGGROUPNAME_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getFilterPattern(), FILTERPATTERN_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getDestinationArn(), DESTINATIONARN_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getRoleArn(), ROLEARN_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getDistribution(), DISTRIBUTION_BINDING);
        protocolMarshaller.marshall(subscriptionFilter.getCreationTime(), CREATIONTIME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def _clean_html(html):
    """Strip markup from the provided HTML fragment.

    Removes links (``<a href="...">...</a>``) and other HTML tags, replaces
    the ``&#x000A;`` entity with ``\\n``, collapses backslash-escaped line
    breaks to ``\\n``, and drops pilcrow ("¶") characters.
    """
    text = html.replace(u'&#x000A;', u'\n').replace(u'¶', '')
    # Apply the module-level regex substitutions in order: links first,
    # then remaining tags, then backslash line-break normalization.
    substitutions = (
        (_LINK_PATTERN, u''),
        (_HTML_TAG_PATTERN, u''),
        (_BACKSLASH_PATTERN, u'\n'),
    )
    for pattern, replacement in substitutions:
        text = pattern.sub(replacement, text)
    return text
python
def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid):
    "Create an instance of `ClassificationInterpretation`"
    # get_preds(with_loss=True) yields predictions, targets and per-item
    # losses; forward them all to the constructor after `learn`.
    outputs = learn.get_preds(ds_type=ds_type, with_loss=True)
    return cls(learn, *outputs)
python
def reverseComplement(self, isRNA=None):
    """
    Reverse complement this sequence in-place.

    Builds the result with a single ``str.join`` over the reversed
    sequence instead of repeated string concatenation, which is
    quadratic in the worst case.

    :param isRNA: if True, treat this sequence as RNA. If False, treat
                  it as DNA. If None (default), inspect the sequence and
                  make a guess as to whether it is RNA or DNA.
    """
    use_rna = self.isRNA() if isRNA is None else isRNA
    # Choose the complement table once, outside the per-character work.
    complements = RNA_COMPLEMENTS if use_rna else DNA_COMPLEMENTS
    # Complementing then reversing equals reversing then complementing,
    # since the mapping is per-character.
    self.sequenceData = "".join(complements[n]
                                for n in reversed(self.sequenceData))
java
public static <GeneralVisitor extends Visitor> GeneralVisitor visit( Visitable visitable, GeneralVisitor visitor ) {
        // Null visitables are tolerated: the visitor is simply returned
        // unchanged so calls can be chained safely.
        if (visitable == null) {
            return visitor;
        }
        visitable.accept(visitor);
        return visitor;
    }
java
/**
 * Records a failed or killed task attempt for the given TIP.
 *
 * Builds a synthetic {@code TaskStatus} in FAILED or KILLED state (per
 * {@code isFailed}), back-fills start/finish times from the previous status
 * of the attempt (falling back to "now" when unknown), and pushes the
 * update through {@code updateTaskStatus}. If the TIP transitions from
 * complete back to incomplete as a result, the failure is also logged to
 * job history.
 *
 * NOTE(review): assumes {@code lockObject} guards TIP state and that
 * {@code updateTaskStatus} performs the actual bookkeeping — confirm
 * against the enclosing class.
 */
@SuppressWarnings("deprecation")
public void failedTask(
    TaskInProgress tip, TaskAttemptID taskid,
    String reason, TaskStatus.Phase phase, boolean isFailed,
    String trackerName, TaskTrackerInfo ttStatus) {
    // Map the boolean onto the terminal attempt state.
    TaskStatus.State state = isFailed ? TaskStatus.State.FAILED: TaskStatus.State.KILLED;
    // Synthetic status: progress 1.0, reason used for both state string and diagnostics.
    TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(),
                                                    taskid,
                                                    0.0f, 1,
                                                    state,
                                                    reason,
                                                    reason,
                                                    trackerName, phase,
                                                    new Counters());
    synchronized (lockObject) {
        // update the actual start-time of the attempt; reuse the previously
        // recorded start time when one exists, otherwise fall back to "now".
        TaskStatus oldStatus = tip.getTaskStatus(taskid);
        long startTime = oldStatus == null
        ? JobTracker.getClock().getTime()
        : oldStatus.getStartTime();
        // A negative recorded start time is invalid — replace with "now".
        if (startTime < 0) {
        startTime = JobTracker.getClock().getTime();
        }
        status.setStartTime(startTime);
        status.setFinishTime(JobTracker.getClock().getTime());
        // Compare completeness before/after the update to detect a
        // successful TIP being demoted by this failure.
        boolean wasComplete = tip.isComplete();
        updateTaskStatus(tip, status, ttStatus);
        boolean isComplete = tip.isComplete();
        if (wasComplete && !isComplete) { // mark a successful tip as failed
        String taskType = getTaskType(tip);
        JobHistory.Task.logFailed(tip.getTIPId(), taskType,
                                    tip.getExecFinishTime(), reason,
                                    taskid);
        }
    }
}
java
public static Service createService(Enum<?> classNameKey,String defaultClassName,ConfigurationHolder configurationHolder,String propertyPart) { //validate input if(classNameKey==null) { throw new FaxException("Service class name key not provided."); } //convert to string String classNameKeyString=classNameKey.toString(); //create service Service service=ServiceFactory.createService(classNameKeyString,defaultClassName,configurationHolder,propertyPart); return service; }
python
def to_docx(
    self,
    filename=None,
    input_dataset=True,
    summary_table=True,
    recommendation_details=True,
    recommended_model=True,
    all_models=False,
):
    """
    Write batch sessions to a Word file.

    Each session in the batch is appended to a single report; the report
    is written to disk only when ``filename`` is given.

    Parameters
    ----------
    filename : str or None
        If provided, the file is saved to this location, otherwise this
        method returns a docx.Document
    input_dataset : bool
        Include input dataset data table
    summary_table : bool
        Include model summary table
    recommendation_details : bool
        Include model recommendation details table
    recommended_model : bool
        Include the recommended model output and dose-response plot,
        if one exists
    all_models : bool
        Include all models output and dose-response plots

    Returns
    -------
    bmds.Reporter
        The bmds.Reporter object.
    """
    reporter = Reporter()
    for session in self:
        reporter.add_session(
            session,
            input_dataset,
            summary_table,
            recommendation_details,
            recommended_model,
            all_models,
        )
    # Only persist when a destination was supplied (falsy values skip save).
    if filename:
        reporter.save(filename)
    return reporter
java
/**
 * Builds a {@link Supplier} by handing a fresh {@code SupplierBuilder}
 * to the provided configuration function and returning its result.
 *
 * <p>Fixed: the builder was previously used as a raw type, producing
 * unchecked warnings and losing type safety; it is now parameterized
 * with {@code T}.
 *
 * @param buildingFunction consumer that configures the builder
 * @param <T> the type supplied by the resulting supplier
 * @return the supplier produced by the configured builder
 */
@Nonnull
public static <T> Supplier<T> supplierFrom(Consumer<SupplierBuilder<T>> buildingFunction) {
    SupplierBuilder<T> builder = new SupplierBuilder<>();
    buildingFunction.accept(builder);
    return builder.build();
}
python
def _create_connection(self):
    """Create a connection, retrying with backoff on AMQP errors.

    Retries indefinitely unless ``self.max_retries`` is set, in which
    case exceeding it raises. Stops silently when the consumer has been
    stopped or on keyboard interrupt.

    :return:
    """
    attempts = 0
    while True:
        attempts += 1
        # Bail out if the consumer was stopped between attempts.
        if self._stopped.is_set():
            break
        try:
            self._connection = Connection(self.hostname,
                                          self.username,
                                          self.password)
        except amqpstorm.AMQPError as why:
            LOGGER.warning(why)
            if self.max_retries and attempts > self.max_retries:
                raise Exception('max number of retries reached')
            # Linear backoff, capped at 30 seconds.
            time.sleep(min(attempts * 2, 30))
        except KeyboardInterrupt:
            break
        else:
            # Connection established — stop retrying.
            break
python
def _detect(self):
    """
    Detects pragma statements that allow for outdated solc versions.

    Fixed: removed the dead local ``detected_version``, which was set
    but never read.

    :return: Returns the relevant JSON data for the findings.
    """
    # Detect all version related pragmas and check if they are disallowed.
    results = []
    pragma = self.slither.pragma_directives
    disallowed_pragmas = []

    for p in pragma:
        # Skip any pragma directives which do not refer to version
        if len(p.directive) < 1 or p.directive[0] != "solidity":
            continue

        # This is a version pragma, so we test if it is disallowed.
        reason = self._check_pragma(p.version)
        if reason:
            disallowed_pragmas.append((reason, p))

    # If we found any disallowed pragmas, we output our findings.
    if disallowed_pragmas:
        info = "Detected issues with version pragma in {}:\n".format(self.filename)
        for (reason, p) in disallowed_pragmas:
            info += "\t- {} ({}): {}\n".format(p, p.source_mapping_str, reason)
        json = self.generate_json_result(info)

        # follow the same format than add_nodes_to_json
        json['elements'] = [{'type': 'expression',
                             'expression': p.version,
                             'source_mapping': p.source_mapping}
                            for (reason, p) in disallowed_pragmas]
        results.append(json)

    return results
python
def dotted_completion(cls, line, sorted_keys, compositor_defs):
    """Resolve the appropriate key in Store.options for tab completion.

    Scans the words of ``line`` from right to left, assembling undotted,
    single-dotted and double-dotted candidate keys. The longest candidate
    found in ``sorted_keys`` wins and its next-level children are returned
    as suggestions. Failing that, a word matching a compositor definition
    yields that definition's key with no suggestions.
    """
    tokens = list(reversed(line.replace('.', ' ').split()))
    for i, token in enumerate(tokens):
        # Build candidates from shortest (undotted) to longest (double-dotted).
        candidates = [tokens[i]]
        if i >= 1:
            candidates.append(candidates[-1] + '.' + tokens[i - 1])
        if i >= 2:
            candidates.append(candidates[-1] + '.' + tokens[i - 2])

        # Prefer the longest potential dotted match.
        for key in reversed(candidates):
            if key in sorted_keys:
                depth = key.count('.')
                suggestions = [k.split('.')[depth + 1]
                               for k in sorted_keys
                               if k.startswith(key + '.')]
                return key, suggestions

        # Fall back to compositor definitions.
        if token in compositor_defs:
            return compositor_defs[token], []

    return None, []
java
public Collection<String> getJobNames() {
    final List<String> jobNames = new ArrayList<>();
    // Collect the fully-qualified name of every known job.
    for (Job job : allItems(Job.class)) {
        jobNames.add(job.getFullName());
    }
    // Present names in case-insensitive alphabetical order.
    jobNames.sort(String.CASE_INSENSITIVE_ORDER);
    return jobNames;
}
java
private void readBitmap() { // (sub)image position & size. header.currentFrame.ix = readShort(); header.currentFrame.iy = readShort(); header.currentFrame.iw = readShort(); header.currentFrame.ih = readShort(); int packed = read(); // 1 - local color table flag interlace boolean lctFlag = (packed & 0x80) != 0; int lctSize = (int) Math.pow(2, (packed & 0x07) + 1); // 3 - sort flag // 4-5 - reserved lctSize = 2 << (packed & 7); // 6-8 - local color // table size header.currentFrame.interlace = (packed & 0x40) != 0; if (lctFlag) { // Read table. header.currentFrame.lct = readColorTable(lctSize); } else { // No local color table. header.currentFrame.lct = null; } // Save this as the decoding position pointer. header.currentFrame.bufferFrameStart = rawData.position(); // False decode pixel data to advance buffer. skipImageData(); if (err()) { return; } header.frameCount++; // Add image to frame. header.frames.add(header.currentFrame); }
java
public List<LDAPEntry> getParentUserGroupEntries(LDAPConnection ldapConnection, String userDN) throws GuacamoleException { // Do not return any user groups if base DN is not specified String groupBaseDN = confService.getGroupBaseDN(); if (groupBaseDN == null) return Collections.emptyList(); // Get all groups the user is a member of starting at the groupBaseDN, // excluding guacConfigGroups return queryService.search( ldapConnection, groupBaseDN, getGroupSearchFilter(), Collections.singleton(confService.getMemberAttribute()), userDN ); }